Cleaned SQL handling and UI language sets

This commit is contained in:
ValueOn AG 2026-04-08 20:28:34 +02:00
parent c395495300
commit cc1fdb13e5
59 changed files with 5864 additions and 1626 deletions

6
app.py
View file

@ -578,6 +578,9 @@ app.include_router(invitationsRouter)
from modules.routes.routeNotifications import router as notificationsRouter from modules.routes.routeNotifications import router as notificationsRouter
app.include_router(notificationsRouter) app.include_router(notificationsRouter)
from modules.routes.routeI18n import router as i18nRouter
app.include_router(i18nRouter)
from modules.routes.routeAdminRbacExport import router as rbacAdminExportRouter from modules.routes.routeAdminRbacExport import router as rbacAdminExportRouter
app.include_router(rbacAdminExportRouter) app.include_router(rbacAdminExportRouter)
@ -600,6 +603,9 @@ from modules.routes.routeSystem import router as systemRouter, navigationRouter
app.include_router(systemRouter) app.include_router(systemRouter)
app.include_router(navigationRouter) app.include_router(navigationRouter)
from modules.routes.routeWorkflowDashboard import router as workflowDashboardRouter
app.include_router(workflowDashboardRouter)
# ============================================================================ # ============================================================================
# PLUG&PLAY FEATURE ROUTERS # PLUG&PLAY FEATURE ROUTERS
# Dynamically load routers from feature containers in modules/features/ # Dynamically load routers from feature containers in modules/features/

View file

@ -946,13 +946,14 @@ class DatabaseConnector:
if recordFilter: if recordFilter:
for field, value in recordFilter.items(): for field, value in recordFilter.items():
if value is None: if value is None:
# Use IS NULL for None values (= NULL is always false in SQL)
where_conditions.append(f'"{field}" IS NULL') where_conditions.append(f'"{field}" IS NULL')
elif isinstance(value, list):
where_conditions.append(f'"{field}" = ANY(%s)')
where_values.append(value)
else: else:
where_conditions.append(f'"{field}" = %s') where_conditions.append(f'"{field}" = %s')
where_values.append(value) where_values.append(value)
# Build the query
if where_conditions: if where_conditions:
where_clause = " WHERE " + " AND ".join(where_conditions) where_clause = " WHERE " + " AND ".join(where_conditions)
else: else:
@ -1113,13 +1114,15 @@ class DatabaseConnector:
orderParts: List[str] = [] orderParts: List[str] = []
if pagination and pagination.sort: if pagination and pagination.sort:
for sf in pagination.sort: for sf in pagination.sort:
if sf.field in validColumns: sfField = sf.get("field") if isinstance(sf, dict) else getattr(sf, "field", None)
direction = "DESC" if sf.direction.lower() == "desc" else "ASC" sfDir = sf.get("direction", "asc") if isinstance(sf, dict) else getattr(sf, "direction", "asc")
colType = fields.get(sf.field, "TEXT") if sfField and sfField in validColumns:
direction = "DESC" if str(sfDir).lower() == "desc" else "ASC"
colType = fields.get(sfField, "TEXT")
if colType == "BOOLEAN": if colType == "BOOLEAN":
orderParts.append(f'COALESCE("{sf.field}", FALSE) {direction}') orderParts.append(f'COALESCE("{sfField}", FALSE) {direction}')
else: else:
orderParts.append(f'"{sf.field}" {direction} NULLS LAST') orderParts.append(f'"{sfField}" {direction} NULLS LAST')
if not orderParts: if not orderParts:
orderParts.append('"id"') orderParts.append('"id"')
order_clause = " ORDER BY " + ", ".join(orderParts) order_clause = " ORDER BY " + ", ".join(orderParts)

View file

@ -1,7 +1,7 @@
# Copyright (c) 2025 Patrick Motsch # Copyright (c) 2025 Patrick Motsch
# All rights reserved. # All rights reserved.
from typing import Any, Dict, List, Optional, Literal, Union from typing import Any, Dict, List, Optional, Literal, Union
from pydantic import BaseModel, Field from pydantic import BaseModel, Field, field_serializer
from datetime import datetime from datetime import datetime
@ -117,10 +117,11 @@ class RenderedDocument(BaseModel):
documentType: Optional[str] = Field(default=None, description="Type of document (e.g., 'report', 'invoice', 'analysis')") documentType: Optional[str] = Field(default=None, description="Type of document (e.g., 'report', 'invoice', 'analysis')")
metadata: Optional[Dict[str, Any]] = Field(default=None, description="Document metadata (title, author, etc.)") metadata: Optional[Dict[str, Any]] = Field(default=None, description="Document metadata (title, author, etc.)")
class Config: @field_serializer("documentData")
json_encoders = { def _serializeDocumentData(self, v: bytes) -> str:
bytes: lambda v: v.decode('utf-8', errors='replace') if isinstance(v, bytes) else v if isinstance(v, bytes):
} return v.decode("utf-8", errors="replace")
return str(v)
# Update forward references # Update forward references

View file

@ -0,0 +1,85 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""UI language sets: global i18n strings (German key -> translated value)."""
from typing import Dict, Optional, Literal
from pydantic import Field
from modules.datamodels.datamodelBase import PowerOnModel
from modules.shared.attributeUtils import registerModelLabels
# Allowed lifecycle states for a UiLanguageSet's ``status`` field
# (see the select options registered on the model's status field).
UiLanguageStatus = Literal["complete", "incomplete", "generating"]
class UiLanguageSet(PowerOnModel):
    """
    One UI translation set per ISO 639-1 language; ``id`` doubles as the
    primary key and equals the language code (e.g. ``de``, ``en``).

    ``keys`` is a flat map from the German plaintext key to the translated
    label for this language; for the ``de`` set, values equal their keys.
    """
    # Primary key: the ISO 639-1 code itself (no separate surrogate id).
    id: str = Field(
        ...,
        description="ISO 639-1 language code (primary key), e.g. de, en, fr",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": False,
            "frontend_required": True,
        },
    )
    # Human-readable display name of the language.
    label: str = Field(
        ...,
        description="Human-readable language name",
        json_schema_extra={
            "frontend_type": "text",
            "frontend_readonly": False,
            "frontend_required": True,
        },
    )
    # Translation table: German plaintext key -> value in this language.
    keys: Dict[str, str] = Field(
        default_factory=dict,
        description="German plaintext key -> translated label",
        json_schema_extra={
            "frontend_type": "textarea",
            "frontend_readonly": False,
            "frontend_required": False,
        },
    )
    # Lifecycle state; "generating" presumably marks an in-progress
    # translation run ("Wird erzeugt") — TODO confirm against the route
    # that creates/updates these sets.
    status: UiLanguageStatus = Field(
        default="complete",
        description="complete | incomplete | generating",
        json_schema_extra={
            "frontend_type": "select",
            "frontend_readonly": False,
            "frontend_required": True,
            "frontend_options": [
                {"value": "complete", "label": {"de": "Vollständig", "en": "Complete"}},
                {"value": "incomplete", "label": {"de": "Unvollständig", "en": "Incomplete"}},
                {"value": "generating", "label": {"de": "Wird erzeugt", "en": "Generating"}},
            ],
        },
    )
    # NOTE(review): the "exactly one default" invariant is not enforced by
    # this model itself — presumably guaranteed by the service layer; verify.
    isDefault: bool = Field(
        default=False,
        description="Exactly one set should be default (de)",
        json_schema_extra={
            "frontend_type": "boolean",
            "frontend_readonly": False,
            "frontend_required": False,
        },
    )
# Register display labels (EN/DE) for the UiLanguageSet model and each of
# its fields with the shared attribute-label registry.
_uiLanguageSetModelLabel = {"en": "UI Language Set", "de": "UI-Sprachset"}
_uiLanguageSetFieldLabels = {
    "id": {"en": "Code", "de": "Code"},
    "label": {"en": "Label", "de": "Bezeichnung"},
    "keys": {"en": "Keys", "de": "Schlüssel"},
    "status": {"en": "Status", "de": "Status"},
    "isDefault": {"en": "Default", "de": "Standard"},
}
registerModelLabels(
    "UiLanguageSet",
    _uiLanguageSetModelLabel,
    _uiLanguageSetFieldLabels,
)

View file

@ -462,11 +462,7 @@ class AICenterChatModel(BaseChatModel):
elif isinstance(args_schema, BaseModel): elif isinstance(args_schema, BaseModel):
# It's a Pydantic model instance # It's a Pydantic model instance
if hasattr(args_schema, "model_dump"): if hasattr(args_schema, "model_dump"):
# Pydantic v2
parameters = args_schema.model_dump() parameters = args_schema.model_dump()
elif hasattr(args_schema, "dict"):
# Pydantic v1
parameters = args_schema.dict()
elif hasattr(args_schema, "schema"): elif hasattr(args_schema, "schema"):
# Has schema method (might be a class) # Has schema method (might be a class)
try: try:

View file

@ -150,8 +150,6 @@ def get_chatbot_threads(
if hasattr(workflow, 'model_dump'): if hasattr(workflow, 'model_dump'):
workflow_dict = workflow.model_dump() workflow_dict = workflow.model_dump()
elif hasattr(workflow, 'dict'):
workflow_dict = workflow.dict()
elif isinstance(workflow, dict): elif isinstance(workflow, dict):
workflow_dict = dict(workflow) workflow_dict = dict(workflow)
else: else:
@ -317,11 +315,11 @@ async def stream_chatbot_start(
# Emit filtered items # Emit filtered items
for item in filtered_items: for item in filtered_items:
# Convert Pydantic models to dicts for JSON serialization _inner = item.get("item")
serializable_item = { serializable_item = {
"type": item.get("type"), "type": item.get("type"),
"createdAt": item.get("createdAt"), "createdAt": item.get("createdAt"),
"item": item.get("item").model_dump() if hasattr(item.get("item"), "model_dump") else (item.get("item").dict() if hasattr(item.get("item"), "dict") else item.get("item")) "item": _inner.model_dump() if _inner is not None and hasattr(_inner, "model_dump") else _inner,
} }
# Emit item directly in exact chatData format: {type, createdAt, item} # Emit item directly in exact chatData format: {type, createdAt, item}
yield f"data: {json.dumps(serializable_item)}\n\n" yield f"data: {json.dumps(serializable_item)}\n\n"
@ -399,9 +397,6 @@ async def stream_chatbot_start(
if hasattr(item_obj, "model_dump"): if hasattr(item_obj, "model_dump"):
chatdata_item = chatdata_item.copy() chatdata_item = chatdata_item.copy()
chatdata_item["item"] = item_obj.model_dump() chatdata_item["item"] = item_obj.model_dump()
elif hasattr(item_obj, "dict"):
chatdata_item = chatdata_item.copy()
chatdata_item["item"] = item_obj.dict()
yield f"data: {json.dumps(chatdata_item)}\n\n" yield f"data: {json.dumps(chatdata_item)}\n\n"
# Handle completion/stopped events to close stream # Handle completion/stopped events to close stream

View file

@ -278,7 +278,7 @@ async def _update_conversation_name_async(
# Emit stat event so frontend can refresh thread list/title # Emit stat event so frontend can refresh thread list/title
workflow = interfaceDbChat.getWorkflow(workflowId) workflow = interfaceDbChat.getWorkflow(workflowId)
if workflow: if workflow:
wf_dict = workflow.model_dump() if hasattr(workflow, "model_dump") else workflow.dict() wf_dict = workflow.model_dump()
await event_manager.emit_event( await event_manager.emit_event(
context_id=workflowId, context_id=workflowId,
event_type="chatdata", event_type="chatdata",
@ -966,7 +966,7 @@ async def _bridge_chatbot_events(
data={ data={
"type": "message", "type": "message",
"createdAt": message_timestamp, "createdAt": message_timestamp,
"item": last_message.dict() "item": last_message.model_dump()
}, },
event_category="chat" event_category="chat"
) )
@ -1005,7 +1005,7 @@ async def _bridge_chatbot_events(
data={ data={
"type": "message", "type": "message",
"createdAt": message_timestamp, "createdAt": message_timestamp,
"item": assistant_msg.dict() "item": assistant_msg.model_dump()
}, },
event_category="chat" event_category="chat"
) )
@ -1089,7 +1089,7 @@ async def _bridge_chatbot_events(
data={ data={
"type": "message", "type": "message",
"createdAt": message_timestamp, "createdAt": message_timestamp,
"item": error_msg.dict() "item": error_msg.model_dump()
}, },
event_category="chat" event_category="chat"
) )
@ -1490,7 +1490,7 @@ async def _processChatbotMessageLangGraph(
data={ data={
"type": "message", "type": "message",
"createdAt": message_timestamp, "createdAt": message_timestamp,
"item": errorMessage.dict() "item": errorMessage.model_dump()
}, },
event_category="chat" event_category="chat"
) )

View file

@ -234,6 +234,16 @@ class AutoRun(PowerOnModel):
description="Workflow ID", description="Workflow ID",
json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}, json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
) )
mandateId: Optional[str] = Field(
default=None,
description="Mandate ID for cross-feature querying",
json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
)
ownerId: Optional[str] = Field(
default=None,
description="User ID who triggered this run",
json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
)
versionId: Optional[str] = Field( versionId: Optional[str] = Field(
default=None, default=None,
description="AutoVersion ID used for this run", description="AutoVersion ID used for this run",
@ -297,6 +307,8 @@ registerModelLabels(
{ {
"id": {"en": "ID", "de": "ID", "fr": "ID"}, "id": {"en": "ID", "de": "ID", "fr": "ID"},
"workflowId": {"en": "Workflow ID", "de": "Workflow-ID", "fr": "ID workflow"}, "workflowId": {"en": "Workflow ID", "de": "Workflow-ID", "fr": "ID workflow"},
"mandateId": {"en": "Mandate ID", "de": "Mandanten-ID", "fr": "ID du mandat"},
"ownerId": {"en": "Owner", "de": "Auslöser", "fr": "Propriétaire"},
"versionId": {"en": "Version ID", "de": "Versions-ID", "fr": "ID version"}, "versionId": {"en": "Version ID", "de": "Versions-ID", "fr": "ID version"},
"status": {"en": "Status", "de": "Status", "fr": "Statut"}, "status": {"en": "Status", "de": "Status", "fr": "Statut"},
"trigger": {"en": "Trigger", "de": "Auslöser", "fr": "Déclencheur"}, "trigger": {"en": "Trigger", "de": "Auslöser", "fr": "Déclencheur"},

View file

@ -267,13 +267,16 @@ class GraphicalEditorObjects:
def createRun(self, workflowId: str, nodeOutputs: Dict = None, context: Dict = None) -> Dict[str, Any]: def createRun(self, workflowId: str, nodeOutputs: Dict = None, context: Dict = None) -> Dict[str, Any]:
"""Create a new workflow run.""" """Create a new workflow run."""
ctx = context or {}
data = { data = {
"id": str(uuid.uuid4()), "id": str(uuid.uuid4()),
"workflowId": workflowId, "workflowId": workflowId,
"status": "running", "status": "running",
"nodeOutputs": _make_json_serializable(nodeOutputs or {}), "nodeOutputs": _make_json_serializable(nodeOutputs or {}),
"currentNodeId": None, "currentNodeId": None,
"context": context or {}, "context": ctx,
"mandateId": ctx.get("mandateId") or self.mandateId,
"ownerId": ctx.get("userId") or (self.currentUser.id if self.currentUser else None),
} }
created = self.db.recordCreate(Automation2WorkflowRun, data) created = self.db.recordCreate(Automation2WorkflowRun, data)
return dict(created) return dict(created)

View file

@ -45,11 +45,6 @@ UI_OBJECTS = [
"label": {"en": "Tasks", "de": "Tasks", "fr": "Tâches"}, "label": {"en": "Tasks", "de": "Tasks", "fr": "Tâches"},
"meta": {"area": "tasks"} "meta": {"area": "tasks"}
}, },
{
"objectKey": "ui.feature.graphicalEditor.dashboard",
"label": {"en": "Dashboard", "de": "Dashboard", "fr": "Tableau de bord"},
"meta": {"area": "dashboard"}
},
] ]
RESOURCE_OBJECTS = [ RESOURCE_OBJECTS = [
@ -79,7 +74,6 @@ TEMPLATE_ROLES = [
"fr": "Visualiseur Éditeur graphique - Consulter les workflows (lecture seule)", "fr": "Visualiseur Éditeur graphique - Consulter les workflows (lecture seule)",
}, },
"accessRules": [ "accessRules": [
{"context": "UI", "item": "ui.feature.graphicalEditor.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.graphicalEditor.workflows", "view": True}, {"context": "UI", "item": "ui.feature.graphicalEditor.workflows", "view": True},
{"context": "UI", "item": "ui.feature.graphicalEditor.workflows-tasks", "view": True}, {"context": "UI", "item": "ui.feature.graphicalEditor.workflows-tasks", "view": True},
{"context": "UI", "item": "ui.feature.graphicalEditor.templates", "view": True}, {"context": "UI", "item": "ui.feature.graphicalEditor.templates", "view": True},
@ -94,7 +88,6 @@ TEMPLATE_ROLES = [
"fr": "Utilisateur Éditeur graphique - Utiliser le flow builder", "fr": "Utilisateur Éditeur graphique - Utiliser le flow builder",
}, },
"accessRules": [ "accessRules": [
{"context": "UI", "item": "ui.feature.graphicalEditor.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.graphicalEditor.editor", "view": True}, {"context": "UI", "item": "ui.feature.graphicalEditor.editor", "view": True},
{"context": "UI", "item": "ui.feature.graphicalEditor.workflows", "view": True}, {"context": "UI", "item": "ui.feature.graphicalEditor.workflows", "view": True},
{"context": "UI", "item": "ui.feature.graphicalEditor.workflows-tasks", "view": True}, {"context": "UI", "item": "ui.feature.graphicalEditor.workflows-tasks", "view": True},
@ -141,10 +134,11 @@ def getGraphicalEditorServices(
_workflow = workflow _workflow = workflow
if _workflow is None: if _workflow is None:
import uuid as _uuid
_workflow = type( _workflow = type(
"_Placeholder", "_Placeholder",
(), (),
{"featureCode": FEATURE_CODE, "id": None, "workflowMode": None, "messages": []}, {"featureCode": FEATURE_CODE, "id": f"transient-{_uuid.uuid4().hex[:12]}", "workflowMode": None, "messages": []},
)() )()
ctx = ServiceCenterContext( ctx = ServiceCenterContext(
@ -159,7 +153,7 @@ def getGraphicalEditorServices(
hub.mandateId = mandateId hub.mandateId = mandateId
hub.featureInstanceId = featureInstanceId hub.featureInstanceId = featureInstanceId
hub._service_context = ctx hub._service_context = ctx
hub.workflow = workflow hub.workflow = _workflow
hub.featureCode = FEATURE_CODE hub.featureCode = FEATURE_CODE
for spec in REQUIRED_SERVICES: for spec in REQUIRED_SERVICES:

View file

@ -10,6 +10,7 @@ from .sharepoint import SHAREPOINT_NODES
from .clickup import CLICKUP_NODES from .clickup import CLICKUP_NODES
from .file import FILE_NODES from .file import FILE_NODES
from .trustee import TRUSTEE_NODES from .trustee import TRUSTEE_NODES
from .data import DATA_NODES
STATIC_NODE_TYPES = ( STATIC_NODE_TYPES = (
TRIGGER_NODES TRIGGER_NODES
@ -21,4 +22,5 @@ STATIC_NODE_TYPES = (
+ CLICKUP_NODES + CLICKUP_NODES
+ FILE_NODES + FILE_NODES
+ TRUSTEE_NODES + TRUSTEE_NODES
+ DATA_NODES
) )

View file

@ -8,14 +8,19 @@ AI_NODES = [
"label": {"en": "Prompt", "de": "Prompt", "fr": "Invite"}, "label": {"en": "Prompt", "de": "Prompt", "fr": "Invite"},
"description": {"en": "Enter a prompt and AI does something", "de": "Prompt eingeben und KI führt aus", "fr": "Entrer une invite et l'IA exécute"}, "description": {"en": "Enter a prompt and AI does something", "de": "Prompt eingeben und KI führt aus", "fr": "Entrer une invite et l'IA exécute"},
"parameters": [ "parameters": [
{"name": "prompt", "type": "string", "required": True, "description": {"en": "AI prompt", "de": "KI-Prompt", "fr": "Invite IA"}}, {"name": "aiPrompt", "type": "string", "required": True, "frontendType": "textarea",
"description": {"en": "AI prompt", "de": "KI-Prompt", "fr": "Invite IA"}},
{"name": "outputFormat", "type": "string", "required": False, "frontendType": "select",
"frontendOptions": {"options": ["text", "json", "emailDraft"]},
"description": {"en": "Output format", "de": "Ausgabeformat", "fr": "Format de sortie"}, "default": "text"},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "AiResult"}},
"meta": {"icon": "mdi-robot", "color": "#9C27B0"}, "meta": {"icon": "mdi-robot", "color": "#9C27B0"},
"_method": "ai", "_method": "ai",
"_action": "process", "_action": "process",
"_paramMap": {"prompt": "aiPrompt"},
}, },
{ {
"id": "ai.webResearch", "id": "ai.webResearch",
@ -23,14 +28,16 @@ AI_NODES = [
"label": {"en": "Web Research", "de": "Web-Recherche", "fr": "Recherche web"}, "label": {"en": "Web Research", "de": "Web-Recherche", "fr": "Recherche web"},
"description": {"en": "Research on the web", "de": "Recherche im Web", "fr": "Recherche sur le web"}, "description": {"en": "Research on the web", "de": "Recherche im Web", "fr": "Recherche sur le web"},
"parameters": [ "parameters": [
{"name": "query", "type": "string", "required": True, "description": {"en": "Research query", "de": "Recherche-Anfrage", "fr": "Requête de recherche"}}, {"name": "prompt", "type": "string", "required": True, "frontendType": "textarea",
"description": {"en": "Research query", "de": "Recherche-Anfrage", "fr": "Requête de recherche"}},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "AiResult"}},
"meta": {"icon": "mdi-magnify", "color": "#9C27B0"}, "meta": {"icon": "mdi-magnify", "color": "#9C27B0"},
"_method": "ai", "_method": "ai",
"_action": "webResearch", "_action": "webResearch",
"_paramMap": {"query": "prompt"},
}, },
{ {
"id": "ai.summarizeDocument", "id": "ai.summarizeDocument",
@ -38,14 +45,17 @@ AI_NODES = [
"label": {"en": "Summarize Document", "de": "Dokument zusammenfassen", "fr": "Résumer document"}, "label": {"en": "Summarize Document", "de": "Dokument zusammenfassen", "fr": "Résumer document"},
"description": {"en": "Summarize document content", "de": "Dokumentinhalt zusammenfassen", "fr": "Résumer le contenu du document"}, "description": {"en": "Summarize document content", "de": "Dokumentinhalt zusammenfassen", "fr": "Résumer le contenu du document"},
"parameters": [ "parameters": [
{"name": "summaryLength", "type": "string", "required": False, "description": {"en": "Short, medium, or long", "de": "Kurz, mittel oder lang", "fr": "Court, moyen ou long"}, "default": "medium"}, {"name": "summaryLength", "type": "string", "required": False, "frontendType": "select",
"frontendOptions": {"options": ["short", "medium", "long"]},
"description": {"en": "Short, medium, or long", "de": "Kurz, mittel oder lang", "fr": "Court, moyen ou long"}, "default": "medium"},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
"outputPorts": {0: {"schema": "AiResult"}},
"meta": {"icon": "mdi-file-document-outline", "color": "#9C27B0"}, "meta": {"icon": "mdi-file-document-outline", "color": "#9C27B0"},
"_method": "ai", "_method": "ai",
"_action": "summarizeDocument", "_action": "summarizeDocument",
"_paramMap": {},
}, },
{ {
"id": "ai.translateDocument", "id": "ai.translateDocument",
@ -53,14 +63,17 @@ AI_NODES = [
"label": {"en": "Translate Document", "de": "Dokument übersetzen", "fr": "Traduire document"}, "label": {"en": "Translate Document", "de": "Dokument übersetzen", "fr": "Traduire document"},
"description": {"en": "Translate document to target language", "de": "Dokument in Zielsprache übersetzen", "fr": "Traduire le document"}, "description": {"en": "Translate document to target language", "de": "Dokument in Zielsprache übersetzen", "fr": "Traduire le document"},
"parameters": [ "parameters": [
{"name": "targetLanguage", "type": "string", "required": True, "description": {"en": "Target language (e.g. en, de, fr)", "de": "Zielsprache", "fr": "Langue cible"}}, {"name": "targetLanguage", "type": "string", "required": True, "frontendType": "select",
"frontendOptions": {"options": ["en", "de", "fr", "it", "es", "pt", "nl"]},
"description": {"en": "Target language", "de": "Zielsprache", "fr": "Langue cible"}},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
"outputPorts": {0: {"schema": "AiResult"}},
"meta": {"icon": "mdi-translate", "color": "#9C27B0"}, "meta": {"icon": "mdi-translate", "color": "#9C27B0"},
"_method": "ai", "_method": "ai",
"_action": "translateDocument", "_action": "translateDocument",
"_paramMap": {"targetLanguage": "targetLanguage"},
}, },
{ {
"id": "ai.convertDocument", "id": "ai.convertDocument",
@ -68,14 +81,17 @@ AI_NODES = [
"label": {"en": "Convert Document", "de": "Dokument konvertieren", "fr": "Convertir document"}, "label": {"en": "Convert Document", "de": "Dokument konvertieren", "fr": "Convertir document"},
"description": {"en": "Convert document to another format", "de": "Dokument in anderes Format konvertieren", "fr": "Convertir le document"}, "description": {"en": "Convert document to another format", "de": "Dokument in anderes Format konvertieren", "fr": "Convertir le document"},
"parameters": [ "parameters": [
{"name": "targetFormat", "type": "string", "required": True, "description": {"en": "Target format (pdf, docx, txt, etc.)", "de": "Zielformat", "fr": "Format cible"}}, {"name": "targetFormat", "type": "string", "required": True, "frontendType": "select",
"frontendOptions": {"options": ["pdf", "docx", "txt", "html", "md"]},
"description": {"en": "Target format", "de": "Zielformat", "fr": "Format cible"}},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
"outputPorts": {0: {"schema": "DocumentList"}},
"meta": {"icon": "mdi-file-convert", "color": "#9C27B0"}, "meta": {"icon": "mdi-file-convert", "color": "#9C27B0"},
"_method": "ai", "_method": "ai",
"_action": "convertDocument", "_action": "convertDocument",
"_paramMap": {"targetFormat": "targetFormat"},
}, },
{ {
"id": "ai.generateDocument", "id": "ai.generateDocument",
@ -83,14 +99,16 @@ AI_NODES = [
"label": {"en": "Generate Document", "de": "Dokument generieren", "fr": "Générer document"}, "label": {"en": "Generate Document", "de": "Dokument generieren", "fr": "Générer document"},
"description": {"en": "Generate document from prompt", "de": "Dokument aus Prompt generieren", "fr": "Générer un document"}, "description": {"en": "Generate document from prompt", "de": "Dokument aus Prompt generieren", "fr": "Générer un document"},
"parameters": [ "parameters": [
{"name": "prompt", "type": "string", "required": True, "description": {"en": "Generation prompt", "de": "Generierungs-Prompt", "fr": "Invite de génération"}}, {"name": "prompt", "type": "string", "required": True, "frontendType": "textarea",
"description": {"en": "Generation prompt", "de": "Generierungs-Prompt", "fr": "Invite de génération"}},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "DocumentList"}},
"meta": {"icon": "mdi-file-plus", "color": "#9C27B0"}, "meta": {"icon": "mdi-file-plus", "color": "#9C27B0"},
"_method": "ai", "_method": "ai",
"_action": "generateDocument", "_action": "generateDocument",
"_paramMap": {"prompt": "prompt", "format": "format"},
}, },
{ {
"id": "ai.generateCode", "id": "ai.generateCode",
@ -98,14 +116,18 @@ AI_NODES = [
"label": {"en": "Generate Code", "de": "Code generieren", "fr": "Générer code"}, "label": {"en": "Generate Code", "de": "Code generieren", "fr": "Générer code"},
"description": {"en": "Generate code from description", "de": "Code aus Beschreibung generieren", "fr": "Générer du code"}, "description": {"en": "Generate code from description", "de": "Code aus Beschreibung generieren", "fr": "Générer du code"},
"parameters": [ "parameters": [
{"name": "prompt", "type": "string", "required": True, "description": {"en": "Code generation prompt", "de": "Code-Generierungs-Prompt", "fr": "Invite de génération de code"}}, {"name": "prompt", "type": "string", "required": True, "frontendType": "textarea",
{"name": "language", "type": "string", "required": False, "description": {"en": "Programming language", "de": "Programmiersprache", "fr": "Langage de programmation"}, "default": "python"}, "description": {"en": "Code generation prompt", "de": "Code-Generierungs-Prompt", "fr": "Invite de génération de code"}},
{"name": "language", "type": "string", "required": False, "frontendType": "select",
"frontendOptions": {"options": ["python", "javascript", "typescript", "java", "csharp", "go"]},
"description": {"en": "Programming language", "de": "Programmiersprache", "fr": "Langage de programmation"}, "default": "python"},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "AiResult"}},
"meta": {"icon": "mdi-code-tags", "color": "#9C27B0"}, "meta": {"icon": "mdi-code-tags", "color": "#9C27B0"},
"_method": "ai", "_method": "ai",
"_action": "generateCode", "_action": "generateCode",
"_paramMap": {"prompt": "prompt", "language": "language"},
}, },
] ]

View file

@ -7,102 +7,57 @@ CLICKUP_NODES = [
"id": "clickup.searchTasks", "id": "clickup.searchTasks",
"category": "clickup", "category": "clickup",
"label": {"en": "Search tasks", "de": "Aufgaben suchen", "fr": "Rechercher tâches"}, "label": {"en": "Search tasks", "de": "Aufgaben suchen", "fr": "Rechercher tâches"},
"description": { "description": {"en": "Search tasks in a workspace", "de": "Aufgaben in einem Workspace suchen", "fr": "Rechercher des tâches"},
"en": "Search tasks in a workspace (team)",
"de": "Aufgaben in einem Workspace suchen",
"fr": "Rechercher des tâches dans un espace",
},
"parameters": [ "parameters": [
{"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
{"name": "teamId", "type": "string", "required": True, "description": {"en": "Workspace (team) ID", "de": "Team-/Workspace-ID", "fr": "ID équipe"}}, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}},
{"name": "query", "type": "string", "required": True, "description": {"en": "Search query", "de": "Suchbegriff", "fr": "Requête"}}, {"name": "teamId", "type": "string", "required": True, "frontendType": "text",
{"name": "page", "type": "number", "required": False, "description": {"en": "Page", "de": "Seite", "fr": "Page"}, "default": 0}, "description": {"en": "Workspace (team) ID", "de": "Team-/Workspace-ID", "fr": "ID équipe"}},
{ {"name": "query", "type": "string", "required": True, "frontendType": "text",
"name": "listId", "description": {"en": "Search query", "de": "Suchbegriff", "fr": "Requête"}},
"type": "string", {"name": "page", "type": "number", "required": False, "frontendType": "number",
"required": False, "description": {"en": "Page", "de": "Seite", "fr": "Page"}, "default": 0},
"description": { {"name": "listId", "type": "string", "required": False, "frontendType": "clickupList",
"en": "If set, search this list via list API (not team search).", "frontendOptions": {"dependsOn": "connectionReference"},
"de": "Wenn gesetzt: Suche in dieser Liste (Listen-API, nicht Team-Suche).", "description": {"en": "Search in this list", "de": "In dieser Liste suchen", "fr": "Rechercher dans cette liste"}},
"fr": "Si défini : recherche dans cette liste (API liste).", {"name": "includeClosed", "type": "boolean", "required": False, "frontendType": "checkbox",
}, "description": {"en": "Include closed tasks", "de": "Erledigte einbeziehen", "fr": "Inclure terminées"}, "default": False},
}, {"name": "fullTaskData", "type": "boolean", "required": False, "frontendType": "checkbox",
{ "description": {"en": "Return full task data", "de": "Vollständige Daten", "fr": "Données complètes"}, "default": False},
"name": "includeClosed", {"name": "matchNameOnly", "type": "boolean", "required": False, "frontendType": "checkbox",
"type": "boolean", "description": {"en": "Match title only", "de": "Nur Titel", "fr": "Titre uniquement"}, "default": True},
"required": False,
"default": False,
"description": {
"en": "With listId: include closed tasks.",
"de": "Mit Liste: erledigte Aufgaben einbeziehen.",
"fr": "Avec liste : inclure les tâches terminées.",
},
},
{
"name": "fullTaskData",
"type": "boolean",
"required": False,
"default": False,
"description": {
"en": "Return full ClickUp API JSON per task (very large). Default: slim fields only.",
"de": "Vollständige ClickUp-Rohdaten pro Task (sehr groß). Standard: nur schlanke Felder.",
"fr": "Réponse brute complète (très volumineuse). Par défaut : champs réduits.",
},
},
{
"name": "matchNameOnly",
"type": "boolean",
"required": False,
"default": True,
"description": {
"en": "Keep only tasks whose title contains the search query (default: on).",
"de": "Nur Aufgaben, deren Titel den Suchbegriff enthält (Standard: an).",
"fr": "Ne garder que les tâches dont le titre contient la requête (défaut : oui).",
},
},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "TaskList"}},
"meta": {"icon": "mdi-magnify", "color": "#7B68EE"}, "meta": {"icon": "mdi-magnify", "color": "#7B68EE"},
"_method": "clickup", "_method": "clickup",
"_action": "searchTasks", "_action": "searchTasks",
"_paramMap": {
"connectionId": "connectionReference",
"teamId": "teamId",
"query": "query",
"page": "page",
"listId": "listId",
"fullTaskData": "fullTaskData",
"matchNameOnly": "matchNameOnly",
"includeClosed": "includeClosed",
},
}, },
{ {
"id": "clickup.listTasks", "id": "clickup.listTasks",
"category": "clickup", "category": "clickup",
"label": {"en": "List tasks", "de": "Aufgaben auflisten", "fr": "Lister les tâches"}, "label": {"en": "List tasks", "de": "Aufgaben auflisten", "fr": "Lister les tâches"},
"description": { "description": {"en": "List tasks in a list", "de": "Aufgaben einer Liste auflisten", "fr": "Lister les tâches"},
"en": "List tasks in a list (pick list path from browse)",
"de": "Aufgaben einer Liste auflisten (Pfad aus Browse)",
"fr": "Lister les tâches d'une liste",
},
"parameters": [ "parameters": [
{"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
{"name": "path", "type": "string", "required": True, "description": {"en": "Virtual path to list /team/.../list/...", "de": "Pfad zur Liste", "fr": "Chemin vers la liste"}}, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}},
{"name": "page", "type": "number", "required": False, "description": {"en": "Page", "de": "Seite", "fr": "Page"}, "default": 0}, {"name": "pathQuery", "type": "string", "required": True, "frontendType": "clickupList",
{"name": "includeClosed", "type": "boolean", "required": False, "description": {"en": "Include closed", "de": "Erledigte einbeziehen", "fr": "Inclure terminées"}, "default": False}, "frontendOptions": {"dependsOn": "connectionReference"},
"description": {"en": "Path to list", "de": "Pfad zur Liste", "fr": "Chemin vers la liste"}},
{"name": "page", "type": "number", "required": False, "frontendType": "number",
"description": {"en": "Page", "de": "Seite", "fr": "Page"}, "default": 0},
{"name": "includeClosed", "type": "boolean", "required": False, "frontendType": "checkbox",
"description": {"en": "Include closed", "de": "Erledigte einbeziehen", "fr": "Inclure terminées"}, "default": False},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "TaskList"}},
"meta": {"icon": "mdi-format-list-bulleted", "color": "#7B68EE"}, "meta": {"icon": "mdi-format-list-bulleted", "color": "#7B68EE"},
"_method": "clickup", "_method": "clickup",
"_action": "listTasks", "_action": "listTasks",
"_paramMap": {
"connectionId": "connectionReference",
"path": "pathQuery",
"page": "page",
"includeClosed": "includeClosed",
},
}, },
{ {
"id": "clickup.getTask", "id": "clickup.getTask",
@ -110,118 +65,112 @@ CLICKUP_NODES = [
"label": {"en": "Get task", "de": "Aufgabe abrufen", "fr": "Obtenir la tâche"}, "label": {"en": "Get task", "de": "Aufgabe abrufen", "fr": "Obtenir la tâche"},
"description": {"en": "Get one task by ID or path", "de": "Eine Aufgabe abrufen", "fr": "Obtenir une tâche"}, "description": {"en": "Get one task by ID or path", "de": "Eine Aufgabe abrufen", "fr": "Obtenir une tâche"},
"parameters": [ "parameters": [
{"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
{"name": "taskId", "type": "string", "required": False, "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}}, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}},
{"name": "path", "type": "string", "required": False, "description": {"en": "Or path .../task/{id}", "de": "Oder Pfad .../task/{id}", "fr": "Ou chemin .../task/{id}"}}, {"name": "taskId", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}},
{"name": "pathQuery", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Or path .../task/{id}", "de": "Oder Pfad", "fr": "Ou chemin"}},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "TaskResult"}},
"meta": {"icon": "mdi-file-document-outline", "color": "#7B68EE"}, "meta": {"icon": "mdi-file-document-outline", "color": "#7B68EE"},
"_method": "clickup", "_method": "clickup",
"_action": "getTask", "_action": "getTask",
"_paramMap": {"connectionId": "connectionReference", "taskId": "taskId", "path": "pathQuery"},
}, },
{ {
"id": "clickup.createTask", "id": "clickup.createTask",
"category": "clickup", "category": "clickup",
"label": {"en": "Create task", "de": "Aufgabe erstellen", "fr": "Créer une tâche"}, "label": {"en": "Create task", "de": "Aufgabe erstellen", "fr": "Créer une tâche"},
"description": {"en": "Create a task in a list", "de": "Aufgabe in einer Liste erstellen", "fr": "Créer une tâche dans une liste"}, "description": {"en": "Create a task in a list", "de": "Aufgabe erstellen", "fr": "Créer une tâche"},
"parameters": [ "parameters": [
{"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
{"name": "teamId", "type": "string", "required": False, "description": {"en": "Workspace (team) for list picker", "de": "Workspace für Listen-Auswahl", "fr": "Équipe"}}, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}},
{"name": "path", "type": "string", "required": False, "description": {"en": "Optional path /team/.../list/...", "de": "Optional: Pfad zur Liste", "fr": "Chemin optionnel"}}, {"name": "teamId", "type": "string", "required": False, "frontendType": "text",
{"name": "listId", "type": "string", "required": False, "description": {"en": "List ID", "de": "Listen-ID", "fr": "ID liste"}}, "description": {"en": "Workspace (team)", "de": "Workspace", "fr": "Équipe"}},
{"name": "name", "type": "string", "required": True, "description": {"en": "Task name", "de": "Name", "fr": "Nom"}}, {"name": "pathQuery", "type": "string", "required": False, "frontendType": "clickupList",
{"name": "description", "type": "string", "required": False, "description": {"en": "Description", "de": "Beschreibung", "fr": "Description"}}, "frontendOptions": {"dependsOn": "connectionReference"},
{"name": "taskStatus", "type": "string", "required": False, "description": {"en": "Status (list status name)", "de": "Status (wie in der Liste)", "fr": "Statut"}}, "description": {"en": "Path to list", "de": "Pfad zur Liste", "fr": "Chemin"}},
{"name": "taskPriority", "type": "string", "required": False, "description": {"en": "14 or empty", "de": "14 oder leer", "fr": "14"}}, {"name": "listId", "type": "string", "required": False, "frontendType": "text",
{"name": "taskDueDateMs", "type": "string", "required": False, "description": {"en": "Due date (Unix ms)", "de": "Fälligkeit (ms)", "fr": "Échéance (ms)"}}, "description": {"en": "List ID", "de": "Listen-ID", "fr": "ID liste"}},
{"name": "taskAssigneeIds", "type": "object", "required": False, "description": {"en": "Assignee user ids", "de": "Zugewiesene (User-IDs)", "fr": "Assignés"}}, {"name": "name", "type": "string", "required": True, "frontendType": "text",
{"name": "taskTimeEstimateMs", "type": "string", "required": False, "description": {"en": "Time estimate (ms)", "de": "Zeitschätzung (ms)", "fr": "Estimation (ms)"}}, "description": {"en": "Task name", "de": "Name", "fr": "Nom"}},
{"name": "taskTimeEstimateHours", "type": "string", "required": False, "description": {"en": "Time estimate (hours)", "de": "Zeitschätzung (Stunden)", "fr": "Heures"}}, {"name": "description", "type": "string", "required": False, "frontendType": "textarea",
{"name": "customFieldValues", "type": "object", "required": False, "description": {"en": "Custom field id → value", "de": "Benutzerdefinierte Felder", "fr": "Champs personnalisés"}}, "description": {"en": "Description", "de": "Beschreibung", "fr": "Description"}},
{"name": "taskFields", "type": "string", "required": False, "description": {"en": "Extra JSON (advanced)", "de": "Zusätzliches JSON (fortgeschritten)", "fr": "JSON avancé"}}, {"name": "taskStatus", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Status", "de": "Status", "fr": "Statut"}},
{"name": "taskPriority", "type": "string", "required": False, "frontendType": "select",
"frontendOptions": {"options": ["1", "2", "3", "4"]},
"description": {"en": "Priority 1-4", "de": "Priorität 1-4", "fr": "Priorité 1-4"}},
{"name": "taskDueDateMs", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Due date (Unix ms)", "de": "Fälligkeit (ms)", "fr": "Échéance (ms)"}},
{"name": "taskAssigneeIds", "type": "object", "required": False, "frontendType": "json",
"description": {"en": "Assignee user ids", "de": "Zugewiesene", "fr": "Assignés"}},
{"name": "taskTimeEstimateMs", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Time estimate (ms)", "de": "Zeitschätzung (ms)", "fr": "Estimation (ms)"}},
{"name": "taskTimeEstimateHours", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Time estimate (hours)", "de": "Zeitschätzung (h)", "fr": "Heures"}},
{"name": "customFieldValues", "type": "object", "required": False, "frontendType": "json",
"description": {"en": "Custom fields", "de": "Benutzerdefinierte Felder", "fr": "Champs personnalisés"}},
{"name": "taskFields", "type": "string", "required": False, "frontendType": "json",
"description": {"en": "Extra JSON (advanced)", "de": "Zusätzliches JSON", "fr": "JSON avancé"}},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "TaskResult"}},
"meta": {"icon": "mdi-plus-circle-outline", "color": "#7B68EE"}, "meta": {"icon": "mdi-plus-circle-outline", "color": "#7B68EE"},
"_method": "clickup", "_method": "clickup",
"_action": "createTask", "_action": "createTask",
"_paramMap": {
"connectionId": "connectionReference",
"teamId": "teamId",
"path": "pathQuery",
"listId": "listId",
"name": "name",
"description": "description",
"taskStatus": "taskStatus",
"taskPriority": "taskPriority",
"taskDueDateMs": "taskDueDateMs",
"taskAssigneeIds": "taskAssigneeIds",
"taskTimeEstimateMs": "taskTimeEstimateMs",
"taskTimeEstimateHours": "taskTimeEstimateHours",
"customFieldValues": "customFieldValues",
"taskFields": "taskFields",
},
}, },
{ {
"id": "clickup.updateTask", "id": "clickup.updateTask",
"category": "clickup", "category": "clickup",
"label": {"en": "Update task", "de": "Aufgabe aktualisieren", "fr": "Mettre à jour la tâche"}, "label": {"en": "Update task", "de": "Aufgabe aktualisieren", "fr": "Mettre à jour la tâche"},
"description": { "description": {"en": "Update task fields", "de": "Felder der Aufgabe ändern", "fr": "Mettre à jour les champs"},
"en": "Update task fields (rows or JSON)",
"de": "Felder der Aufgabe ändern (Zeilen oder JSON)",
"fr": "Mettre à jour les champs (lignes ou JSON)",
},
"parameters": [ "parameters": [
{"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
{"name": "taskId", "type": "string", "required": False, "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}}, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}},
{"name": "path", "type": "string", "required": False, "description": {"en": "Or path to task", "de": "Oder Pfad", "fr": "Ou chemin"}}, {"name": "taskId", "type": "string", "required": False, "frontendType": "text",
{ "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}},
"name": "taskUpdateEntries", {"name": "path", "type": "string", "required": False, "frontendType": "text",
"type": "object", "description": {"en": "Or path to task", "de": "Oder Pfad", "fr": "Ou chemin"}},
"required": False, {"name": "taskUpdateEntries", "type": "object", "required": False, "frontendType": "keyValueRows",
"description": { "description": {"en": "Fields to update", "de": "Zu ändernde Felder", "fr": "Champs à mettre à jour"}},
"en": "List of {fieldKey, value, customFieldId?}", {"name": "taskUpdate", "type": "string", "required": False, "frontendType": "json",
"de": "Liste der zu ändernden Felder (fieldKey, value, optional customFieldId)", "description": {"en": "JSON body (advanced)", "de": "JSON für API", "fr": "Corps JSON"}},
"fr": "Liste de champs à mettre à jour",
},
},
{"name": "taskUpdate", "type": "string", "required": False, "description": {"en": "JSON body for API (optional if rows set)", "de": "JSON für API (optional wenn Zeilen gesetzt)", "fr": "Corps JSON"}},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["TaskResult", "Transit"]}},
"outputPorts": {0: {"schema": "TaskResult"}},
"meta": {"icon": "mdi-pencil-outline", "color": "#7B68EE"}, "meta": {"icon": "mdi-pencil-outline", "color": "#7B68EE"},
"_method": "clickup", "_method": "clickup",
"_action": "updateTask", "_action": "updateTask",
"_paramMap": {
"connectionId": "connectionReference",
"taskId": "taskId",
"path": "path",
"taskUpdate": "taskUpdate",
},
}, },
{ {
"id": "clickup.uploadAttachment", "id": "clickup.uploadAttachment",
"category": "clickup", "category": "clickup",
"label": {"en": "Upload attachment", "de": "Anhang hochladen", "fr": "Téléverser pièce jointe"}, "label": {"en": "Upload attachment", "de": "Anhang hochladen", "fr": "Téléverser pièce jointe"},
"description": {"en": "Upload file to a task (upstream file)", "de": "Datei an Task anhängen", "fr": "Joindre un fichier à la tâche"}, "description": {"en": "Upload file to a task", "de": "Datei an Task anhängen", "fr": "Joindre un fichier"},
"parameters": [ "parameters": [
{"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
{"name": "taskId", "type": "string", "required": False, "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}}, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}},
{"name": "path", "type": "string", "required": False, "description": {"en": "Or path to task", "de": "Oder Pfad", "fr": "Ou chemin"}}, {"name": "taskId", "type": "string", "required": False, "frontendType": "text",
{"name": "fileName", "type": "string", "required": False, "description": {"en": "File name", "de": "Dateiname", "fr": "Nom du fichier"}}, "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}},
{"name": "path", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Or path to task", "de": "Oder Pfad", "fr": "Ou chemin"}},
{"name": "fileName", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "File name", "de": "Dateiname", "fr": "Nom du fichier"}},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
"outputPorts": {0: {"schema": "ActionResult"}},
"meta": {"icon": "mdi-attachment", "color": "#7B68EE"}, "meta": {"icon": "mdi-attachment", "color": "#7B68EE"},
"_method": "clickup", "_method": "clickup",
"_action": "uploadAttachment", "_action": "uploadAttachment",
"_paramMap": {
"connectionId": "connectionReference",
"taskId": "taskId",
"path": "path",
"fileName": "fileName",
},
}, },
] ]

View file

@ -0,0 +1,54 @@
# Copyright (c) 2025 Patrick Motsch
# Data manipulation node definitions: aggregate, transform, filter.
# Each node dict follows the shared workflow-node schema: id, category,
# localized label/description, parameter specs, port declarations, executor
# routing key, and UI meta (icon + palette color).

# Collects the results produced by the iterations of an upstream loop.
_AGGREGATE_NODE = {
    "id": "data.aggregate",
    "category": "data",
    "label": {"en": "Aggregate", "de": "Sammeln", "fr": "Agréger"},
    "description": {"en": "Collect results from loop iterations", "de": "Ergebnisse aus Schleifen-Iterationen sammeln", "fr": "Collecter les résultats des itérations"},
    "parameters": [
        {
            "name": "mode",
            "type": "string",
            "required": False,
            "frontendType": "select",
            "frontendOptions": {"options": ["collect", "concat", "sum", "count"]},
            "description": {"en": "Aggregation mode", "de": "Aggregationsmodus", "fr": "Mode d'agrégation"},
            "default": "collect",
        },
    ],
    "inputs": 1,
    "outputs": 1,
    "inputPorts": {0: {"accepts": ["Transit"]}},
    "outputPorts": {0: {"schema": "AggregateResult"}},
    "executor": "data",
    "meta": {"icon": "mdi-playlist-plus", "color": "#607D8B"},
}

# Maps/renames fields; output schema is derived dynamically from "mappings".
_TRANSFORM_NODE = {
    "id": "data.transform",
    "category": "data",
    "label": {"en": "Transform", "de": "Umwandeln", "fr": "Transformer"},
    "description": {"en": "Map and restructure data", "de": "Daten umstrukturieren", "fr": "Restructurer les données"},
    "parameters": [
        {
            "name": "mappings",
            "type": "json",
            "required": True,
            "frontendType": "mappingTable",
            "description": {"en": "Field mappings", "de": "Feld-Zuordnungen", "fr": "Correspondances"},
            "default": [],
        },
    ],
    "inputs": 1,
    "outputs": 1,
    "inputPorts": {0: {"accepts": ["Transit"]}},
    "outputPorts": {0: {"schema": "ActionResult", "dynamic": True, "deriveFrom": "mappings"}},
    "executor": "data",
    "meta": {"icon": "mdi-swap-horizontal-bold", "color": "#607D8B"},
}

# Filters items of list-shaped inputs with a user-supplied condition expression.
_FILTER_NODE = {
    "id": "data.filter",
    "category": "data",
    "label": {"en": "Filter", "de": "Filtern", "fr": "Filtrer"},
    "description": {"en": "Filter items by condition", "de": "Elemente nach Bedingung filtern", "fr": "Filtrer par condition"},
    "parameters": [
        {
            "name": "condition",
            "type": "string",
            "required": True,
            "frontendType": "filterExpression",
            "description": {"en": "Filter condition", "de": "Filterbedingung", "fr": "Condition de filtre"},
        },
    ],
    "inputs": 1,
    "outputs": 1,
    "inputPorts": {0: {"accepts": ["AggregateResult", "FileList", "TaskList", "EmailList", "DocumentList"]}},
    "outputPorts": {0: {"schema": "Transit"}},
    "executor": "data",
    "meta": {"icon": "mdi-filter-outline", "color": "#607D8B"},
}

# Public registry consumed by the workflow node catalog.
DATA_NODES = [_AGGREGATE_NODE, _TRANSFORM_NODE, _FILTER_NODE]

View file

@ -1,70 +1,92 @@
# Copyright (c) 2025 Patrick Motsch # Copyright (c) 2025 Patrick Motsch
# Email node definitions - map to methodOutlook actions. # Email node definitions - map to methodOutlook actions.
# Use connectionId from user connections (like AI workspace sources).
EMAIL_NODES = [ EMAIL_NODES = [
{ {
"id": "email.checkEmail", "id": "email.checkEmail",
"category": "email", "category": "email",
"label": {"en": "Check Email", "de": "E-Mail prüfen", "fr": "Vérifier email"}, "label": {"en": "Check Email", "de": "E-Mail prüfen", "fr": "Vérifier email"},
"description": {"en": "Check for new emails (general or from specific account)", "de": "Neue E-Mails prüfen", "fr": "Vérifier les nouveaux emails"}, "description": {"en": "Check for new emails", "de": "Neue E-Mails prüfen", "fr": "Vérifier les nouveaux emails"},
"parameters": [ "parameters": [
{"name": "connectionId", "type": "string", "required": True, "description": {"en": "Email account connection", "de": "E-Mail-Konto Verbindung", "fr": "Connexion compte email"}}, {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
{"name": "folder", "type": "string", "required": False, "description": {"en": "Folder (e.g. Inbox)", "de": "Ordner (z.B. Posteingang)", "fr": "Dossier (ex. Boîte de réception)"}, "default": "Inbox"}, "description": {"en": "Email account connection", "de": "E-Mail-Konto Verbindung", "fr": "Connexion compte email"}},
{"name": "limit", "type": "number", "required": False, "description": {"en": "Max emails to fetch", "de": "Max E-Mails", "fr": "Max emails"}, "default": 100}, {"name": "folder", "type": "string", "required": False, "frontendType": "text",
{"name": "fromAddress", "type": "string", "required": False, "description": {"en": "Only emails from this address", "de": "Nur E-Mails von dieser Adresse", "fr": "Seulement les e-mails de cette adresse"}, "default": ""}, "description": {"en": "Folder (e.g. Inbox)", "de": "Ordner", "fr": "Dossier"}, "default": "Inbox"},
{"name": "subjectContains", "type": "string", "required": False, "description": {"en": "Subject must contain this text", "de": "Betreff muss diesen Text enthalten", "fr": "Le sujet doit contenir ce texte"}, "default": ""}, {"name": "limit", "type": "number", "required": False, "frontendType": "number",
{"name": "hasAttachment", "type": "boolean", "required": False, "description": {"en": "Only emails with attachments", "de": "Nur E-Mails mit Anhängen", "fr": "Seulement les e-mails avec pièces jointes"}, "default": False}, "description": {"en": "Max emails to fetch", "de": "Max E-Mails", "fr": "Max emails"}, "default": 100},
{"name": "filter", "type": "string", "required": False, "description": {"en": "Advanced: raw filter (overrides above if set)", "de": "Erweitert: Filter-Text (überschreibt obige)", "fr": "Avancé: filtre brut"}, "default": ""}, {"name": "fromAddress", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Only emails from this address", "de": "Nur von dieser Adresse", "fr": "Seulement de cette adresse"}, "default": ""},
{"name": "subjectContains", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Subject must contain", "de": "Betreff muss enthalten", "fr": "Le sujet doit contenir"}, "default": ""},
{"name": "hasAttachment", "type": "boolean", "required": False, "frontendType": "checkbox",
"description": {"en": "Only with attachments", "de": "Nur mit Anhängen", "fr": "Avec pièces jointes"}, "default": False},
{"name": "filter", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Advanced: raw filter", "de": "Erweitert: Filter-Text", "fr": "Avancé: filtre brut"}, "default": ""},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "EmailList"}},
"meta": {"icon": "mdi-email-check", "color": "#1976D2"}, "meta": {"icon": "mdi-email-check", "color": "#1976D2"},
"_method": "outlook", "_method": "outlook",
"_action": "readEmails", "_action": "readEmails",
"_paramMap": {"connectionId": "connectionReference", "folder": "folder", "limit": "limit", "filter": "filter"},
}, },
{ {
"id": "email.searchEmail", "id": "email.searchEmail",
"category": "email", "category": "email",
"label": {"en": "Search Email", "de": "E-Mail suchen", "fr": "Rechercher email"}, "label": {"en": "Search Email", "de": "E-Mail suchen", "fr": "Rechercher email"},
"description": {"en": "Search or find emails", "de": "E-Mails suchen oder finden", "fr": "Rechercher des emails"}, "description": {"en": "Search or find emails", "de": "E-Mails suchen", "fr": "Rechercher des emails"},
"parameters": [ "parameters": [
{"name": "connectionId", "type": "string", "required": True, "description": {"en": "Email account connection", "de": "E-Mail-Konto Verbindung", "fr": "Connexion compte email"}}, {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
{"name": "query", "type": "string", "required": False, "description": {"en": "General search term (searches subject, body, from)", "de": "Suchbegriff (durchsucht Betreff, Inhalt, Absender)", "fr": "Terme de recherche (sujet, corps, expéditeur)"}, "default": ""}, "description": {"en": "Email account connection", "de": "E-Mail-Konto Verbindung", "fr": "Connexion compte email"}},
{"name": "folder", "type": "string", "required": False, "description": {"en": "Folder to search", "de": "Ordner zum Suchen", "fr": "Dossier à rechercher"}, "default": "Inbox"}, {"name": "query", "type": "string", "required": False, "frontendType": "text",
{"name": "limit", "type": "number", "required": False, "description": {"en": "Max emails to return", "de": "Max E-Mails", "fr": "Max emails"}, "default": 100}, "description": {"en": "Search term", "de": "Suchbegriff", "fr": "Terme de recherche"}, "default": ""},
{"name": "fromAddress", "type": "string", "required": False, "description": {"en": "Only emails from this address", "de": "Nur E-Mails von dieser Adresse", "fr": "Seulement les e-mails de cette adresse"}, "default": ""}, {"name": "folder", "type": "string", "required": False, "frontendType": "text",
{"name": "toAddress", "type": "string", "required": False, "description": {"en": "Only emails to this recipient", "de": "Nur E-Mails an diesen Empfänger", "fr": "Seulement les e-mails à ce destinataire"}, "default": ""}, "description": {"en": "Folder to search", "de": "Ordner", "fr": "Dossier"}, "default": "Inbox"},
{"name": "subjectContains", "type": "string", "required": False, "description": {"en": "Subject must contain this text", "de": "Betreff muss diesen Text enthalten", "fr": "Le sujet doit contenir ce texte"}, "default": ""}, {"name": "limit", "type": "number", "required": False, "frontendType": "number",
{"name": "bodyContains", "type": "string", "required": False, "description": {"en": "Body/content must contain this text", "de": "Inhalt muss diesen Text enthalten", "fr": "Le corps doit contenir ce texte"}, "default": ""}, "description": {"en": "Max emails", "de": "Max E-Mails", "fr": "Max emails"}, "default": 100},
{"name": "hasAttachment", "type": "boolean", "required": False, "description": {"en": "Only emails with attachments", "de": "Nur E-Mails mit Anhängen", "fr": "Seulement les e-mails avec pièces jointes"}, "default": False}, {"name": "fromAddress", "type": "string", "required": False, "frontendType": "text",
{"name": "filter", "type": "string", "required": False, "description": {"en": "Advanced: raw KQL (overrides above if set)", "de": "Erweitert: KQL-Filter (überschreibt obige)", "fr": "Avancé: filtre KQL brut"}, "default": ""}, "description": {"en": "From address", "de": "Von Adresse", "fr": "De l'adresse"}, "default": ""},
{"name": "toAddress", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "To address", "de": "An Adresse", "fr": "À l'adresse"}, "default": ""},
{"name": "subjectContains", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Subject contains", "de": "Betreff enthält", "fr": "Sujet contient"}, "default": ""},
{"name": "bodyContains", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Body contains", "de": "Inhalt enthält", "fr": "Corps contient"}, "default": ""},
{"name": "hasAttachment", "type": "boolean", "required": False, "frontendType": "checkbox",
"description": {"en": "With attachments", "de": "Mit Anhängen", "fr": "Avec pièces jointes"}, "default": False},
{"name": "filter", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Advanced: raw KQL", "de": "Erweitert: KQL-Filter", "fr": "Avancé: filtre KQL"}, "default": ""},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "EmailList"}},
"meta": {"icon": "mdi-email-search", "color": "#1976D2"}, "meta": {"icon": "mdi-email-search", "color": "#1976D2"},
"_method": "outlook", "_method": "outlook",
"_action": "searchEmails", "_action": "searchEmails",
"_paramMap": {"connectionId": "connectionReference", "query": "query", "folder": "folder", "limit": "limit", "filter": "filter"},
}, },
{ {
"id": "email.draftEmail", "id": "email.draftEmail",
"category": "email", "category": "email",
"label": {"en": "Draft Email", "de": "E-Mail entwerfen", "fr": "Brouillon email"}, "label": {"en": "Draft Email", "de": "E-Mail entwerfen", "fr": "Brouillon email"},
"description": {"en": "Create a draft email", "de": "E-Mail-Entwurf erstellen", "fr": "Créer un brouillon d'email"}, "description": {"en": "Create a draft email", "de": "E-Mail-Entwurf erstellen", "fr": "Créer un brouillon"},
"parameters": [ "parameters": [
{"name": "connectionId", "type": "string", "required": True, "description": {"en": "Email account connection", "de": "E-Mail-Konto Verbindung", "fr": "Connexion compte email"}}, {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
{"name": "subject", "type": "string", "required": True, "description": {"en": "Email subject", "de": "E-Mail-Betreff", "fr": "Sujet"}}, "description": {"en": "Email account", "de": "E-Mail-Konto", "fr": "Compte email"}},
{"name": "body", "type": "string", "required": True, "description": {"en": "Email body", "de": "E-Mail-Text", "fr": "Corps de l'email"}}, {"name": "subject", "type": "string", "required": True, "frontendType": "text",
{"name": "to", "type": "string", "required": False, "description": {"en": "Recipient(s)", "de": "Empfänger", "fr": "Destinataire(s)"}, "default": ""}, "description": {"en": "Subject", "de": "Betreff", "fr": "Sujet"}},
{"name": "body", "type": "string", "required": True, "frontendType": "textarea",
"description": {"en": "Body", "de": "Inhalt", "fr": "Corps"}},
{"name": "to", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Recipient(s)", "de": "Empfänger", "fr": "Destinataire(s)"}, "default": ""},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["EmailDraft", "AiResult", "Transit"]}},
"outputPorts": {0: {"schema": "ActionResult"}},
"meta": {"icon": "mdi-email-edit", "color": "#1976D2"}, "meta": {"icon": "mdi-email-edit", "color": "#1976D2"},
"_method": "outlook", "_method": "outlook",
"_action": "composeAndDraftEmailWithContext", "_action": "composeAndDraftEmailWithContext",
"_paramMap": {"connectionId": "connectionReference", "to": "to"},
"_contextFrom": ["subject", "body"],
}, },
] ]

View file

@ -7,54 +7,31 @@ FILE_NODES = [
"category": "file", "category": "file",
"label": {"en": "Create File", "de": "Datei erstellen", "fr": "Créer fichier"}, "label": {"en": "Create File", "de": "Datei erstellen", "fr": "Créer fichier"},
"description": { "description": {
"en": "Create a file from context (text/markdown from AI). Configurable format and style.", "en": "Create a file from context (text/markdown from AI).",
"de": "Erstellt eine Datei aus Kontext (Text/Markdown von KI). Format und Stil konfigurierbar.", "de": "Erstellt eine Datei aus Kontext (Text/Markdown von KI).",
"fr": "Crée un fichier à partir du contexte. Format et style configurables.", "fr": "Crée un fichier à partir du contexte.",
}, },
"parameters": [ "parameters": [
{ {"name": "contentSources", "type": "json", "required": False, "frontendType": "json",
"name": "contentSources", "description": {"en": "Context source refs", "de": "Kontext-Quellen", "fr": "Sources de contexte"}, "default": []},
"type": "json", {"name": "outputFormat", "type": "string", "required": True, "frontendType": "select",
"required": False, "frontendOptions": {"options": ["docx", "pdf", "txt", "html", "md"]},
"description": { "description": {"en": "Output format", "de": "Ausgabeformat", "fr": "Format de sortie"}, "default": "docx"},
"en": "Array of context refs (e.g. AI, form). Concatenated in order. Empty = from connected node.", {"name": "title", "type": "string", "required": False, "frontendType": "text",
"de": "Liste von Kontext-Quellen (z.B. KI, Formular). Werden nacheinander zusammengefügt. Leer = vom verbundenen Node.", "description": {"en": "Document title", "de": "Dokumenttitel", "fr": "Titre du document"}},
"fr": "Liste de sources de contexte. Concaténées dans l'ordre. Vide = du noeud connecté.", {"name": "templateName", "type": "string", "required": False, "frontendType": "select",
}, "frontendOptions": {"options": ["default", "corporate", "minimal"]},
"default": [], "description": {"en": "Style preset", "de": "Stil-Vorlage", "fr": "Prését style"}},
}, {"name": "language", "type": "string", "required": False, "frontendType": "select",
{ "frontendOptions": {"options": ["de", "en", "fr"]},
"name": "outputFormat", "description": {"en": "Language", "de": "Sprache", "fr": "Langue"}, "default": "de"},
"type": "string",
"required": True,
"description": {"en": "Output format", "de": "Ausgabeformat", "fr": "Format de sortie"},
"default": "docx",
},
{
"name": "title",
"type": "string",
"required": False,
"description": {"en": "Document title", "de": "Dokumenttitel", "fr": "Titre du document"},
},
{
"name": "templateName",
"type": "string",
"required": False,
"description": {"en": "Style preset: default, corporate, minimal", "de": "Stil-Vorlage", "fr": "Prését style"},
},
{
"name": "language",
"type": "string",
"required": False,
"description": {"en": "Language code (de, en, fr)", "de": "Sprachcode", "fr": "Code langue"},
"default": "de",
},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["AiResult", "TextResult", "Transit"]}},
"outputPorts": {0: {"schema": "DocumentList"}},
"meta": {"icon": "mdi-file-plus-outline", "color": "#2196F3"}, "meta": {"icon": "mdi-file-plus-outline", "color": "#2196F3"},
"_method": "file", "_method": "file",
"_action": "create", "_action": "create",
"_paramMap": {},
}, },
] ]

View file

@ -8,11 +8,19 @@ FLOW_NODES = [
"label": {"en": "If / Else", "de": "Wenn / Sonst", "fr": "Si / Sinon"}, "label": {"en": "If / Else", "de": "Wenn / Sonst", "fr": "Si / Sinon"},
"description": {"en": "Branch based on condition", "de": "Verzweigung nach Bedingung", "fr": "Branche selon condition"}, "description": {"en": "Branch based on condition", "de": "Verzweigung nach Bedingung", "fr": "Branche selon condition"},
"parameters": [ "parameters": [
{"name": "condition", "type": "string", "required": True, "description": {"en": "Expression to evaluate (e.g. {{value}} > 0)", "de": "Bedingung", "fr": "Condition"}}, {
"name": "condition",
"type": "string",
"required": True,
"frontendType": "condition",
"description": {"en": "Condition to evaluate", "de": "Bedingung", "fr": "Condition"},
},
], ],
"inputs": 1, "inputs": 1,
"outputs": 2, "outputs": 2,
"outputLabels": {"en": ["Yes", "No"], "de": ["Ja", "Nein"], "fr": ["Oui", "Non"]}, "outputLabels": {"en": ["Yes", "No"], "de": ["Ja", "Nein"], "fr": ["Oui", "Non"]},
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "Transit"}, 1: {"schema": "Transit"}},
"executor": "flow", "executor": "flow",
"meta": {"icon": "mdi-source-branch", "color": "#FF9800"}, "meta": {"icon": "mdi-source-branch", "color": "#FF9800"},
}, },
@ -22,11 +30,25 @@ FLOW_NODES = [
"label": {"en": "Switch", "de": "Switch", "fr": "Switch"}, "label": {"en": "Switch", "de": "Switch", "fr": "Switch"},
"description": {"en": "Multiple branches based on value", "de": "Mehrere Zweige nach Wert", "fr": "Branches multiples selon valeur"}, "description": {"en": "Multiple branches based on value", "de": "Mehrere Zweige nach Wert", "fr": "Branches multiples selon valeur"},
"parameters": [ "parameters": [
{"name": "value", "type": "string", "required": True, "description": {"en": "Value to match", "de": "Zu vergleichender Wert", "fr": "Valeur à comparer"}}, {
{"name": "cases", "type": "array", "required": False, "description": {"en": "List of cases", "de": "Fälle", "fr": "Cas"}}, "name": "value",
"type": "string",
"required": True,
"frontendType": "text",
"description": {"en": "Value to match", "de": "Zu vergleichender Wert", "fr": "Valeur à comparer"},
},
{
"name": "cases",
"type": "array",
"required": False,
"frontendType": "caseList",
"description": {"en": "List of cases", "de": "Fälle", "fr": "Cas"},
},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "Transit"}},
"executor": "flow", "executor": "flow",
"meta": {"icon": "mdi-swap-horizontal", "color": "#FF9800"}, "meta": {"icon": "mdi-swap-horizontal", "color": "#FF9800"},
}, },
@ -36,11 +58,42 @@ FLOW_NODES = [
"label": {"en": "Loop / For Each", "de": "Schleife / Für Jedes", "fr": "Boucle / Pour Chaque"}, "label": {"en": "Loop / For Each", "de": "Schleife / Für Jedes", "fr": "Boucle / Pour Chaque"},
"description": {"en": "Iterate over array items", "de": "Über Array-Elemente iterieren", "fr": "Itérer sur les éléments"}, "description": {"en": "Iterate over array items", "de": "Über Array-Elemente iterieren", "fr": "Itérer sur les éléments"},
"parameters": [ "parameters": [
{"name": "items", "type": "string", "required": True, "description": {"en": "Path to array (e.g. {{input.items}})", "de": "Pfad zum Array", "fr": "Chemin vers le tableau"}}, {
"name": "items",
"type": "string",
"required": True,
"frontendType": "text",
"description": {"en": "Path to array (e.g. {{input.items}})", "de": "Pfad zum Array", "fr": "Chemin vers le tableau"},
},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "LoopItem"}},
"executor": "flow", "executor": "flow",
"meta": {"icon": "mdi-repeat", "color": "#FF9800"}, "meta": {"icon": "mdi-repeat", "color": "#FF9800"},
}, },
{
"id": "flow.merge",
"category": "flow",
"label": {"en": "Merge", "de": "Zusammenführen", "fr": "Fusionner"},
"description": {"en": "Merge multiple branches", "de": "Mehrere Zweige zusammenführen", "fr": "Fusionner plusieurs branches"},
"parameters": [
{
"name": "mode",
"type": "string",
"required": False,
"frontendType": "select",
"frontendOptions": {"options": ["first", "all", "append"]},
"description": {"en": "Merge mode", "de": "Zusammenführungsmodus", "fr": "Mode de fusion"},
"default": "first",
},
],
"inputs": 2,
"outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}, 1: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "MergeResult"}},
"executor": "flow",
"meta": {"icon": "mdi-call-merge", "color": "#FF9800"},
},
] ]

View file

@ -12,9 +12,10 @@ INPUT_NODES = [
"name": "fields", "name": "fields",
"type": "json", "type": "json",
"required": True, "required": True,
"frontendType": "fieldBuilder",
"description": { "description": {
"en": "Form fields: [{name, type, label, required, options?}]. type may include clickup_tasks with clickupConnectionId + clickupListId for a ClickUp task dropdown (value {add, rem}).", "en": "Form fields: [{name, type, label, required, options?}]",
"de": "Formularfelder. type: u. a. clickup_tasks mit clickupConnectionId und clickupListId für ClickUp-Aufgaben-Dropdown (Wert wie Relationship-Feld).", "de": "Formularfelder",
"fr": "Champs du formulaire", "fr": "Champs du formulaire",
}, },
"default": [], "default": [],
@ -22,6 +23,8 @@ INPUT_NODES = [
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "FormPayload", "dynamic": True, "deriveFrom": "fields"}},
"executor": "input", "executor": "input",
"meta": {"icon": "mdi-form-textbox", "color": "#9C27B0"}, "meta": {"icon": "mdi-form-textbox", "color": "#9C27B0"},
}, },
@ -31,12 +34,18 @@ INPUT_NODES = [
"label": {"en": "Approval", "de": "Genehmigung", "fr": "Approbation"}, "label": {"en": "Approval", "de": "Genehmigung", "fr": "Approbation"},
"description": {"en": "User approves or rejects", "de": "Benutzer genehmigt oder lehnt ab", "fr": "L'utilisateur approuve ou rejette"}, "description": {"en": "User approves or rejects", "de": "Benutzer genehmigt oder lehnt ab", "fr": "L'utilisateur approuve ou rejette"},
"parameters": [ "parameters": [
{"name": "title", "type": "string", "required": True, "description": {"en": "Approval title", "de": "Genehmigungstitel", "fr": "Titre"}}, {"name": "title", "type": "string", "required": True, "frontendType": "text",
{"name": "description", "type": "string", "required": False, "description": {"en": "What to approve", "de": "Was genehmigt werden soll", "fr": "Ce qu'il faut approuver"}}, "description": {"en": "Approval title", "de": "Genehmigungstitel", "fr": "Titre"}},
{"name": "approvalType", "type": "string", "required": False, "description": {"en": "Type: document or generic", "de": "Typ: document oder generic", "fr": "Type: document ou generic"}, "default": "generic"}, {"name": "description", "type": "string", "required": False, "frontendType": "textarea",
"description": {"en": "What to approve", "de": "Was genehmigt werden soll", "fr": "Ce qu'il faut approuver"}},
{"name": "approvalType", "type": "string", "required": False, "frontendType": "select",
"frontendOptions": {"options": ["generic", "document"]},
"description": {"en": "Type: document or generic", "de": "Typ: document oder generic", "fr": "Type"}, "default": "generic"},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "BoolResult"}},
"executor": "input", "executor": "input",
"meta": {"icon": "mdi-check-decagram", "color": "#4CAF50"}, "meta": {"icon": "mdi-check-decagram", "color": "#4CAF50"},
}, },
@ -46,13 +55,20 @@ INPUT_NODES = [
"label": {"en": "Upload", "de": "Upload", "fr": "Téléversement"}, "label": {"en": "Upload", "de": "Upload", "fr": "Téléversement"},
"description": {"en": "User uploads file(s)", "de": "Benutzer lädt Datei(en) hoch", "fr": "L'utilisateur téléverse des fichiers"}, "description": {"en": "User uploads file(s)", "de": "Benutzer lädt Datei(en) hoch", "fr": "L'utilisateur téléverse des fichiers"},
"parameters": [ "parameters": [
{"name": "accept", "type": "string", "required": False, "description": {"en": "Accept string for file input (e.g. .pdf,image/*)", "de": "Accept-String für Dateiauswahl", "fr": "Chaîne accept"}, "default": ""}, {"name": "accept", "type": "string", "required": False, "frontendType": "text",
{"name": "allowedTypes", "type": "json", "required": False, "description": {"en": "Selected file types (from UI multi-select)", "de": "Ausgewählte Dateitypen", "fr": "Types sélectionnés"}, "default": []}, "description": {"en": "Accept string for file input (e.g. .pdf,image/*)", "de": "Accept-String", "fr": "Chaîne accept"}, "default": ""},
{"name": "maxSize", "type": "number", "required": False, "description": {"en": "Max file size in MB", "de": "Max. Dateigröße in MB", "fr": "Taille max en Mo"}, "default": 10}, {"name": "allowedTypes", "type": "json", "required": False, "frontendType": "multiselect",
{"name": "multiple", "type": "boolean", "required": False, "description": {"en": "Allow multiple files", "de": "Mehrere Dateien erlauben", "fr": "Autoriser plusieurs fichiers"}, "default": False}, "frontendOptions": {"options": ["pdf", "docx", "xlsx", "pptx", "txt", "csv", "jpg", "png", "gif"]},
"description": {"en": "Selected file types", "de": "Ausgewählte Dateitypen", "fr": "Types sélectionnés"}, "default": []},
{"name": "maxSize", "type": "number", "required": False, "frontendType": "number",
"description": {"en": "Max file size in MB", "de": "Max. Dateigröße in MB", "fr": "Taille max en Mo"}, "default": 10},
{"name": "multiple", "type": "boolean", "required": False, "frontendType": "checkbox",
"description": {"en": "Allow multiple files", "de": "Mehrere Dateien erlauben", "fr": "Autoriser plusieurs fichiers"}, "default": False},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "DocumentList"}},
"executor": "input", "executor": "input",
"meta": {"icon": "mdi-upload", "color": "#2196F3"}, "meta": {"icon": "mdi-upload", "color": "#2196F3"},
}, },
@ -62,11 +78,15 @@ INPUT_NODES = [
"label": {"en": "Comment", "de": "Kommentar", "fr": "Commentaire"}, "label": {"en": "Comment", "de": "Kommentar", "fr": "Commentaire"},
"description": {"en": "User adds a comment", "de": "Benutzer fügt einen Kommentar hinzu", "fr": "L'utilisateur ajoute un commentaire"}, "description": {"en": "User adds a comment", "de": "Benutzer fügt einen Kommentar hinzu", "fr": "L'utilisateur ajoute un commentaire"},
"parameters": [ "parameters": [
{"name": "placeholder", "type": "string", "required": False, "description": {"en": "Placeholder text", "de": "Platzhalter", "fr": "Texte indicatif"}, "default": ""}, {"name": "placeholder", "type": "string", "required": False, "frontendType": "text",
{"name": "required", "type": "boolean", "required": False, "description": {"en": "Comment required", "de": "Kommentar erforderlich", "fr": "Commentaire requis"}, "default": True}, "description": {"en": "Placeholder text", "de": "Platzhalter", "fr": "Texte indicatif"}, "default": ""},
{"name": "required", "type": "boolean", "required": False, "frontendType": "checkbox",
"description": {"en": "Comment required", "de": "Kommentar erforderlich", "fr": "Commentaire requis"}, "default": True},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "TextResult"}},
"executor": "input", "executor": "input",
"meta": {"icon": "mdi-comment-text", "color": "#FF9800"}, "meta": {"icon": "mdi-comment-text", "color": "#FF9800"},
}, },
@ -76,11 +96,16 @@ INPUT_NODES = [
"label": {"en": "Review", "de": "Prüfung", "fr": "Revue"}, "label": {"en": "Review", "de": "Prüfung", "fr": "Revue"},
"description": {"en": "User reviews content", "de": "Benutzer prüft Inhalt", "fr": "L'utilisateur révise le contenu"}, "description": {"en": "User reviews content", "de": "Benutzer prüft Inhalt", "fr": "L'utilisateur révise le contenu"},
"parameters": [ "parameters": [
{"name": "contentRef", "type": "string", "required": True, "description": {"en": "Reference to content (e.g. {{nodeId.field}})", "de": "Referenz auf Inhalt", "fr": "Référence au contenu"}}, {"name": "contentRef", "type": "string", "required": True, "frontendType": "text",
{"name": "reviewType", "type": "string", "required": False, "description": {"en": "Type of review", "de": "Art der Prüfung", "fr": "Type de revue"}, "default": "generic"}, "description": {"en": "Reference to content", "de": "Referenz auf Inhalt", "fr": "Référence au contenu"}},
{"name": "reviewType", "type": "string", "required": False, "frontendType": "select",
"frontendOptions": {"options": ["generic", "document"]},
"description": {"en": "Type of review", "de": "Art der Prüfung", "fr": "Type de revue"}, "default": "generic"},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "BoolResult"}},
"executor": "input", "executor": "input",
"meta": {"icon": "mdi-magnify-scan", "color": "#673AB7"}, "meta": {"icon": "mdi-magnify-scan", "color": "#673AB7"},
}, },
@ -90,17 +115,15 @@ INPUT_NODES = [
"label": {"en": "Selection", "de": "Auswahl", "fr": "Sélection"}, "label": {"en": "Selection", "de": "Auswahl", "fr": "Sélection"},
"description": {"en": "User selects from options", "de": "Benutzer wählt aus Optionen", "fr": "L'utilisateur choisit parmi les options"}, "description": {"en": "User selects from options", "de": "Benutzer wählt aus Optionen", "fr": "L'utilisateur choisit parmi les options"},
"parameters": [ "parameters": [
{ {"name": "options", "type": "json", "required": True, "frontendType": "keyValueRows",
"name": "options", "description": {"en": "Options: [{value, label}]", "de": "Optionen", "fr": "Options"}, "default": []},
"type": "json", {"name": "multiple", "type": "boolean", "required": False, "frontendType": "checkbox",
"required": True, "description": {"en": "Allow multiple selection", "de": "Mehrfachauswahl erlauben", "fr": "Sélection multiple"}, "default": False},
"description": {"en": "Options: [{value, label}]", "de": "Optionen", "fr": "Options"},
"default": [],
},
{"name": "multiple", "type": "boolean", "required": False, "description": {"en": "Allow multiple selection", "de": "Mehrfachauswahl erlauben", "fr": "Sélection multiple"}, "default": False},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "TextResult"}},
"executor": "input", "executor": "input",
"meta": {"icon": "mdi-format-list-checks", "color": "#009688"}, "meta": {"icon": "mdi-format-list-checks", "color": "#009688"},
}, },
@ -110,12 +133,17 @@ INPUT_NODES = [
"label": {"en": "Confirmation", "de": "Bestätigung", "fr": "Confirmation"}, "label": {"en": "Confirmation", "de": "Bestätigung", "fr": "Confirmation"},
"description": {"en": "User confirms yes/no", "de": "Benutzer bestätigt Ja/Nein", "fr": "L'utilisateur confirme oui/non"}, "description": {"en": "User confirms yes/no", "de": "Benutzer bestätigt Ja/Nein", "fr": "L'utilisateur confirme oui/non"},
"parameters": [ "parameters": [
{"name": "question", "type": "string", "required": True, "description": {"en": "Question to confirm", "de": "Zu bestätigende Frage", "fr": "Question à confirmer"}}, {"name": "question", "type": "string", "required": True, "frontendType": "text",
{"name": "confirmLabel", "type": "string", "required": False, "description": {"en": "Label for confirm button", "de": "Label für Bestätigen-Button", "fr": "Libellé du bouton confirmer"}, "default": "Confirm"}, "description": {"en": "Question to confirm", "de": "Zu bestätigende Frage", "fr": "Question à confirmer"}},
{"name": "rejectLabel", "type": "string", "required": False, "description": {"en": "Label for reject button", "de": "Label für Ablehnen-Button", "fr": "Libellé du bouton refuser"}, "default": "Reject"}, {"name": "confirmLabel", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Label for confirm button", "de": "Label für Bestätigen-Button", "fr": "Libellé confirmer"}, "default": "Confirm"},
{"name": "rejectLabel", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Label for reject button", "de": "Label für Ablehnen-Button", "fr": "Libellé refuser"}, "default": "Reject"},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "BoolResult"}},
"executor": "input", "executor": "input",
"meta": {"icon": "mdi-checkbox-marked-circle", "color": "#8BC34A"}, "meta": {"icon": "mdi-checkbox-marked-circle", "color": "#8BC34A"},
}, },

View file

@ -1,6 +1,5 @@
# Copyright (c) 2025 Patrick Motsch # Copyright (c) 2025 Patrick Motsch
# SharePoint node definitions - map to methodSharepoint actions. # SharePoint node definitions - map to methodSharepoint actions.
# Use connectionId and path from connection selector (like workflow folder view).
SHAREPOINT_NODES = [ SHAREPOINT_NODES = [
{ {
@ -9,17 +8,22 @@ SHAREPOINT_NODES = [
"label": {"en": "Find File", "de": "Datei finden", "fr": "Trouver fichier"}, "label": {"en": "Find File", "de": "Datei finden", "fr": "Trouver fichier"},
"description": {"en": "Find file by path or search", "de": "Datei nach Pfad oder Suche finden", "fr": "Trouver fichier par chemin ou recherche"}, "description": {"en": "Find file by path or search", "de": "Datei nach Pfad oder Suche finden", "fr": "Trouver fichier par chemin ou recherche"},
"parameters": [ "parameters": [
{"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
{"name": "searchQuery", "type": "string", "required": True, "description": {"en": "Search query or path", "de": "Suchanfrage oder Pfad", "fr": "Requête ou chemin"}}, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
{"name": "site", "type": "string", "required": False, "description": {"en": "Optional site hint", "de": "Optionaler Site-Hinweis", "fr": "Indication de site"}, "default": ""}, {"name": "searchQuery", "type": "string", "required": True, "frontendType": "text",
{"name": "maxResults", "type": "number", "required": False, "description": {"en": "Max results", "de": "Max Ergebnisse", "fr": "Max résultats"}, "default": 1000}, "description": {"en": "Search query or path", "de": "Suchanfrage oder Pfad", "fr": "Requête ou chemin"}},
{"name": "site", "type": "string", "required": False, "frontendType": "text",
"description": {"en": "Optional site hint", "de": "Optionaler Site-Hinweis", "fr": "Indication de site"}, "default": ""},
{"name": "maxResults", "type": "number", "required": False, "frontendType": "number",
"description": {"en": "Max results", "de": "Max Ergebnisse", "fr": "Max résultats"}, "default": 1000},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "FileList"}},
"meta": {"icon": "mdi-file-search", "color": "#0078D4"}, "meta": {"icon": "mdi-file-search", "color": "#0078D4"},
"_method": "sharepoint", "_method": "sharepoint",
"_action": "findDocumentPath", "_action": "findDocumentPath",
"_paramMap": {"connectionId": "connectionReference", "searchQuery": "searchQuery", "site": "site", "maxResults": "maxResults"},
}, },
{ {
"id": "sharepoint.readFile", "id": "sharepoint.readFile",
@ -27,15 +31,19 @@ SHAREPOINT_NODES = [
"label": {"en": "Read File", "de": "Datei lesen", "fr": "Lire fichier"}, "label": {"en": "Read File", "de": "Datei lesen", "fr": "Lire fichier"},
"description": {"en": "Extract content from file", "de": "Inhalt aus Datei extrahieren", "fr": "Extraire le contenu du fichier"}, "description": {"en": "Extract content from file", "de": "Inhalt aus Datei extrahieren", "fr": "Extraire le contenu du fichier"},
"parameters": [ "parameters": [
{"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
{"name": "path", "type": "string", "required": True, "description": {"en": "File path or documentList from find file", "de": "Dateipfad oder documentList von Find", "fr": "Chemin ou documentList"}}, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
{"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFile",
"frontendOptions": {"dependsOn": "connectionReference"},
"description": {"en": "File path", "de": "Dateipfad", "fr": "Chemin"}},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["FileList", "Transit"]}},
"outputPorts": {0: {"schema": "DocumentList"}},
"meta": {"icon": "mdi-file-document", "color": "#0078D4"}, "meta": {"icon": "mdi-file-document", "color": "#0078D4"},
"_method": "sharepoint", "_method": "sharepoint",
"_action": "readDocuments", "_action": "readDocuments",
"_paramMap": {"connectionId": "connectionReference", "path": "pathQuery"},
}, },
{ {
"id": "sharepoint.uploadFile", "id": "sharepoint.uploadFile",
@ -43,47 +51,59 @@ SHAREPOINT_NODES = [
"label": {"en": "Upload File", "de": "Datei hochladen", "fr": "Téléverser fichier"}, "label": {"en": "Upload File", "de": "Datei hochladen", "fr": "Téléverser fichier"},
"description": {"en": "Upload file to SharePoint", "de": "Datei zu SharePoint hochladen", "fr": "Téléverser fichier vers SharePoint"}, "description": {"en": "Upload file to SharePoint", "de": "Datei zu SharePoint hochladen", "fr": "Téléverser fichier vers SharePoint"},
"parameters": [ "parameters": [
{"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
{"name": "path", "type": "string", "required": True, "description": {"en": "Target folder path (e.g. /sites/.../Folder)", "de": "Zielordner-Pfad", "fr": "Chemin du dossier cible"}}, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
{"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFolder",
"frontendOptions": {"dependsOn": "connectionReference"},
"description": {"en": "Target folder path", "de": "Zielordner-Pfad", "fr": "Chemin du dossier cible"}},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
"outputPorts": {0: {"schema": "ActionResult"}},
"meta": {"icon": "mdi-upload", "color": "#0078D4"}, "meta": {"icon": "mdi-upload", "color": "#0078D4"},
"_method": "sharepoint", "_method": "sharepoint",
"_action": "uploadFile", "_action": "uploadFile",
"_paramMap": {"connectionId": "connectionReference", "path": "pathQuery"},
}, },
{ {
"id": "sharepoint.listFiles", "id": "sharepoint.listFiles",
"category": "sharepoint", "category": "sharepoint",
"label": {"en": "List Files", "de": "Dateien auflisten", "fr": "Lister fichiers"}, "label": {"en": "List Files", "de": "Dateien auflisten", "fr": "Lister fichiers"},
"description": {"en": "List files in folder or SharePoint", "de": "Dateien in Ordner oder SharePoint auflisten", "fr": "Lister les fichiers dans un dossier"}, "description": {"en": "List files in folder", "de": "Dateien in Ordner auflisten", "fr": "Lister les fichiers"},
"parameters": [ "parameters": [
{"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
{"name": "path", "type": "string", "required": False, "description": {"en": "Folder path (e.g. /sites/SiteName/Shared Documents)", "de": "Ordnerpfad", "fr": "Chemin du dossier"}, "default": "/"}, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
{"name": "pathQuery", "type": "string", "required": False, "frontendType": "sharepointFolder",
"frontendOptions": {"dependsOn": "connectionReference"},
"description": {"en": "Folder path", "de": "Ordnerpfad", "fr": "Chemin du dossier"}, "default": "/"},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "FileList"}},
"meta": {"icon": "mdi-folder-open", "color": "#0078D4"}, "meta": {"icon": "mdi-folder-open", "color": "#0078D4"},
"_method": "sharepoint", "_method": "sharepoint",
"_action": "listDocuments", "_action": "listDocuments",
"_paramMap": {"connectionId": "connectionReference", "path": "pathQuery"},
}, },
{ {
"id": "sharepoint.downloadFile", "id": "sharepoint.downloadFile",
"category": "sharepoint", "category": "sharepoint",
"label": {"en": "Download File", "de": "Datei herunterladen", "fr": "Télécharger fichier"}, "label": {"en": "Download File", "de": "Datei herunterladen", "fr": "Télécharger fichier"},
"description": {"en": "Download file from path (e.g. /sites/SiteName/Shared Documents/file.pdf)", "de": "Datei vom Pfad herunterladen", "fr": "Télécharger le fichier"}, "description": {"en": "Download file from path", "de": "Datei vom Pfad herunterladen", "fr": "Télécharger le fichier"},
"parameters": [ "parameters": [
{"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
{"name": "path", "type": "string", "required": True, "description": {"en": "Full file path (e.g. /sites/SiteName/Shared Documents/file.pdf)", "de": "Vollständiger Dateipfad", "fr": "Chemin complet du fichier"}}, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
{"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFile",
"frontendOptions": {"dependsOn": "connectionReference"},
"description": {"en": "Full file path", "de": "Vollständiger Dateipfad", "fr": "Chemin complet du fichier"}},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["FileList", "Transit"]}},
"outputPorts": {0: {"schema": "DocumentList"}},
"meta": {"icon": "mdi-download", "color": "#0078D4"}, "meta": {"icon": "mdi-download", "color": "#0078D4"},
"_method": "sharepoint", "_method": "sharepoint",
"_action": "downloadFileByPath", "_action": "downloadFileByPath",
"_paramMap": {"connectionId": "connectionReference", "path": "pathQuery", "siteId": "siteId", "filePath": "filePath"},
}, },
{ {
"id": "sharepoint.copyFile", "id": "sharepoint.copyFile",
@ -91,15 +111,21 @@ SHAREPOINT_NODES = [
"label": {"en": "Copy File", "de": "Datei kopieren", "fr": "Copier fichier"}, "label": {"en": "Copy File", "de": "Datei kopieren", "fr": "Copier fichier"},
"description": {"en": "Copy file to destination", "de": "Datei an Ziel kopieren", "fr": "Copier le fichier"}, "description": {"en": "Copy file to destination", "de": "Datei an Ziel kopieren", "fr": "Copier le fichier"},
"parameters": [ "parameters": [
{"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
{"name": "sourcePath", "type": "string", "required": True, "description": {"en": "Source file path (from browse)", "de": "Quelldatei-Pfad", "fr": "Chemin fichier source"}}, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
{"name": "destPath", "type": "string", "required": True, "description": {"en": "Destination folder path (from browse)", "de": "Zielordner-Pfad", "fr": "Chemin dossier cible"}}, {"name": "sourcePath", "type": "string", "required": True, "frontendType": "sharepointFile",
"frontendOptions": {"dependsOn": "connectionReference"},
"description": {"en": "Source file path", "de": "Quelldatei-Pfad", "fr": "Chemin fichier source"}},
{"name": "destPath", "type": "string", "required": True, "frontendType": "sharepointFolder",
"frontendOptions": {"dependsOn": "connectionReference"},
"description": {"en": "Destination folder", "de": "Zielordner", "fr": "Dossier cible"}},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "ActionResult"}},
"meta": {"icon": "mdi-content-copy", "color": "#0078D4"}, "meta": {"icon": "mdi-content-copy", "color": "#0078D4"},
"_method": "sharepoint", "_method": "sharepoint",
"_action": "copyFile", "_action": "copyFile",
"_paramMap": {"connectionId": "connectionReference", "sourcePath": "sourcePath", "destPath": "destPath"},
}, },
] ]

View file

@ -14,6 +14,8 @@ TRIGGER_NODES = [
"parameters": [], "parameters": [],
"inputs": 0, "inputs": 0,
"outputs": 1, "outputs": 1,
"inputPorts": {},
"outputPorts": {0: {"schema": "ActionResult"}},
"executor": "trigger", "executor": "trigger",
"meta": {"icon": "mdi-play", "color": "#4CAF50"}, "meta": {"icon": "mdi-play", "color": "#4CAF50"},
}, },
@ -31,11 +33,14 @@ TRIGGER_NODES = [
"name": "formFields", "name": "formFields",
"type": "json", "type": "json",
"required": False, "required": False,
"frontendType": "fieldBuilder",
"description": {"en": "Field definitions", "de": "Felddefinitionen", "fr": "Définitions"}, "description": {"en": "Field definitions", "de": "Felddefinitionen", "fr": "Définitions"},
}, },
], ],
"inputs": 0, "inputs": 0,
"outputs": 1, "outputs": 1,
"inputPorts": {},
"outputPorts": {0: {"schema": "FormPayload", "dynamic": True, "deriveFrom": "formFields"}},
"executor": "trigger", "executor": "trigger",
"meta": {"icon": "mdi-form-select", "color": "#9C27B0"}, "meta": {"icon": "mdi-form-select", "color": "#9C27B0"},
}, },
@ -53,11 +58,14 @@ TRIGGER_NODES = [
"name": "cron", "name": "cron",
"type": "string", "type": "string",
"required": False, "required": False,
"frontendType": "cron",
"description": {"en": "Cron expression", "de": "Cron-Ausdruck", "fr": "Expression cron"}, "description": {"en": "Cron expression", "de": "Cron-Ausdruck", "fr": "Expression cron"},
}, },
], ],
"inputs": 0, "inputs": 0,
"outputs": 1, "outputs": 1,
"inputPorts": {},
"outputPorts": {0: {"schema": "ActionResult"}},
"executor": "trigger", "executor": "trigger",
"meta": {"icon": "mdi-clock", "color": "#2196F3"}, "meta": {"icon": "mdi-clock", "color": "#2196F3"},
}, },

View file

@ -1,6 +1,5 @@
# Copyright (c) 2025 Patrick Motsch # Copyright (c) 2025 Patrick Motsch
# Trustee node definitions - map to methodTrustee actions. # Trustee node definitions - map to methodTrustee actions.
# Pipeline: extractFromFiles -> processDocuments -> syncToAccounting.
TRUSTEE_NODES = [ TRUSTEE_NODES = [
{ {
@ -8,83 +7,100 @@ TRUSTEE_NODES = [
"category": "trustee", "category": "trustee",
"label": {"en": "Refresh Accounting Data", "de": "Buchhaltungsdaten aktualisieren", "fr": "Actualiser données comptables"}, "label": {"en": "Refresh Accounting Data", "de": "Buchhaltungsdaten aktualisieren", "fr": "Actualiser données comptables"},
"description": { "description": {
"en": "Import/refresh accounting data from external system (e.g. Abacus). Skips import if data is fresh unless forceRefresh is set.", "en": "Import/refresh accounting data from external system (e.g. Abacus).",
"de": "Buchhaltungsdaten aus externem System importieren/aktualisieren (z.B. Abacus). Überspringt Import wenn Daten frisch sind, ausser forceRefresh ist gesetzt.", "de": "Buchhaltungsdaten aus externem System importieren/aktualisieren.",
"fr": "Importer/actualiser les données comptables depuis le système externe (ex. Abacus).", "fr": "Importer/actualiser les données comptables.",
}, },
"parameters": [ "parameters": [
{"name": "featureInstanceId", "type": "string", "required": True, "description": {"en": "Trustee feature instance ID", "de": "Trustee Feature-Instanz-ID", "fr": "ID instance Trustee"}}, {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
{"name": "forceRefresh", "type": "boolean", "required": False, "description": {"en": "Force re-import even if data is fresh (default: false)", "de": "Import erzwingen auch wenn Daten frisch sind", "fr": "Forcer la réimportation"}, "default": False}, "description": {"en": "Trustee feature instance ID", "de": "Trustee Feature-Instanz-ID", "fr": "ID instance Trustee"}},
{"name": "dateFrom", "type": "string", "required": False, "description": {"en": "Start date filter (YYYY-MM-DD)", "de": "Startdatum-Filter (JJJJ-MM-TT)", "fr": "Date début (AAAA-MM-JJ)"}, "default": ""}, {"name": "forceRefresh", "type": "boolean", "required": False, "frontendType": "checkbox",
{"name": "dateTo", "type": "string", "required": False, "description": {"en": "End date filter (YYYY-MM-DD)", "de": "Enddatum-Filter (JJJJ-MM-TT)", "fr": "Date fin (AAAA-MM-JJ)"}, "default": ""}, "description": {"en": "Force re-import", "de": "Import erzwingen", "fr": "Forcer la réimportation"}, "default": False},
{"name": "dateFrom", "type": "string", "required": False, "frontendType": "date",
"description": {"en": "Start date (YYYY-MM-DD)", "de": "Startdatum", "fr": "Date début"}, "default": ""},
{"name": "dateTo", "type": "string", "required": False, "frontendType": "date",
"description": {"en": "End date (YYYY-MM-DD)", "de": "Enddatum", "fr": "Date fin"}, "default": ""},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "ActionResult"}},
"meta": {"icon": "mdi-database-refresh", "color": "#4CAF50"}, "meta": {"icon": "mdi-database-refresh", "color": "#4CAF50"},
"_method": "trustee", "_method": "trustee",
"_action": "refreshAccountingData", "_action": "refreshAccountingData",
"_paramMap": {"featureInstanceId": "featureInstanceId", "forceRefresh": "forceRefresh", "dateFrom": "dateFrom", "dateTo": "dateTo"},
}, },
{ {
"id": "trustee.extractFromFiles", "id": "trustee.extractFromFiles",
"category": "trustee", "category": "trustee",
"label": {"en": "Extract Documents", "de": "Dokumente extrahieren", "fr": "Extraire documents"}, "label": {"en": "Extract Documents", "de": "Dokumente extrahieren", "fr": "Extraire documents"},
"description": { "description": {
"en": "Extract document type and data from PDF/JPG via AI (from fileIds or SharePoint folder)", "en": "Extract document type and data from PDF/JPG via AI.",
"de": "Dokumenttyp und Daten aus PDF/JPG per AI extrahieren (aus Dateien oder SharePoint-Ordner)", "de": "Dokumenttyp und Daten aus PDF/JPG per AI extrahieren.",
"fr": "Extraire type et données de PDF/JPG par IA", "fr": "Extraire type et données de PDF/JPG par IA.",
}, },
"parameters": [ "parameters": [
{"name": "connectionId", "type": "string", "required": False, "description": {"en": "SharePoint connection (if reading from SharePoint)", "de": "SharePoint-Verbindung (falls aus SharePoint)", "fr": "Connexion SharePoint"}, "default": ""}, {"name": "connectionReference", "type": "string", "required": False, "frontendType": "userConnection",
{"name": "sharepointFolder", "type": "string", "required": False, "description": {"en": "SharePoint folder path (e.g. /sites/MySite/Documents/Expenses)", "de": "SharePoint-Ordnerpfad", "fr": "Chemin dossier SharePoint"}, "default": ""}, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}, "default": ""},
{"name": "featureInstanceId", "type": "string", "required": True, "description": {"en": "Trustee feature instance ID", "de": "Trustee Feature-Instanz-ID", "fr": "ID instance Trustee"}}, {"name": "sharepointFolder", "type": "string", "required": False, "frontendType": "sharepointFolder",
{"name": "prompt", "type": "string", "required": False, "description": {"en": "AI prompt for extraction (optional)", "de": "AI-Prompt für Extraktion (optional)", "fr": "Prompt IA pour extraction"}, "default": ""}, "frontendOptions": {"dependsOn": "connectionReference"},
"description": {"en": "SharePoint folder path", "de": "SharePoint-Ordnerpfad", "fr": "Chemin dossier SharePoint"}, "default": ""},
{"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
"description": {"en": "Trustee feature instance ID", "de": "Trustee Feature-Instanz-ID", "fr": "ID instance Trustee"}},
{"name": "prompt", "type": "string", "required": False, "frontendType": "textarea",
"description": {"en": "AI prompt for extraction", "de": "AI-Prompt für Extraktion", "fr": "Prompt IA"}, "default": ""},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
"outputPorts": {0: {"schema": "DocumentList"}},
"meta": {"icon": "mdi-file-document-scan", "color": "#4CAF50"}, "meta": {"icon": "mdi-file-document-scan", "color": "#4CAF50"},
"_method": "trustee", "_method": "trustee",
"_action": "extractFromFiles", "_action": "extractFromFiles",
"_paramMap": {"connectionId": "connectionReference", "sharepointFolder": "sharepointFolder", "featureInstanceId": "featureInstanceId", "prompt": "prompt"},
}, },
{ {
"id": "trustee.processDocuments", "id": "trustee.processDocuments",
"category": "trustee", "category": "trustee",
"label": {"en": "Process Documents", "de": "Dokumente verarbeiten", "fr": "Traiter documents"}, "label": {"en": "Process Documents", "de": "Dokumente verarbeiten", "fr": "Traiter documents"},
"description": { "description": {
"en": "Create TrusteeDocument + TrusteePosition from extraction result", "en": "Create TrusteeDocument + TrusteePosition from extraction result.",
"de": "TrusteeDocument + TrusteePosition aus Extraktionsergebnis erstellen", "de": "TrusteeDocument + TrusteePosition aus Extraktionsergebnis erstellen.",
"fr": "Créer TrusteeDocument + TrusteePosition à partir du résultat", "fr": "Créer TrusteeDocument + TrusteePosition.",
}, },
"parameters": [ "parameters": [
{"name": "documentList", "type": "string", "required": True, "description": {"en": "Reference to extractFromFiles result", "de": "Referenz auf extractFromFiles-Ergebnis", "fr": "Référence au résultat extractFromFiles"}}, {"name": "documentList", "type": "string", "required": True, "frontendType": "text",
{"name": "featureInstanceId", "type": "string", "required": True, "description": {"en": "Trustee feature instance ID", "de": "Trustee Feature-Instanz-ID", "fr": "ID instance Trustee"}}, "description": {"en": "Reference to extraction result", "de": "Referenz auf Ergebnis", "fr": "Référence au résultat"}},
{"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
"description": {"en": "Trustee feature instance ID", "de": "Trustee Feature-Instanz-ID", "fr": "ID instance Trustee"}},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
"outputPorts": {0: {"schema": "ActionResult"}},
"meta": {"icon": "mdi-file-document-check", "color": "#4CAF50"}, "meta": {"icon": "mdi-file-document-check", "color": "#4CAF50"},
"_method": "trustee", "_method": "trustee",
"_action": "processDocuments", "_action": "processDocuments",
"_paramMap": {"documentList": "documentList", "featureInstanceId": "featureInstanceId"},
}, },
{ {
"id": "trustee.syncToAccounting", "id": "trustee.syncToAccounting",
"category": "trustee", "category": "trustee",
"label": {"en": "Sync to Accounting", "de": "In Buchhaltung synchronisieren", "fr": "Synchroniser comptabilité"}, "label": {"en": "Sync to Accounting", "de": "In Buchhaltung synchronisieren", "fr": "Synchroniser comptabilité"},
"description": { "description": {
"en": "Push trustee positions to accounting system", "en": "Push trustee positions to accounting system.",
"de": "Trustee-Positionen in Buchhaltungssystem übertragen", "de": "Trustee-Positionen in Buchhaltungssystem übertragen.",
"fr": "Transférer les positions vers la comptabilité", "fr": "Transférer les positions vers la comptabilité.",
}, },
"parameters": [ "parameters": [
{"name": "documentList", "type": "string", "required": True, "description": {"en": "Reference to processDocuments result", "de": "Referenz auf processDocuments-Ergebnis", "fr": "Référence au résultat processDocuments"}}, {"name": "documentList", "type": "string", "required": True, "frontendType": "text",
{"name": "featureInstanceId", "type": "string", "required": True, "description": {"en": "Trustee feature instance ID", "de": "Trustee Feature-Instanz-ID", "fr": "ID instance Trustee"}}, "description": {"en": "Reference to processed documents", "de": "Referenz auf Ergebnis", "fr": "Référence au résultat"}},
{"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
"description": {"en": "Trustee feature instance ID", "de": "Trustee Feature-Instanz-ID", "fr": "ID instance Trustee"}},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}},
"outputPorts": {0: {"schema": "ActionResult"}},
"meta": {"icon": "mdi-calculator", "color": "#4CAF50"}, "meta": {"icon": "mdi-calculator", "color": "#4CAF50"},
"_method": "trustee", "_method": "trustee",
"_action": "syncToAccounting", "_action": "syncToAccounting",
"_paramMap": {"documentList": "documentList", "featureInstanceId": "featureInstanceId"},
}, },
] ]

View file

@ -9,6 +9,7 @@ import logging
from typing import Dict, List, Any from typing import Dict, List, Any
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, SYSTEM_VARIABLES
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -25,10 +26,9 @@ def getNodeTypes(
def _localizeNode(node: Dict[str, Any], language: str) -> Dict[str, Any]: def _localizeNode(node: Dict[str, Any], language: str) -> Dict[str, Any]:
"""Apply language to label/description/parameters.""" """Apply language to label/description/parameters. Keep inputPorts/outputPorts."""
lang = language if language in ("en", "de", "fr") else "en" lang = language if language in ("en", "de", "fr") else "en"
out = dict(node) out = dict(node)
# Strip internal keys for API response
for key in list(out.keys()): for key in list(out.keys()):
if key.startswith("_"): if key.startswith("_"):
del out[key] del out[key]
@ -56,7 +56,7 @@ def getNodeTypesForApi(
language: str = "en", language: str = "en",
) -> Dict[str, Any]: ) -> Dict[str, Any]:
""" """
API-ready response: nodeTypes with localized strings, plus categories list. API-ready response: nodeTypes with localized strings, plus categories, portTypeCatalog, systemVariables.
""" """
nodes = getNodeTypes(services, language) nodes = getNodeTypes(services, language)
localized = [_localizeNode(n, language) for n in nodes] localized = [_localizeNode(n, language) for n in nodes]
@ -72,7 +72,20 @@ def getNodeTypesForApi(
{"id": "clickup", "label": {"en": "ClickUp", "de": "ClickUp", "fr": "ClickUp"}}, {"id": "clickup", "label": {"en": "ClickUp", "de": "ClickUp", "fr": "ClickUp"}},
{"id": "trustee", "label": {"en": "Trustee", "de": "Treuhand", "fr": "Fiduciaire"}}, {"id": "trustee", "label": {"en": "Trustee", "de": "Treuhand", "fr": "Fiduciaire"}},
] ]
return {"nodeTypes": localized, "categories": categories}
catalogSerialized = {}
for name, schema in PORT_TYPE_CATALOG.items():
catalogSerialized[name] = {
"name": schema.name,
"fields": [f.model_dump() for f in schema.fields],
}
return {
"nodeTypes": localized,
"categories": categories,
"portTypeCatalog": catalogSerialized,
"systemVariables": SYSTEM_VARIABLES,
}
def getNodeTypeToMethodAction() -> Dict[str, tuple]: def getNodeTypeToMethodAction() -> Dict[str, tuple]:

View file

@ -0,0 +1,504 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Typed Port System for the Graphical Editor.
Defines PortSchema, PORT_TYPE_CATALOG, SYSTEM_VARIABLES,
output normalizers, input extractors, and Transit helpers.
"""
import logging
import time
import uuid
from typing import Any, Callable, Dict, List, Optional
from pydantic import BaseModel, Field
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Pydantic models
# ---------------------------------------------------------------------------
class PortField(BaseModel):
    """A single named field inside a port schema."""
    name: str
    type: str  # str, int, bool, List[str], List[Document], Dict[str,Any]
    description: Dict[str, str] = {}  # localized descriptions: {en, de, fr}
    required: bool = True  # whether the field must be present in the payload
class PortSchema(BaseModel):
    """Named schema describing the data shape flowing through a port."""
    name: str  # e.g. "EmailDraft", "AiResult", "Transit"
    fields: List[PortField]
class InputPortDef(BaseModel):
    """Input-port declaration: which schema names this port accepts."""
    accepts: List[str]  # list of accepted schema names
class OutputPortDef(BaseModel):
    """Output-port declaration.

    The field is stored as ``schema_`` with alias ``"schema"`` —
    presumably to avoid clashing with pydantic's reserved ``schema``
    attribute (NOTE(review): confirm against the pydantic version in use).
    ``populate_by_name`` lets callers construct with either key.
    """
    model_config = {"populate_by_name": True}
    schema_: str = Field(alias="schema")  # schema name from PORT_TYPE_CATALOG
    dynamic: bool = False  # True when the schema is derived at edit time (see deriveFrom)
    deriveFrom: Optional[str] = None  # parameter name a dynamic schema derives from

    def model_dump(self, **kw):
        # Re-key "schema_" back to the public "schema" name so serialized
        # output matches the wire format regardless of by_alias.
        d = super().model_dump(**kw)
        d["schema"] = d.pop("schema_", d.get("schema"))
        return d
# ---------------------------------------------------------------------------
# PORT_TYPE_CATALOG
# ---------------------------------------------------------------------------
# Catalog of all named port schemas. Keys are the schema names referenced by
# node definitions' inputPorts/outputPorts. "Transit" is a pass-through
# schema with no declared fields (see _normalizeToSchema / _wrapTransit).
PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
    "DocumentList": PortSchema(name="DocumentList", fields=[
        PortField(name="documents", type="List[Document]",
                  description={"en": "List of documents", "de": "Dokumentenliste", "fr": "Liste de documents"}),
    ]),
    "FileList": PortSchema(name="FileList", fields=[
        PortField(name="files", type="List[File]",
                  description={"en": "List of files", "de": "Dateiliste", "fr": "Liste de fichiers"}),
    ]),
    "EmailDraft": PortSchema(name="EmailDraft", fields=[
        PortField(name="subject", type="str",
                  description={"en": "Subject", "de": "Betreff", "fr": "Sujet"}),
        PortField(name="body", type="str",
                  description={"en": "Body", "de": "Inhalt", "fr": "Corps"}),
        PortField(name="to", type="List[str]",
                  description={"en": "Recipients", "de": "Empfänger", "fr": "Destinataires"}),
        PortField(name="cc", type="List[str]", required=False,
                  description={"en": "CC", "de": "CC", "fr": "CC"}),
        PortField(name="attachments", type="List[Document]", required=False,
                  description={"en": "Attachments", "de": "Anhänge", "fr": "Pièces jointes"}),
    ]),
    "EmailList": PortSchema(name="EmailList", fields=[
        PortField(name="emails", type="List[Email]",
                  description={"en": "Emails", "de": "E-Mails", "fr": "Emails"}),
    ]),
    "TaskList": PortSchema(name="TaskList", fields=[
        PortField(name="tasks", type="List[Task]",
                  description={"en": "Tasks", "de": "Aufgaben", "fr": "Tâches"}),
    ]),
    "TaskResult": PortSchema(name="TaskResult", fields=[
        PortField(name="success", type="bool",
                  description={"en": "Success", "de": "Erfolg", "fr": "Succès"}),
        PortField(name="taskId", type="str",
                  description={"en": "Task ID", "de": "Aufgaben-ID", "fr": "ID tâche"}),
        PortField(name="task", type="Dict",
                  description={"en": "Task data", "de": "Aufgabendaten", "fr": "Données tâche"}),
    ]),
    "FormPayload": PortSchema(name="FormPayload", fields=[
        PortField(name="payload", type="Dict[str,Any]",
                  description={"en": "Form data", "de": "Formulardaten", "fr": "Données formulaire"}),
    ]),
    "AiResult": PortSchema(name="AiResult", fields=[
        PortField(name="prompt", type="str",
                  description={"en": "Prompt", "de": "Prompt", "fr": "Invite"}),
        PortField(name="response", type="str",
                  description={"en": "Response text", "de": "Antworttext", "fr": "Texte réponse"}),
        PortField(name="responseData", type="Dict", required=False,
                  description={"en": "Structured response", "de": "Strukturierte Antwort", "fr": "Réponse structurée"}),
        PortField(name="context", type="str",
                  description={"en": "Context", "de": "Kontext", "fr": "Contexte"}),
        PortField(name="documents", type="List[Document]",
                  description={"en": "Documents", "de": "Dokumente", "fr": "Documents"}),
    ]),
    "BoolResult": PortSchema(name="BoolResult", fields=[
        PortField(name="result", type="bool",
                  description={"en": "Result", "de": "Ergebnis", "fr": "Résultat"}),
        PortField(name="reason", type="str", required=False,
                  description={"en": "Reason", "de": "Begründung", "fr": "Raison"}),
    ]),
    "TextResult": PortSchema(name="TextResult", fields=[
        PortField(name="text", type="str",
                  description={"en": "Text", "de": "Text", "fr": "Texte"}),
    ]),
    "LoopItem": PortSchema(name="LoopItem", fields=[
        PortField(name="currentItem", type="Any",
                  description={"en": "Current item", "de": "Aktuelles Element", "fr": "Élément courant"}),
        PortField(name="currentIndex", type="int",
                  description={"en": "Current index", "de": "Aktueller Index", "fr": "Index courant"}),
        PortField(name="items", type="List[Any]",
                  description={"en": "All items", "de": "Alle Elemente", "fr": "Tous les éléments"}),
        PortField(name="count", type="int",
                  description={"en": "Total count", "de": "Gesamtanzahl", "fr": "Nombre total"}),
    ]),
    "AggregateResult": PortSchema(name="AggregateResult", fields=[
        PortField(name="items", type="List[Any]",
                  description={"en": "Collected items", "de": "Gesammelte Elemente", "fr": "Éléments collectés"}),
        PortField(name="count", type="int",
                  description={"en": "Count", "de": "Anzahl", "fr": "Nombre"}),
    ]),
    "MergeResult": PortSchema(name="MergeResult", fields=[
        PortField(name="inputs", type="Dict[int,Any]",
                  description={"en": "Inputs by port", "de": "Eingaben nach Port", "fr": "Entrées par port"}),
        PortField(name="first", type="Any",
                  description={"en": "First available", "de": "Erstes verfügbares", "fr": "Premier disponible"}),
        PortField(name="merged", type="Dict",
                  description={"en": "Merged data", "de": "Zusammengeführte Daten", "fr": "Données fusionnées"}),
    ]),
    "ActionResult": PortSchema(name="ActionResult", fields=[
        PortField(name="success", type="bool",
                  description={"en": "Success", "de": "Erfolg", "fr": "Succès"}),
        PortField(name="error", type="str", required=False,
                  description={"en": "Error", "de": "Fehler", "fr": "Erreur"}),
        PortField(name="data", type="Dict", required=False,
                  description={"en": "Result data", "de": "Ergebnisdaten", "fr": "Données résultat"}),
    ]),
    # Transit: opaque pass-through envelope, no declared fields.
    "Transit": PortSchema(name="Transit", fields=[]),
}
# ---------------------------------------------------------------------------
# SYSTEM_VARIABLES
# ---------------------------------------------------------------------------
# Catalog of system variables exposed to the workflow editor UI.
# Runtime values are produced by _resolveSystemVariable; entries here are
# metadata only (type + English description).
SYSTEM_VARIABLES: Dict[str, Dict[str, str]] = {
    "system.timestamp": {"type": "int", "description": "Unix timestamp (ms)"},
    "system.date": {"type": "str", "description": "ISO date (YYYY-MM-DD)"},
    "system.datetime": {"type": "str", "description": "ISO datetime"},
    "system.time": {"type": "str", "description": "HH:MM:SS"},
    "system.userId": {"type": "str", "description": "Current user ID"},
    "system.userName": {"type": "str", "description": "Current user name"},
    "system.userEmail": {"type": "str", "description": "Current user email"},
    "system.workflowId": {"type": "str", "description": "Workflow ID"},
    "system.runId": {"type": "str", "description": "Run ID"},
    "system.instanceId": {"type": "str", "description": "Feature instance ID"},
    "system.mandateId": {"type": "str", "description": "Mandate ID"},
    "system.loopIndex": {"type": "int", "description": "Current loop index (only in loop)"},
    "system.loopCount": {"type": "int", "description": "Loop item count (only in loop)"},
    "system.uuid": {"type": "str", "description": "Random UUID"},
}
def _resolveSystemVariable(variable: str, context: Dict[str, Any]) -> Any:
"""Resolve a system variable name to its runtime value."""
from datetime import datetime, timezone
now = datetime.now(timezone.utc)
mapping = {
"system.timestamp": lambda: int(now.timestamp() * 1000),
"system.date": lambda: now.strftime("%Y-%m-%d"),
"system.datetime": lambda: now.isoformat(),
"system.time": lambda: now.strftime("%H:%M:%S"),
"system.userId": lambda: context.get("userId", ""),
"system.userName": lambda: context.get("userName", ""),
"system.userEmail": lambda: context.get("userEmail", ""),
"system.workflowId": lambda: context.get("workflowId", ""),
"system.runId": lambda: context.get("_runId", ""),
"system.instanceId": lambda: context.get("instanceId", ""),
"system.mandateId": lambda: context.get("mandateId", ""),
"system.loopIndex": lambda: (context.get("_loopState") or {}).get("currentIndex", -1),
"system.loopCount": lambda: len((context.get("_loopState") or {}).get("items", [])),
"system.uuid": lambda: str(uuid.uuid4()),
}
resolver = mapping.get(variable)
if resolver:
return resolver()
logger.warning("Unknown system variable: %s", variable)
return None
# ---------------------------------------------------------------------------
# Output normalizers
# ---------------------------------------------------------------------------
def _normalizeToSchema(raw: Any, schemaName: str) -> Dict[str, Any]:
    """
    Normalize raw executor output to match the declared port schema.
    Ensures _success/_error meta-fields are always present; missing schema
    fields are filled with type-appropriate defaults.
    """
    if not isinstance(raw, dict):
        raw = {} if raw is None else {"value": raw}
    normalized = dict(raw)
    if "_success" not in normalized:
        normalized["_success"] = not bool(raw.get("error"))
    if "_error" not in normalized:
        normalized["_error"] = raw.get("error")
    schema = PORT_TYPE_CATALOG.get(schemaName)
    if schema is None or schemaName == "Transit":
        # Unknown schema or pass-through Transit: no field defaults to fill.
        return normalized
    for portField in schema.fields:
        normalized.setdefault(portField.name, _defaultForType(portField.type))
    return normalized
def _defaultForType(typeStr: str) -> Any:
"""Return a sensible default for a type string."""
if typeStr.startswith("List"):
return []
if typeStr.startswith("Dict"):
return {}
if typeStr == "bool":
return False
if typeStr == "int":
return 0
if typeStr == "str":
return ""
return None
def _normalizeError(error: Exception, schemaName: str) -> Dict[str, Any]:
    """Build an error envelope matching the schema with _success=False."""
    envelope: Dict[str, Any] = {"_success": False, "_error": str(error)}
    schema = PORT_TYPE_CATALOG.get(schemaName)
    if schema is not None:
        for portField in schema.fields:
            envelope.setdefault(portField.name, _defaultForType(portField.type))
    return envelope
# ---------------------------------------------------------------------------
# Input extractors (one per input port type)
# ---------------------------------------------------------------------------
def _extractEmailDraft(upstream: Dict[str, Any]) -> Dict[str, Any]:
"""Extract EmailDraft fields from upstream output."""
result = {}
if upstream.get("responseData") and isinstance(upstream["responseData"], dict):
rd = upstream["responseData"]
for key in ("subject", "body", "to", "cc"):
if key in rd:
result[key] = rd[key]
if not result:
for key in ("subject", "body", "to", "cc"):
if key in upstream:
result[key] = upstream[key]
return result
def _extractDocuments(upstream: Dict[str, Any]) -> Dict[str, Any]:
"""Extract documents from upstream output."""
docs = upstream.get("documents") or upstream.get("documentList") or []
if not docs and isinstance(upstream.get("data"), dict):
docs = upstream["data"].get("documents") or upstream["data"].get("documentList") or []
# input.upload format
if not docs:
files = upstream.get("files") or []
fileObj = upstream.get("file")
fileIds = upstream.get("fileIds") or []
if fileObj:
docs = [fileObj]
elif files:
docs = files
elif fileIds:
docs = [{"validationMetadata": {"fileId": fid}} for fid in fileIds]
return {"documents": docs if isinstance(docs, list) else [docs]} if docs else {}
def _extractText(upstream: Dict[str, Any]) -> Dict[str, Any]:
"""Extract text from upstream output."""
text = upstream.get("text") or upstream.get("response") or upstream.get("context") or ""
if not text and upstream.get("payload"):
import json
payload = upstream["payload"]
text = json.dumps(payload, ensure_ascii=False) if isinstance(payload, dict) else str(payload)
return {"text": str(text)} if text else {}
def _extractEmailList(upstream: Dict[str, Any]) -> Dict[str, Any]:
"""Extract email list from upstream output."""
emails = upstream.get("emails") or []
if not emails:
docs = upstream.get("documents") or upstream.get("documentList") or []
if docs:
import json
for doc in docs:
raw = doc.get("documentData") if isinstance(doc, dict) else None
if raw:
try:
data = json.loads(raw) if isinstance(raw, str) else raw
if isinstance(data, dict):
found = (data.get("emails", {}).get("emails", [])
or data.get("searchResults", {}).get("results", []))
if found:
emails = found
break
except (json.JSONDecodeError, TypeError):
pass
return {"emails": emails} if emails else {}
def _extractTaskList(upstream: Dict[str, Any]) -> Dict[str, Any]:
"""Extract task list from upstream output."""
tasks = upstream.get("tasks") or []
if not tasks:
docs = upstream.get("documents") or upstream.get("documentList") or []
if docs:
import json
for doc in docs:
raw = doc.get("documentData") if isinstance(doc, dict) else None
if raw:
try:
data = json.loads(raw) if isinstance(raw, str) else raw
if isinstance(data, dict) and "tasks" in data:
tasks = data["tasks"]
break
except (json.JSONDecodeError, TypeError):
pass
return {"tasks": tasks} if tasks else {}
def _extractFileList(upstream: Dict[str, Any]) -> Dict[str, Any]:
"""Extract file list from upstream output."""
files = upstream.get("files") or []
return {"files": files} if files else {}
def _extractFormPayload(upstream: Dict[str, Any]) -> Dict[str, Any]:
"""Extract form payload from upstream output."""
payload = upstream.get("payload")
if payload and isinstance(payload, dict):
return {"payload": payload}
return {}
def _extractAiResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
"""Extract AI result fields from upstream output."""
result = {}
for key in ("prompt", "response", "responseData", "context", "documents"):
if key in upstream:
result[key] = upstream[key]
return result
def _extractBoolResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
"""Extract bool result from upstream output."""
result = upstream.get("result")
if isinstance(result, bool):
return {"result": result, "reason": upstream.get("reason", "")}
approved = upstream.get("approved")
if isinstance(approved, bool):
return {"result": approved, "reason": upstream.get("reason", "")}
return {}
def _extractTaskResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
"""Extract task result from upstream output."""
result = {}
if "taskId" in upstream:
result["taskId"] = upstream["taskId"]
if "task" in upstream:
result["task"] = upstream["task"]
elif "clickupTask" in upstream:
result["task"] = upstream["clickupTask"]
if "success" in upstream:
result["success"] = upstream["success"]
return result
def _extractAggregateResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
"""Extract aggregate result from upstream output."""
items = upstream.get("items") or []
return {"items": items, "count": len(items)}
def _extractMergeResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
"""Extract merge result from upstream output."""
return {
"inputs": upstream.get("inputs", {}),
"first": upstream.get("first"),
"merged": upstream.get("merged", {}),
}
# Maps an input-port schema name to the extractor that pulls matching
# fields out of an upstream node's output dict. Transit has no entry:
# it passes payloads through unchanged.
INPUT_EXTRACTORS: Dict[str, Callable] = {
    "EmailDraft": _extractEmailDraft,
    "DocumentList": _extractDocuments,
    "TextResult": _extractText,
    "EmailList": _extractEmailList,
    "TaskList": _extractTaskList,
    "FileList": _extractFileList,
    "FormPayload": _extractFormPayload,
    "AiResult": _extractAiResult,
    "BoolResult": _extractBoolResult,
    "TaskResult": _extractTaskResult,
    "AggregateResult": _extractAggregateResult,
    "MergeResult": _extractMergeResult,
}
# ---------------------------------------------------------------------------
# Transit helpers
# ---------------------------------------------------------------------------
def _wrapTransit(data: Any, meta: Dict[str, Any]) -> Dict[str, Any]:
"""Wrap data in a Transit envelope."""
return {"_transit": True, "_meta": meta, "data": data}
def _unwrapTransit(output: Any) -> Any:
"""Unwrap a Transit envelope, returning the inner data."""
if isinstance(output, dict) and output.get("_transit"):
return output.get("data")
return output
def _resolveTransitChain(
nodeId: str,
nodeOutputs: Dict[str, Any],
connectionMap: Dict[str, list],
) -> Any:
"""
Follow _transit chain backwards until a real (non-transit) producer is found.
Returns the unwrapped output of the real producer.
"""
visited = set()
current = nodeId
while current and current not in visited:
visited.add(current)
out = nodeOutputs.get(current)
if not isinstance(out, dict) or not out.get("_transit"):
return out
sources = connectionMap.get(current, [])
if not sources:
return _unwrapTransit(out)
srcId = sources[0][0] if sources else None
if not srcId:
return _unwrapTransit(out)
current = srcId
return nodeOutputs.get(nodeId)
# ---------------------------------------------------------------------------
# Schema derivation for dynamic outputs
# ---------------------------------------------------------------------------
def _deriveFormPayloadSchema(node: Dict[str, Any]) -> Optional[PortSchema]:
    """Derive an output schema from form field definitions.

    Fix: the input.form node definition declares its dynamic output port with
    deriveFrom="formFields", but this function previously only read the key
    "fields" — so derivation silently never fired for that node. Both keys
    are now accepted ("formFields" first, legacy "fields" as fallback).
    Also guards against "parameters" being a list (as in raw node-type
    definitions), which previously raised AttributeError on .get().

    Returns None when no usable field definitions are present.
    """
    params = node.get("parameters") or {}
    if not isinstance(params, dict):
        return None
    fieldDefs = params.get("formFields") or params.get("fields")
    if not fieldDefs or not isinstance(fieldDefs, list):
        return None
    portFields = []
    for f in fieldDefs:
        if isinstance(f, dict) and f.get("name"):
            label = f.get("label")
            # Labels may be localized dicts ({en, de, fr}) or plain strings;
            # fall back to the field name when no label is given.
            description = label if isinstance(label, dict) else {"en": str(label) if label is not None else str(f["name"])}
            portFields.append(PortField(
                name=f["name"],
                type=f.get("type", "str"),
                description=description,
                required=f.get("required", False),
            ))
    return PortSchema(name="FormPayload_dynamic", fields=portFields) if portFields else None
def _deriveTransformSchema(node: Dict[str, Any]) -> Optional[PortSchema]:
    """Derive an output schema from transform mappings.

    Fix: guards against "parameters" being a list (as in raw node-type
    definitions), which previously raised AttributeError on .get();
    consistent with _deriveFormPayloadSchema.

    Returns None when no usable mappings are present.
    """
    params = node.get("parameters") or {}
    if not isinstance(params, dict):
        return None
    mappings = params.get("mappings")
    if not mappings or not isinstance(mappings, list):
        return None
    portFields = [
        PortField(
            name=m["outputField"],
            type=m.get("type", "str"),
            description={"en": m.get("label", m["outputField"])},
        )
        for m in mappings
        if isinstance(m, dict) and m.get("outputField")
    ]
    return PortSchema(name="Transform_dynamic", fields=portFields) if portFields else None

View file

@ -1642,6 +1642,22 @@ def get_import_status(
return counts return counts
# ===== AI Data Cache =====
@router.post("/{instanceId}/accounting/clear-cache")
@limiter.limit("10/minute")  # rate-limited: cache clearing is an admin-ish action
def clear_ai_data_cache(
    request: Request,
    instanceId: str = Path(..., description="Feature Instance ID"),
    context: RequestContext = Depends(getRequestContext),
) -> Dict[str, Any]:
    """Clear the AI feature-data query cache for this instance so the next AI query reads fresh DB data.

    Validates the caller's access to the instance first, then delegates to
    clearFeatureQueryCache and returns the number of removed entries plus
    the instance ID.
    """
    _validateInstanceAccess(instanceId, context)
    # Local import: keeps the serviceAgent dependency lazy at module load time.
    from modules.serviceCenter.services.serviceAgent.coreTools._featureSubAgentTools import clearFeatureQueryCache
    removed = clearFeatureQueryCache(instanceId)
    return {"cleared": removed, "featureInstanceId": instanceId}
# ===== Position-Document Query ===== # ===== Position-Document Query =====
@router.get("/{instanceId}/positions/document/{documentId}", response_model=List[TrusteePosition]) @router.get("/{instanceId}/positions/document/{documentId}", response_model=List[TrusteePosition])

View file

@ -356,8 +356,6 @@ def _workspaceMessageToClientDict(msg: Any) -> Dict[str, Any]:
raw = dict(msg) raw = dict(msg)
elif hasattr(msg, "model_dump"): elif hasattr(msg, "model_dump"):
raw = msg.model_dump() raw = msg.model_dump()
elif hasattr(msg, "dict"):
raw = msg.dict()
else: else:
raw = { raw = {
"id": getattr(msg, "id", None), "id": getattr(msg, "id", None),
@ -378,8 +376,6 @@ def _workspaceMessageToClientDict(msg: Any) -> Dict[str, Any]:
serialized_docs.append(doc) serialized_docs.append(doc)
elif hasattr(doc, "model_dump"): elif hasattr(doc, "model_dump"):
serialized_docs.append(doc.model_dump()) serialized_docs.append(doc.model_dump())
elif hasattr(doc, "dict"):
serialized_docs.append(doc.dict())
else: else:
serialized_docs.append({ serialized_docs.append({
"id": getattr(doc, "id", ""), "id": getattr(doc, "id", ""),

View file

@ -563,6 +563,46 @@ class AppObjects:
logger.error(f"Error getting user by ID: {str(e)}") logger.error(f"Error getting user by ID: {str(e)}")
return None return None
def getUsersByIds(self, userIds: list[str]) -> dict[str, User]:
    """Batch-load users by IDs in a single SQL query (id = ANY(...)).

    Returns a {userId: User} mapping; IDs that are missing or not
    accessible are simply absent from the result. On any error an
    empty dict is returned and the error is logged.
    """
    if not userIds:
        return {}
    usersById: dict[str, User] = {}
    try:
        rows = self.db.getRecordset(UserInDB, recordFilter={"id": list(set(userIds))})
        for row in rows or []:
            record = dict(row)
            # Normalize a NULL roleLabels column to an empty list for the model.
            if record.get("roleLabels") is None:
                record["roleLabels"] = []
            userId = record.get("id")
            if userId:
                usersById[userId] = User(**record)
    except Exception as exc:
        logger.error(f"Error batch-loading users: {exc}")
        return {}
    return usersById
def getMandatesByIds(self, mandateIds: list[str]) -> dict[str, Mandate]:
    """Batch-load mandates by IDs in a single SQL query (id = ANY(...)).

    Returns a {mandateId: Mandate} mapping; unknown IDs are omitted.
    On any error an empty dict is returned and the error is logged.
    """
    if not mandateIds:
        return {}
    mandatesById: dict[str, Mandate] = {}
    try:
        rows = self.db.getRecordset(Mandate, recordFilter={"id": list(set(mandateIds))})
        for row in rows or []:
            record = dict(row)
            mandateId = record.get("id")
            if mandateId:
                mandatesById[mandateId] = Mandate(**record)
    except Exception as exc:
        logger.error(f"Error batch-loading mandates: {exc}")
        return {}
    return mandatesById
def _getUserForAuthentication(self, username: str) -> Optional[Dict[str, Any]]: def _getUserForAuthentication(self, username: str) -> Optional[Dict[str, Any]]:
""" """
Get user record by username for authentication purposes. Get user record by username for authentication purposes.

View file

@ -1416,6 +1416,355 @@ class BillingObjects:
return balances return balances
@staticmethod
def _mapPaginationColumns(pagination: PaginationParams) -> PaginationParams:
    """Remap frontend column names to DB column names in filters and sort.

    Enriched (joined-in) columns cannot be filtered/sorted at SQL level
    and are dropped; 'createdAt' maps to 'sysCreatedAt'. If dropping
    enriched sort columns leaves no sort at all, a default descending
    sysCreatedAt sort is substituted. The input object is not mutated.
    """
    import copy

    columnMap = {"createdAt": "sysCreatedAt"}
    enrichedColumns = {"mandateName", "userName", "mandateId", "userId"}
    remapped = copy.deepcopy(pagination)
    if remapped.filters:
        remapped.filters = {
            columnMap.get(name, name): value
            for name, value in remapped.filters.items()
            if name not in enrichedColumns
        }
    if remapped.sort:
        newSort = []
        for entry in remapped.sort:
            if isinstance(entry, dict):
                fieldName = entry.get("field", "")
            else:
                fieldName = getattr(entry, "field", "")
            if fieldName in enrichedColumns:
                continue
            dbField = columnMap.get(fieldName, fieldName)
            if isinstance(entry, dict):
                newSort.append({**entry, "field": dbField})
            else:
                newSort.append({"field": dbField, "direction": getattr(entry, "direction", "asc")})
        remapped.sort = newSort or [{"field": "sysCreatedAt", "direction": "desc"}]
    return remapped
def getTransactionsForMandatesPaginated(
    self,
    mandateIds: Optional[List[str]],
    pagination: PaginationParams,
    scope: str = "all",
    userId: Optional[str] = None,
) -> PaginatedResult:
    """
    SQL-level paginated transactions across multiple mandates.
    Single SQL query with WHERE accountId = ANY(...), ORDER BY, LIMIT/OFFSET.
    Enrichment (userName, mandateName) only for the returned page.

    Args:
        mandateIds: Mandates to include; None means all billing accounts.
        pagination: Frontend pagination params (remapped to DB columns via
            _mapPaginationColumns; enriched columns are dropped there).
        scope: "personal" additionally filters to rows created by userId.
        userId: Required for scope == "personal".

    Returns:
        PaginatedResult whose item dicts carry extra keys mandateId,
        mandateName, userId, userName. Empty result on any error.
    """
    from modules.interfaces.interfaceDbApp import getInterface as getAppInterface
    try:
        mappedPagination = self._mapPaginationColumns(pagination)
        # Accounts are loaded fully and filtered in Python; mandate scoping
        # happens here, the transaction query only sees account IDs.
        allAccounts = self.db.getRecordset(BillingAccount)
        if mandateIds:
            allAccounts = [a for a in allAccounts if a.get("mandateId") in set(mandateIds)]
        accountIds = [a.get("id") for a in allAccounts if a.get("id")]
        if not accountIds:
            return PaginatedResult(items=[], totalItems=0, totalPages=0)
        recordFilter: Dict[str, Any] = {"accountId": accountIds}
        if scope == "personal" and userId:
            recordFilter["createdByUserId"] = userId
        result = self.db.getRecordsetPaginated(
            BillingTransaction,
            pagination=mappedPagination,
            recordFilter=recordFilter,
        )
        # getRecordsetPaginated may return a dict or an object; support both shapes.
        pageItems = result.get("items", []) if isinstance(result, dict) else result.items
        accountMap = {a.get("id"): a for a in allAccounts}
        # Collect the user/mandate IDs present on this page only, so the
        # batch lookups below stay proportional to the page size.
        pageUserIds = set()
        pageMandateIds = set()
        for t in pageItems:
            accId = t.get("accountId")
            acc = accountMap.get(accId, {})
            mid = acc.get("mandateId")
            # Transaction creator wins; otherwise fall back to the account owner.
            uid = t.get("createdByUserId") or acc.get("userId")
            if uid:
                pageUserIds.add(uid)
            if mid:
                pageMandateIds.add(mid)
        appInterface = getAppInterface(self.currentUser)
        userMap: Dict[str, str] = {}
        if pageUserIds:
            users = appInterface.getUsersByIds(list(pageUserIds))
            for uid, u in users.items():
                dn = getattr(u, "displayName", None) or getattr(u, "username", None) or uid
                userMap[uid] = dn
        mandateMap: Dict[str, str] = {}
        if pageMandateIds:
            mandates = appInterface.getMandatesByIds(list(pageMandateIds))
            for mid, m in mandates.items():
                mandateMap[mid] = getattr(m, "label", None) or getattr(m, "name", None) or mid
        enriched = []
        for t in pageItems:
            row = dict(t)
            accId = row.get("accountId")
            acc = accountMap.get(accId, {})
            mid = acc.get("mandateId")
            txUserId = row.get("createdByUserId") or acc.get("userId")
            row["mandateId"] = mid
            row["mandateName"] = mandateMap.get(mid, "")
            row["userId"] = txUserId
            # Fall back to the raw user ID when no display name was resolved.
            row["userName"] = userMap.get(txUserId, txUserId) if txUserId else None
            enriched.append(row)
        totalItems = result.get("totalItems", 0) if isinstance(result, dict) else result.totalItems
        totalPages = result.get("totalPages", 0) if isinstance(result, dict) else result.totalPages
        return PaginatedResult(items=enriched, totalItems=totalItems, totalPages=totalPages)
    except Exception as e:
        logger.error(f"Error in getTransactionsForMandatesPaginated: {e}")
        return PaginatedResult(items=[], totalItems=0, totalPages=0)
def _buildScopeFilter(
    self,
    mandateIds: Optional[List[str]],
    scope: str = "all",
    userId: Optional[str] = None,
    startTs: Optional[float] = None,
    endTs: Optional[float] = None,
) -> tuple:
    """Build WHERE clause parts for scoped DEBIT-transaction queries.

    Returns (conditions, values, accountIds, allAccounts). When no
    account matches the mandate scope, conditions/values/accountIds
    are all empty lists while allAccounts still holds the scoped rows.
    The time window is half-open: sysCreatedAt in [startTs, endTs).
    """
    accounts = self.db.getRecordset(BillingAccount)
    if mandateIds:
        wanted = set(mandateIds)
        accounts = [acc for acc in accounts if acc.get("mandateId") in wanted]
    accountIds = [acc.get("id") for acc in accounts if acc.get("id")]
    if not accountIds:
        return [], [], [], accounts

    # Base scope: only DEBIT rows on the accounts in scope.
    conditions = ['"accountId" = ANY(%s)', '"transactionType" = %s']
    values: list = [accountIds, "DEBIT"]

    # Optional predicates, appended in a fixed order so the placeholder
    # values line up with the clause list.
    optionalClauses = [
        (scope == "personal" and bool(userId), '"createdByUserId" = %s', userId),
        (startTs is not None, '"sysCreatedAt" >= %s', startTs),
        (endTs is not None, '"sysCreatedAt" < %s', endTs),
    ]
    for enabled, clause, value in optionalClauses:
        if enabled:
            conditions.append(clause)
            values.append(value)
    return conditions, values, accountIds, accounts
def getTransactionStatisticsAggregated(
    self,
    mandateIds: Optional[List[str]],
    scope: str = "all",
    userId: Optional[str] = None,
    startTs: Optional[float] = None,
    endTs: Optional[float] = None,
    period: str = "month",
) -> Dict[str, Any]:
    """
    Pure SQL aggregation for statistics. No row-level loading.
    Returns: totalCost, transactionCount, costByProvider, costByModel,
    costByFeature, costByAccountId, timeSeries

    Only DEBIT transactions are aggregated (the base filter is built by
    _buildScopeFilter). The result additionally carries the internal key
    "_allAccounts" (scoped BillingAccount rows) for caller-side
    enrichment. Falls back to _emptyStats() on any error, rolling back
    the connection best-effort.

    Args:
        mandateIds: Mandates to include; None means all accounts.
        scope: "personal" restricts to rows created by userId.
        userId: Required when scope == "personal".
        startTs / endTs: Optional epoch-seconds window [startTs, endTs).
        period: "day" or "month" bucket size for the time series.
    """
    # Table name is derived from the model class; column names are quoted
    # identifiers, user data only ever travels through %s placeholders.
    table = BillingTransaction.__name__
    try:
        if not self.db._ensureTableExists(BillingTransaction):
            return self._emptyStats()
        conditions, values, accountIds, allAccounts = self._buildScopeFilter(
            mandateIds, scope, userId, startTs, endTs
        )
        if not accountIds:
            return self._emptyStats()
        # conditions always has >= 2 entries here, so whereClause is never
        # just " WHERE " — the "AND ... IS NOT NULL" append in query 6 relies on that.
        whereClause = " WHERE " + " AND ".join(conditions)
        self.db._ensure_connection()
        result: Dict[str, Any] = {}
        # NOTE(review): rows are accessed by column name (row["total"]),
        # which assumes a dict-like cursor row factory — confirm the
        # connection is configured accordingly (e.g. RealDictCursor).
        with self.db.connection.cursor() as cur:
            # 1) Totals
            cur.execute(
                f'SELECT COALESCE(SUM("amount"), 0) AS total, COUNT(*) AS cnt FROM "{table}"{whereClause}',
                values,
            )
            row = cur.fetchone()
            result["totalCost"] = round(float(row["total"]), 4)
            result["transactionCount"] = int(row["cnt"])
            # 2) GROUP BY aicoreProvider
            cur.execute(
                f'SELECT COALESCE("aicoreProvider", \'unknown\') AS grp, SUM("amount") AS total '
                f'FROM "{table}"{whereClause} GROUP BY grp ORDER BY total DESC',
                values,
            )
            result["costByProvider"] = {r["grp"]: round(float(r["total"]), 4) for r in cur.fetchall()}
            # 3) GROUP BY aicoreModel
            cur.execute(
                f'SELECT COALESCE("aicoreModel", \'unknown\') AS grp, SUM("amount") AS total '
                f'FROM "{table}"{whereClause} GROUP BY grp ORDER BY total DESC',
                values,
            )
            result["costByModel"] = {r["grp"]: round(float(r["total"]), 4) for r in cur.fetchall()}
            # 4) GROUP BY accountId (will be enriched to mandateName by caller)
            cur.execute(
                f'SELECT "accountId" AS grp, SUM("amount") AS total '
                f'FROM "{table}"{whereClause} GROUP BY grp ORDER BY total DESC',
                values,
            )
            result["costByAccountId"] = {r["grp"]: round(float(r["total"]), 4) for r in cur.fetchall()}
            # 5) GROUP BY accountId + featureCode (for costByFeature)
            cur.execute(
                f'SELECT "accountId", COALESCE("featureCode", \'unknown\') AS fc, SUM("amount") AS total '
                f'FROM "{table}"{whereClause} GROUP BY "accountId", fc ORDER BY total DESC',
                values,
            )
            result["costByAccountFeature"] = [
                {"accountId": r["accountId"], "featureCode": r["fc"], "total": round(float(r["total"]), 4)}
                for r in cur.fetchall()
            ]
            # 6) Time series via DATE_TRUNC on epoch timestamp
            if period == "day":
                truncExpr = "DATE_TRUNC('day', TO_TIMESTAMP(\"sysCreatedAt\"))"
            else:
                truncExpr = "DATE_TRUNC('month', TO_TIMESTAMP(\"sysCreatedAt\"))"
            cur.execute(
                f'SELECT {truncExpr} AS bucket, SUM("amount") AS total, COUNT(*) AS cnt '
                f'FROM "{table}"{whereClause} AND "sysCreatedAt" IS NOT NULL '
                f'GROUP BY bucket ORDER BY bucket',
                values,
            )
            timeSeries = []
            for r in cur.fetchall():
                bucket = r["bucket"]  # datetime returned by DATE_TRUNC, or NULL-safe fallback
                if period == "day":
                    label = bucket.strftime("%Y-%m-%d") if bucket else "unknown"
                else:
                    label = bucket.strftime("%Y-%m") if bucket else "unknown"
                timeSeries.append({
                    "date": label,
                    "cost": round(float(r["total"]), 4),
                    "count": int(r["cnt"]),
                })
            result["timeSeries"] = timeSeries
        # End the (read-only) transaction cleanly.
        self.db.connection.commit()
        result["_allAccounts"] = allAccounts
        return result
    except Exception as e:
        logger.error(f"Error in getTransactionStatisticsAggregated: {e}", exc_info=True)
        try:
            self.db.connection.rollback()
        except Exception:
            pass  # best effort — connection may already be unusable
        return self._emptyStats()
@staticmethod
def _emptyStats() -> Dict[str, Any]:
return {
"totalCost": 0.0,
"transactionCount": 0,
"costByProvider": {},
"costByModel": {},
"costByAccountId": {},
"costByAccountFeature": [],
"timeSeries": [],
"_allAccounts": [],
}
def getTransactionDistinctValues(
    self,
    mandateIds: Optional[List[str]],
    column: str,
    pagination: Optional[PaginationParams] = None,
    scope: str = "all",
    userId: Optional[str] = None,
) -> List[str]:
    """SQL DISTINCT for filter-values on BillingTransaction, scoped by mandates.

    Frontend column names are remapped to DB columns; the enriched
    columns mandateName/userName are resolved via batch lookups rather
    than a plain DISTINCT. Returns [] when no account is in scope or on error.
    """
    frontendToDb = {
        "createdAt": "sysCreatedAt",
        "mandateId": "accountId",
        "mandateName": "accountId",
    }
    targetColumn = frontendToDb.get(column, column)
    crossFilter = self._mapPaginationColumns(pagination) if pagination else None
    try:
        scopedAccounts = self.db.getRecordset(BillingAccount)
        if mandateIds:
            wanted = set(mandateIds)
            scopedAccounts = [acc for acc in scopedAccounts if acc.get("mandateId") in wanted]
        accountIds = [acc.get("id") for acc in scopedAccounts if acc.get("id")]
        if not accountIds:
            return []
        recordFilter: Dict[str, Any] = {"accountId": accountIds}
        if scope == "personal" and userId:
            recordFilter["createdByUserId"] = userId
        if column in ("mandateName", "userName"):
            return self._getEnrichedDistinctValues(column, scopedAccounts, recordFilter, crossFilter)
        return self.db.getDistinctColumnValues(
            BillingTransaction, targetColumn, crossFilter, recordFilter
        )
    except Exception as exc:
        logger.error(f"Error in getTransactionDistinctValues({column}): {exc}")
        return []
def _getEnrichedDistinctValues(
    self,
    column: str,
    allAccounts: List[Dict],
    recordFilter: Dict[str, Any],
    pagination: Optional[PaginationParams],
) -> List[str]:
    """Resolve enriched columns (mandateName, userName) via batch lookup.

    Args:
        column: "mandateName" or "userName"; anything else yields [].
        allAccounts: Scoped BillingAccount rows (source of mandate IDs).
        recordFilter: Filter applied to the DISTINCT query (userName path only).
        pagination: Optional cross-filter params for the DISTINCT query.

    Returns:
        De-duplicated display names, sorted case-insensitively.
    """
    from modules.interfaces.interfaceDbApp import getInterface as getAppInterface
    if column == "mandateName":
        # Mandate names come from the scoped account list, not from transactions.
        mandateIds = list({a.get("mandateId") for a in allAccounts if a.get("mandateId")})
        appInterface = getAppInterface(self.currentUser)
        mandates = appInterface.getMandatesByIds(mandateIds)
        return sorted(
            {getattr(m, "label", None) or getattr(m, "name", "") or mid for mid, m in mandates.items()},
            key=lambda v: v.lower(),
        )
    if column == "userName":
        dbCol = "createdByUserId"
        values = self.db.getDistinctColumnValues(BillingTransaction, dbCol, pagination, recordFilter)
        if not values:
            return []
        appInterface = getAppInterface(self.currentUser)
        users = appInterface.getUsersByIds(values)
        # Fall back to username, then the raw ID, when displayName is unset.
        return sorted(
            {getattr(u, "displayName", None) or getattr(u, "username", None) or uid for uid, u in users.items()},
            key=lambda v: v.lower(),
        )
    return []
def getUserTransactionsForMandates(self, mandateIds: List[str] = None, limit: int = 100) -> List[Dict[str, Any]]: def getUserTransactionsForMandates(self, mandateIds: List[str] = None, limit: int = 100) -> List[Dict[str, Any]]:
""" """
Get all transactions for specified mandates. Get all transactions for specified mandates.

View file

@ -1161,7 +1161,7 @@ class ChatObjects:
data={ data={
"type": "message", "type": "message",
"createdAt": message_timestamp, "createdAt": message_timestamp,
"item": chat_message.dict() "item": chat_message.model_dump()
}, },
event_category="chat" event_category="chat"
)) ))
@ -1535,7 +1535,7 @@ class ChatObjects:
data={ data={
"type": "log", "type": "log",
"createdAt": log_timestamp, "createdAt": log_timestamp,
"item": ChatLog(**createdLog).dict() "item": ChatLog(**createdLog).model_dump()
}, },
event_category="chat" event_category="chat"
)) ))

View file

@ -187,6 +187,7 @@ class ComponentObjects:
try: try:
# Initialize standard prompts # Initialize standard prompts
self._initializeStandardPrompts() self._initializeStandardPrompts()
self._seedUiLanguageSetsIfEmpty()
# Add other record initializations here # Add other record initializations here
@ -196,6 +197,44 @@ class ComponentObjects:
# Don't raise the error, just log it # Don't raise the error, just log it
# This allows the interface to be created even if initialization fails # This allows the interface to be created even if initialization fails
def _seedUiLanguageSetsIfEmpty(self) -> None:
    """Seed UiLanguageSet rows from the bundled JSON file when the table is empty.

    Best-effort: any failure is logged and swallowed so interface
    creation is never blocked by a missing or broken seed file.
    """
    try:
        import json
        from pathlib import Path
        from modules.datamodels.datamodelUiLanguage import UiLanguageSet

        if self.db.getRecordset(UiLanguageSet):
            return  # table already populated — nothing to seed
        seedFile = (
            Path(__file__).resolve().parent.parent
            / "migration"
            / "seedData"
            / "ui_language_seed.json"
        )
        if not seedFile.is_file():
            logger.warning("ui_language_seed.json not found, skipping UI i18n seed")
            return
        seedRows = json.loads(seedFile.read_text(encoding="utf-8"))
        timestamp = getUtcTimestamp()
        for entry in seedRows:
            self.db.recordCreate(UiLanguageSet, {
                "id": entry["id"],
                "label": entry["label"],
                "keys": entry.get("keys") or {},
                "status": entry.get("status") or "complete",
                "isDefault": bool(entry.get("isDefault", False)),
                "sysCreatedAt": timestamp,
                "sysModifiedBy": None,
                "sysCreatedBy": None,
                "sysModifiedAt": timestamp,
            })
        logger.info("Seeded UiLanguageSet rows from ui_language_seed.json")
    except Exception as e:
        logger.error(f"UI i18n seed failed: {e}")
def _initializeStandardPrompts(self): def _initializeStandardPrompts(self):
"""Initializes standard prompts if they don't exist yet.""" """Initializes standard prompts if they don't exist yet."""
try: try:

File diff suppressed because it is too large Load diff

View file

@ -179,47 +179,6 @@ def _isMemberOfMandate(ctx: RequestContext, targetMandateId: str) -> bool:
return False return False
def _filterTransactionsByScope(transactions: list, scope: BillingDataScope) -> list:
    """
    Filter a list of transaction dicts down to what the user's
    BillingDataScope allows.

    Rules:
    - SysAdmin: no filter
    - Mandate-Admin: all transactions in their admin mandates
    - Feature-Instance-Admin: transactions for their admin feature instances
    - Regular user: only transactions where createdByUserId/userId matches
    """
    if scope.isGlobalAdmin:
        return transactions

    adminMandates = set(scope.adminMandateIds)
    adminInstances = set(scope.adminFeatureInstanceIds)
    memberMandates = set(scope.memberMandateIds)

    def _visible(tx: dict) -> bool:
        # Checked in decreasing privilege order, mirroring the rules above.
        mandateId = tx.get("mandateId")
        if mandateId and mandateId in adminMandates:
            return True
        instanceId = tx.get("featureInstanceId")
        if instanceId and instanceId in adminInstances:
            return True
        if mandateId and mandateId in memberMandates:
            owner = tx.get("createdByUserId") or tx.get("userId")
            return bool(owner) and owner == scope.userId
        return False

    return [tx for tx in transactions if _visible(tx)]
# ============================================================================= # =============================================================================
# Request/Response Models # Request/Response Models
@ -1429,32 +1388,20 @@ def _enrichTransactionRows(transactions) -> List[Dict[str, Any]]:
return result return result
def _buildTransactionsList(ctx: RequestContext, targetMandateId: str) -> List[Dict[str, Any]]: def _buildTransactionsList(ctx: RequestContext, targetMandateId: str, paginationParams: Optional[PaginationParams] = None) -> tuple:
"""Build the full enriched transactions list for a mandate.""" """Build enriched transactions for a mandate. Returns (items, paginatedResult|None)."""
billingInterface = getBillingInterface(ctx.user, targetMandateId) billingInterface = getBillingInterface(ctx.user, targetMandateId)
transactions = billingInterface.getTransactionsByMandate(targetMandateId, limit=5000)
result = [] if paginationParams:
for t in transactions: paginatedResult = billingInterface.getTransactionsByMandate(targetMandateId, pagination=paginationParams)
row = TransactionResponse( transactions = paginatedResult.items if hasattr(paginatedResult, 'items') else paginatedResult.get("items", [])
id=t.get("id"), else:
accountId=t.get("accountId"), defaultPagination = PaginationParams(page=1, pageSize=200, sort=[{"field": "sysCreatedAt", "direction": "desc"}])
transactionType=TransactionTypeEnum(t.get("transactionType", "DEBIT")), paginatedResult = billingInterface.getTransactionsByMandate(targetMandateId, pagination=defaultPagination)
amount=t.get("amount", 0.0), transactions = paginatedResult.items if hasattr(paginatedResult, 'items') else paginatedResult.get("items", [])
description=t.get("description", ""),
referenceType=ReferenceTypeEnum(t["referenceType"]) if t.get("referenceType") else None,
workflowId=t.get("workflowId"),
featureCode=t.get("featureCode"),
featureInstanceId=t.get("featureInstanceId"),
aicoreProvider=t.get("aicoreProvider"),
aicoreModel=t.get("aicoreModel"),
createdByUserId=t.get("createdByUserId"),
createdAt=t.get("sysCreatedAt")
)
result.append(row.model_dump())
_attachCreatedByUserNamesToTransactionRows(result) result = _enrichTransactionRows(transactions)
return result return result, paginatedResult
@router.get("/admin/transactions/{targetMandateId}") @router.get("/admin/transactions/{targetMandateId}")
@ -1463,7 +1410,6 @@ def getTransactionsAdmin(
request: Request, request: Request,
targetMandateId: str = Path(..., description="Mandate ID"), targetMandateId: str = Path(..., description="Mandate ID"),
pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams"), pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams"),
limit: int = Query(default=100, ge=1, le=1000),
ctx: RequestContext = Depends(getRequestContext), ctx: RequestContext = Depends(getRequestContext),
): ):
"""Get all transactions for a mandate with pagination support.""" """Get all transactions for a mandate with pagination support."""
@ -1480,26 +1426,22 @@ def getTransactionsAdmin(
except (json.JSONDecodeError, ValueError) as e: except (json.JSONDecodeError, ValueError) as e:
raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}") raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
enriched, paginatedResult = _buildTransactionsList(ctx, targetMandateId, paginationParams)
totalItems = getattr(paginatedResult, 'totalItems', len(enriched)) if paginatedResult else len(enriched)
totalPages = getattr(paginatedResult, 'totalPages', 0) if paginatedResult else 0
paginationMeta = None
if paginationParams: if paginationParams:
# DB-level pagination — enrich only the returned page paginationMeta = PaginationMetadata(
billingInterface = getBillingInterface(ctx.user, targetMandateId)
result = billingInterface.getTransactionsByMandate(targetMandateId, pagination=paginationParams)
transactions = result.items if hasattr(result, 'items') else result
enrichedItems = _enrichTransactionRows(transactions)
return {
"items": enrichedItems,
"pagination": PaginationMetadata(
currentPage=paginationParams.page, currentPage=paginationParams.page,
pageSize=paginationParams.pageSize, pageSize=paginationParams.pageSize,
totalItems=result.totalItems if hasattr(result, 'totalItems') else len(enrichedItems), totalItems=totalItems,
totalPages=result.totalPages if hasattr(result, 'totalPages') else 0, totalPages=totalPages,
sort=paginationParams.sort, sort=paginationParams.sort,
filters=paginationParams.filters, filters=paginationParams.filters,
).model_dump(), ).model_dump()
}
enriched = _buildTransactionsList(ctx, targetMandateId) return {"items": enriched, "pagination": paginationMeta}
return {"items": enriched, "pagination": None}
except HTTPException: except HTTPException:
raise raise
@ -1535,16 +1477,15 @@ def getTransactionFilterValues(
except (json.JSONDecodeError, ValueError): except (json.JSONDecodeError, ValueError):
pass pass
# Try SQL DISTINCT for native DB columns; fallback to in-memory for enriched columns (e.g. userName)
try: try:
rootBillingInterface = _getRootInterface() billingInterface = getBillingInterface(ctx.user, targetMandateId)
recordFilter = {"mandateId": targetMandateId} return billingInterface.getTransactionDistinctValues(
values = rootBillingInterface.db.getDistinctColumnValues( mandateIds=[targetMandateId],
BillingTransaction, column, crossFilterParams, recordFilter column=column,
pagination=crossFilterParams,
) )
return sorted(values, key=lambda v: str(v).lower())
except Exception: except Exception:
enriched = _buildTransactionsList(ctx, targetMandateId) enriched, _ = _buildTransactionsList(ctx, targetMandateId)
crossFiltered = _applyFiltersAndSort(enriched, crossFilterParams) crossFiltered = _applyFiltersAndSort(enriched, crossFilterParams)
return _extractDistinctValues(crossFiltered, column) return _extractDistinctValues(crossFiltered, column)
except Exception as e: except Exception as e:
@ -1703,8 +1644,6 @@ def getUserViewStatistics(
- period='day': returns daily time series for the given month/year - period='day': returns daily time series for the given month/year
""" """
try: try:
from datetime import timedelta
if year is None: if year is None:
year = datetime.now().year year = datetime.now().year
@ -1713,10 +1652,8 @@ def getUserViewStatistics(
billingInterface = getBillingInterface(ctx.user, ctx.mandateId) billingInterface = getBillingInterface(ctx.user, ctx.mandateId)
# Evaluate RBAC scope
rbacScope = _getBillingDataScope(ctx.user) rbacScope = _getBillingDataScope(ctx.user)
# Determine mandate IDs for data loading
if rbacScope.isGlobalAdmin: if rbacScope.isGlobalAdmin:
loadMandateIds = None loadMandateIds = None
else: else:
@ -1725,148 +1662,72 @@ def getUserViewStatistics(
logger.warning("No mandate IDs found for user") logger.warning("No mandate IDs found for user")
return ViewStatisticsResponse() return ViewStatisticsResponse()
# Scope=mandate: restrict to specific mandate
if scope == "mandate" and mandateId: if scope == "mandate" and mandateId:
loadMandateIds = [mandateId] loadMandateIds = [mandateId]
# Get all transactions personalUserId = str(ctx.user.id) if scope == "personal" else None
allTransactions = billingInterface.getUserTransactionsForMandates(loadMandateIds, limit=10000)
# Apply RBAC filter (respects admin/user roles)
allTransactions = _filterTransactionsByScope(allTransactions, rbacScope)
# Scope=personal: further filter to only own transactions
if scope == "personal":
userId = str(ctx.user.id)
allTransactions = [
t for t in allTransactions
if (t.get("createdByUserId") or t.get("userId")) == userId
]
logger.info(f"View statistics: {len(allTransactions)} RBAC-filtered transactions for period={period}, year={year}, month={month}")
# Calculate date range
if period == "day": if period == "day":
startDate = date(year, month, 1) startDate = date(year, month, 1)
if month == 12: endDate = date(year + 1, 1, 1) if month == 12 else date(year, month + 1, 1)
endDate = date(year + 1, 1, 1)
else:
endDate = date(year, month + 1, 1)
else: else:
startDate = date(year, 1, 1) startDate = date(year, 1, 1)
endDate = date(year + 1, 1, 1) endDate = date(year + 1, 1, 1)
# Filter by date range and only DEBIT transactions startTs = datetime.combine(startDate, datetime.min.time()).timestamp()
debits = [] endTs = datetime.combine(endDate, datetime.min.time()).timestamp()
skippedNoDate = 0
skippedDateRange = 0
skippedNotDebit = 0
for t in allTransactions: agg = billingInterface.getTransactionStatisticsAggregated(
createdAt = t.get("sysCreatedAt") mandateIds=loadMandateIds,
if not createdAt: scope=scope,
skippedNoDate += 1 userId=personalUserId,
continue startTs=startTs,
endTs=endTs,
period=period,
)
# Parse date from various formats (DB stores as DOUBLE PRECISION / Unix timestamp) logger.info(
txDate = None f"View statistics (SQL-aggregated): totalCost={agg['totalCost']}, "
if isinstance(createdAt, (int, float)): f"count={agg['transactionCount']}, period={period}, year={year}, month={month}"
txDate = datetime.fromtimestamp(createdAt).date() )
elif isinstance(createdAt, datetime):
txDate = createdAt.date()
elif isinstance(createdAt, date) and not isinstance(createdAt, datetime):
txDate = createdAt
elif isinstance(createdAt, str):
try:
# Try as float string first (Unix timestamp)
txDate = datetime.fromtimestamp(float(createdAt)).date()
except (ValueError, TypeError):
try:
txDate = datetime.fromisoformat(createdAt.replace("Z", "+00:00")).date()
except (ValueError, TypeError):
skippedNoDate += 1
continue
else:
skippedNoDate += 1
continue
if txDate < startDate or txDate >= endDate: allAccounts = agg.get("_allAccounts", [])
skippedDateRange += 1 accountToMandate: Dict[str, str] = {}
continue for acc in allAccounts:
accountToMandate[acc.get("id", "")] = acc.get("mandateId", "")
# Compare transactionType - handle both string and enum from modules.interfaces.interfaceDbApp import getInterface as getAppInterface
txType = t.get("transactionType") mandateIdsForLookup = list(set(accountToMandate.values()))
txTypeStr = str(txType) if txType is not None else "" mandateMap: Dict[str, str] = {}
if txTypeStr != "DEBIT" and txTypeStr != "TransactionTypeEnum.DEBIT": if mandateIdsForLookup:
# Also check .value for enum objects rootIface = getAppInterface(ctx.user)
txTypeValue = getattr(txType, 'value', txTypeStr) mandatesById = rootIface.getMandatesByIds(mandateIdsForLookup)
if txTypeValue != "DEBIT": for mid, m in mandatesById.items():
skippedNotDebit += 1 mandateMap[mid] = getattr(m, "name", mid) or mid
continue
t["_txDate"] = txDate def _mandateName(accountId: str) -> str:
debits.append(t) mid = accountToMandate.get(accountId, "")
return mandateMap.get(mid, mid or "unknown")
logger.info(f"View statistics: {len(debits)} DEBIT transactions after filter. "
f"Skipped: noDate={skippedNoDate}, dateRange={skippedDateRange}, notDebit={skippedNotDebit}")
# Aggregate totals
totalCost = sum(t.get("amount", 0) for t in debits)
costByProvider: Dict[str, float] = {}
costByModel: Dict[str, float] = {}
costByFeature: Dict[str, float] = {}
costByMandate: Dict[str, float] = {} costByMandate: Dict[str, float] = {}
for accId, total in agg.get("costByAccountId", {}).items():
name = _mandateName(accId)
costByMandate[name] = costByMandate.get(name, 0) + total
for t in debits: costByFeature: Dict[str, float] = {}
provider = t.get("aicoreProvider") or "unknown" for entry in agg.get("costByAccountFeature", []):
costByProvider[provider] = costByProvider.get(provider, 0) + t.get("amount", 0) name = _mandateName(entry["accountId"])
key = f"{name} / {entry['featureCode']}"
model = t.get("aicoreModel") or "unknown" costByFeature[key] = costByFeature.get(key, 0) + entry["total"]
costByModel[model] = costByModel.get(model, 0) + t.get("amount", 0)
mandate = t.get("mandateName") or t.get("mandateId") or "unknown"
featureCode = t.get("featureCode") or "unknown"
featureKey = f"{mandate} / {featureCode}"
costByFeature[featureKey] = costByFeature.get(featureKey, 0) + t.get("amount", 0)
mandate = t.get("mandateName") or t.get("mandateId") or "unknown"
costByMandate[mandate] = costByMandate.get(mandate, 0) + t.get("amount", 0)
# Build time series (raw data only, no display logic)
timeSeries = []
if period == "day":
numDays = (endDate - startDate).days
for day in range(numDays):
d = startDate + timedelta(days=day)
dayCost = sum(t.get("amount", 0) for t in debits if t["_txDate"] == d)
dayCount = sum(1 for t in debits if t["_txDate"] == d)
if dayCost > 0 or dayCount > 0:
timeSeries.append({
"date": d.isoformat(),
"cost": round(dayCost, 4),
"count": dayCount
})
else:
for m in range(1, 13):
mStart = date(year, m, 1)
mEnd = date(year, m + 1, 1) if m < 12 else date(year + 1, 1, 1)
monthCost = sum(t.get("amount", 0) for t in debits if mStart <= t["_txDate"] < mEnd)
monthCount = sum(1 for t in debits if mStart <= t["_txDate"] < mEnd)
timeSeries.append({
"date": f"{year}-{m:02d}",
"cost": round(monthCost, 4),
"count": monthCount
})
return ViewStatisticsResponse( return ViewStatisticsResponse(
totalCost=round(totalCost, 4), totalCost=agg["totalCost"],
transactionCount=len(debits), transactionCount=agg["transactionCount"],
costByProvider=costByProvider, costByProvider=agg.get("costByProvider", {}),
costByModel=costByModel, costByModel=agg.get("costByModel", {}),
costByFeature=costByFeature, costByFeature=costByFeature,
costByMandate=costByMandate, costByMandate=costByMandate,
timeSeries=timeSeries timeSeries=agg.get("timeSeries", []),
) )
except Exception as e: except Exception as e:
@ -1879,24 +1740,26 @@ def getUserViewStatistics(
def getUserViewTransactions( def getUserViewTransactions(
request: Request, request: Request,
pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"), pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
scope: str = Query(default="all", description="Scope: 'personal' (own costs only), 'mandate' (filter by mandateId), 'all' (RBAC-filtered)"),
mandateId: Optional[str] = Query(None, description="Mandate ID filter (used with scope='mandate')"),
ctx: RequestContext = Depends(getRequestContext) ctx: RequestContext = Depends(getRequestContext)
) -> PaginatedResponse[UserTransactionResponse]: ) -> PaginatedResponse[UserTransactionResponse]:
""" """
Get user-level transactions with pagination support. Get user-level transactions with pagination support.
RBAC filtering: Scope (same contract as /view/statistics):
- SysAdmin: sees all user transactions across all mandates - personal: only the current user's own transactions (ignores admin role)
- Mandate-Admin: sees all user transactions for mandates they administrate - mandate: transactions for a specific mandate (requires mandateId parameter)
- Feature-Instance-Admin: sees transactions for their feature instances - all: RBAC-filtered (SysAdmin sees everything, admin sees mandate, user sees own)
- Regular user: sees only their own transactions
Query Parameters: Query Parameters:
- pagination: JSON-encoded PaginationParams object, or None for no pagination - pagination: JSON-encoded PaginationParams object, or None for no pagination
- scope: 'personal', 'mandate', or 'all'
- mandateId: required when scope='mandate'
""" """
try: try:
billingInterface = getBillingInterface(ctx.user, ctx.mandateId) billingInterface = getBillingInterface(ctx.user, ctx.mandateId)
# Parse pagination params
paginationParams = None paginationParams = None
if pagination: if pagination:
import json import json
@ -1904,52 +1767,34 @@ def getUserViewTransactions(
paginationDict = normalize_pagination_dict(paginationDict) paginationDict = normalize_pagination_dict(paginationDict)
paginationParams = PaginationParams(**paginationDict) paginationParams = PaginationParams(**paginationDict)
# Evaluate RBAC scope rbacScope = _getBillingDataScope(ctx.user)
scope = _getBillingDataScope(ctx.user)
# Determine mandate IDs for data loading if rbacScope.isGlobalAdmin:
if scope.isGlobalAdmin: loadMandateIds = None
mandateIds = None # Load all
else: else:
# Load data for all mandates the user belongs to (admin + member) loadMandateIds = rbacScope.adminMandateIds + rbacScope.memberMandateIds
mandateIds = scope.adminMandateIds + scope.memberMandateIds if not loadMandateIds:
if not mandateIds:
return PaginatedResponse(items=[], pagination=None) return PaginatedResponse(items=[], pagination=None)
allTransactions = billingInterface.getUserTransactionsForMandates(mandateIds, limit=10000) if scope == "mandate" and mandateId:
loadMandateIds = [mandateId]
# Apply RBAC filter effectiveScope = scope
allTransactions = _filterTransactionsByScope(allTransactions, scope) personalUserId = str(ctx.user.id) if scope == "personal" else None
logger.debug(f"RBAC-filtered {len(allTransactions)} transactions for user {ctx.user.id}") if not paginationParams:
paginationParams = PaginationParams(page=1, pageSize=50)
# Convert to response objects as dicts for filtering/sorting result = billingInterface.getTransactionsForMandatesPaginated(
transactionDicts = [] mandateIds=loadMandateIds,
for t in allTransactions: pagination=paginationParams,
transactionDicts.append({ scope=effectiveScope,
"id": t.get("id"), userId=personalUserId,
"accountId": t.get("accountId"), )
"transactionType": t.get("transactionType", "DEBIT"),
"amount": t.get("amount", 0.0),
"description": t.get("description", ""),
"referenceType": t.get("referenceType"),
"workflowId": t.get("workflowId"),
"featureCode": t.get("featureCode"),
"featureInstanceId": t.get("featureInstanceId"),
"aicoreProvider": t.get("aicoreProvider"),
"aicoreModel": t.get("aicoreModel"),
"createdByUserId": t.get("createdByUserId"),
"createdAt": t.get("sysCreatedAt"),
"mandateId": t.get("mandateId"),
"mandateName": t.get("mandateName"),
"userId": t.get("userId"),
"userName": t.get("userName"),
})
# Apply filters and sorting logger.debug(f"SQL-paginated {result.totalItems} transactions for user {ctx.user.id} "
filteredDicts = _applyFiltersAndSort(transactionDicts, paginationParams) f"(scope={scope}, mandateId={mandateId}, page={paginationParams.page})")
# Convert to response models
def _toResponse(d): def _toResponse(d):
return UserTransactionResponse( return UserTransactionResponse(
id=d.get("id"), id=d.get("id"),
@ -1964,37 +1809,24 @@ def getUserViewTransactions(
aicoreProvider=d.get("aicoreProvider"), aicoreProvider=d.get("aicoreProvider"),
aicoreModel=d.get("aicoreModel"), aicoreModel=d.get("aicoreModel"),
createdByUserId=d.get("createdByUserId"), createdByUserId=d.get("createdByUserId"),
createdAt=d.get("createdAt"), createdAt=d.get("sysCreatedAt") or d.get("createdAt"),
mandateId=d.get("mandateId"), mandateId=d.get("mandateId"),
mandateName=d.get("mandateName"), mandateName=d.get("mandateName"),
userId=d.get("userId"), userId=d.get("userId"),
userName=d.get("userName") userName=d.get("userName")
) )
if paginationParams:
import math
totalItems = len(filteredDicts)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
startIdx = (paginationParams.page - 1) * paginationParams.pageSize
endIdx = startIdx + paginationParams.pageSize
paginatedDicts = filteredDicts[startIdx:endIdx]
return PaginatedResponse( return PaginatedResponse(
items=[_toResponse(d) for d in paginatedDicts], items=[_toResponse(d) for d in result.items],
pagination=PaginationMetadata( pagination=PaginationMetadata(
currentPage=paginationParams.page, currentPage=paginationParams.page,
pageSize=paginationParams.pageSize, pageSize=paginationParams.pageSize,
totalItems=totalItems, totalItems=result.totalItems,
totalPages=totalPages, totalPages=result.totalPages,
sort=paginationParams.sort, sort=paginationParams.sort,
filters=paginationParams.filters filters=paginationParams.filters,
) )
) )
else:
return PaginatedResponse(
items=[_toResponse(d) for d in filteredDicts],
pagination=None
)
except Exception as e: except Exception as e:
logger.error(f"Error getting user view transactions: {e}") logger.error(f"Error getting user view transactions: {e}")
@ -2007,42 +1839,49 @@ def getUserViewTransactionsFilterValues(
request: Request, request: Request,
column: str = Query(..., description="Column key"), column: str = Query(..., description="Column key"),
pagination: Optional[str] = Query(None, description="JSON-encoded current filters"), pagination: Optional[str] = Query(None, description="JSON-encoded current filters"),
scope: str = Query(default="all", description="Scope: 'personal', 'mandate', 'all'"),
mandateId: Optional[str] = Query(None, description="Mandate ID filter (used with scope='mandate')"),
ctx: RequestContext = Depends(getRequestContext) ctx: RequestContext = Depends(getRequestContext)
): ):
"""Return distinct filter values for a column in user transactions.""" """Return distinct filter values for a column in user transactions (SQL DISTINCT)."""
try: try:
billingInterface = getBillingInterface(ctx.user, ctx.mandateId) billingInterface = getBillingInterface(ctx.user, ctx.mandateId)
scope = _getBillingDataScope(ctx.user) rbacScope = _getBillingDataScope(ctx.user)
if scope.isGlobalAdmin:
mandateIds = None if rbacScope.isGlobalAdmin:
loadMandateIds = None
else: else:
mandateIds = scope.adminMandateIds + scope.memberMandateIds loadMandateIds = rbacScope.adminMandateIds + rbacScope.memberMandateIds
if not mandateIds: if not loadMandateIds:
return [] return []
allTransactions = billingInterface.getUserTransactionsForMandates(mandateIds, limit=10000)
allTransactions = _filterTransactionsByScope(allTransactions, scope) if scope == "mandate" and mandateId:
transactionDicts = [] loadMandateIds = [mandateId]
for t in allTransactions:
transactionDicts.append({ crossFilterParams = None
"id": t.get("id"), if pagination:
"accountId": t.get("accountId"), try:
"transactionType": t.get("transactionType", "DEBIT"), import json
"amount": t.get("amount", 0.0), paginationDict = json.loads(pagination)
"description": t.get("description", ""), if paginationDict:
"referenceType": t.get("referenceType"), paginationDict = normalize_pagination_dict(paginationDict)
"workflowId": t.get("workflowId"), filters = paginationDict.get("filters", {})
"featureCode": t.get("featureCode"), filters.pop(column, None)
"featureInstanceId": t.get("featureInstanceId"), paginationDict["filters"] = filters
"aicoreProvider": t.get("aicoreProvider"), paginationDict.pop("sort", None)
"aicoreModel": t.get("aicoreModel"), crossFilterParams = PaginationParams(**paginationDict)
"createdByUserId": t.get("createdByUserId"), except (json.JSONDecodeError, ValueError):
"createdAt": t.get("sysCreatedAt"), pass
"mandateId": t.get("mandateId"),
"mandateName": t.get("mandateName"), personalUserId = str(ctx.user.id) if scope == "personal" else None
"userId": t.get("userId"),
"userName": t.get("userName"), return billingInterface.getTransactionDistinctValues(
}) mandateIds=loadMandateIds,
return _handleFilterValuesRequest(transactionDicts, column, pagination) column=column,
pagination=crossFilterParams,
scope=scope,
userId=personalUserId,
)
except Exception as e: except Exception as e:
logger.error(f"Error getting filter values for user transactions: {e}") logger.error(f"Error getting filter values for user transactions: {e}")
raise HTTPException(status_code=500, detail=str(e)) raise HTTPException(status_code=500, detail=str(e))

View file

@ -423,23 +423,18 @@ def get_users(
detail="No admin access to any mandate" detail="No admin access to any mandate"
) )
# Aggregate users across all admin mandates (deduplicate by user ID) from modules.datamodels.datamodelMembership import UserMandate as UserMandateModel
seenUserIds = set() allUM = rootInterface.db.getRecordset(UserMandateModel, recordFilter={"mandateId": adminMandateIds})
allUsers = [] uniqueUserIds = list({
for mid in adminMandateIds: (um.get("userId") if isinstance(um, dict) else getattr(um, "userId", None))
mandateUsers = rootInterface.getUsersByMandate(mid) for um in (allUM or [])
if isinstance(mandateUsers, list): if (um.get("userId") if isinstance(um, dict) else getattr(um, "userId", None))
users = mandateUsers })
elif hasattr(mandateUsers, 'items'): batchUsers = rootInterface.getUsersByIds(uniqueUserIds) if uniqueUserIds else {}
users = mandateUsers.items allUsers = [
else: u.model_dump() if hasattr(u, 'model_dump') else vars(u)
users = [] for u in batchUsers.values()
for u in users: ]
uid = u.get("id") if isinstance(u, dict) else getattr(u, "id", None)
if uid and uid not in seenUserIds:
seenUserIds.add(uid)
userData = u if isinstance(u, dict) else u.model_dump() if hasattr(u, 'model_dump') else vars(u)
allUsers.append(userData)
# Apply server-side filtering and sorting # Apply server-side filtering and sorting
filteredUsers = _applyFiltersAndSort(allUsers, paginationParams) filteredUsers = _applyFiltersAndSort(allUsers, paginationParams)
@ -541,17 +536,15 @@ def get_user_filter_values(
break break
if not adminMandateIds: if not adminMandateIds:
return [] return []
seenUserIds = set() from modules.datamodels.datamodelMembership import UserMandate as UserMandateModel
users = [] allUM = rootInterface.db.getRecordset(UserMandateModel, recordFilter={"mandateId": adminMandateIds})
for mid in adminMandateIds: uniqueUserIds = list({
mandateUsers = rootInterface.getUsersByMandate(mid) (um.get("userId") if isinstance(um, dict) else getattr(um, "userId", None))
uList = mandateUsers if isinstance(mandateUsers, list) else (mandateUsers.items if hasattr(mandateUsers, 'items') else []) for um in (allUM or [])
for u in uList: if (um.get("userId") if isinstance(um, dict) else getattr(um, "userId", None))
uid = u.get("id") if isinstance(u, dict) else getattr(u, "id", None) })
if uid and uid not in seenUserIds: batchUsers = rootInterface.getUsersByIds(uniqueUserIds) if uniqueUserIds else {}
seenUserIds.add(uid) items = [u.model_dump() if hasattr(u, 'model_dump') else vars(u) for u in batchUsers.values()]
users.append(u)
items = [u.model_dump() if hasattr(u, 'model_dump') else u for u in users]
return _handleFilterValuesRequest(items, column, pagination) return _handleFilterValuesRequest(items, column, pagination)
except HTTPException: except HTTPException:
raise raise

711
modules/routes/routeI18n.py Normal file
View file

@ -0,0 +1,711 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Public and authenticated routes for UI language sets (DB-backed i18n).
AI translation pipeline:
- create_language_set background job translates all keys via AiObjects
- update_language_set synchronous AI pass for added keys
- update_all iterates non-de sets
"""
from __future__ import annotations
import asyncio
import json
import logging
import math
import re
from pathlib import Path
from typing import Any, Dict, List, Optional, Set
from fastapi import APIRouter, BackgroundTasks, Depends, File, HTTPException, Request, UploadFile, status
from fastapi.responses import Response
from pydantic import BaseModel, Field
from modules.auth import getCurrentUser, requireSysAdminRole
from modules.connectors.connectorDbPostgre import _get_cached_connector
from modules.datamodels.datamodelAi import (
AiCallOptions,
AiCallRequest,
AiCallResponse,
OperationTypeEnum,
PriorityEnum,
)
from modules.datamodels.datamodelUiLanguage import UiLanguageSet
from modules.datamodels.datamodelUam import User
from modules.datamodels.datamodelNotification import NotificationType
from modules.interfaces.interfaceDbManagement import getInterface as getMgmtInterface
from modules.routes.routeNotifications import _createNotification
from modules.shared.configuration import APP_CONFIG
from modules.shared.timeUtils import getUtcTimestamp
logger = logging.getLogger(__name__)
router = APIRouter(
prefix="/api/i18n",
tags=["i18n"],
responses={404: {"description": "Not found"}},
)
_MIN_AI_BILLING_ESTIMATE_CHF = 0.01
_TRANSLATE_BATCH_SIZE = 80
def _publicMgmtDb():
    """Return a cached connector to the poweron_management DB for public i18n reads."""
    cfg = APP_CONFIG
    return _get_cached_connector(
        dbHost=cfg.get("DB_HOST", "localhost"),
        dbPort=int(cfg.get("DB_PORT", 5432)),
        dbDatabase="poweron_management",
        dbUser=cfg.get("DB_USER"),
        dbPassword=cfg.get("DB_PASSWORD_SECRET"),
        userId="__i18n_public__",
    )
def _row_to_public(row: dict) -> dict:
keys = row.get("keys") or {}
return {
"code": row["id"],
"label": row.get("label"),
"status": row.get("status"),
"keys": keys if isinstance(keys, dict) else {},
}
def _load_master_de_keys(db) -> Dict[str, str]:
    """Fetch the German master key/value map; empty dict when absent or malformed."""
    recordset = db.getRecordset(UiLanguageSet, recordFilter={"id": "de"})
    if not recordset:
        return {}
    rawKeys = recordset[0].get("keys") or {}
    if not isinstance(rawKeys, dict):
        return {}
    # Copy so callers can mutate without touching cached rows.
    return dict(rawKeys)
def _userMemberMandateIds(currentUser: User) -> List[str]:
    """Return the user's mandate ids (member role), deduplicated, order preserved."""
    from modules.interfaces.interfaceDbApp import getRootInterface

    memberships = getRootInterface().getUserMandates(str(currentUser.id))
    ordered: Dict[str, None] = {}
    for membership in memberships:
        # Memberships may arrive as objects or plain dicts; try attribute first.
        mandateId = getattr(membership, "mandateId", None)
        if not mandateId and isinstance(membership, dict):
            mandateId = membership.get("mandateId")
        if mandateId:
            ordered[str(mandateId)] = None
    return list(ordered)
def _mandatePassesAiPoolBilling(currentUser: User, mandateId: str, userId: str) -> bool:
    """True when the mandate's AI pool can cover the minimum billing estimate."""
    from modules.interfaces.interfaceDbBilling import getInterface as getBillingInterface

    billing = getBillingInterface(currentUser, mandateId)
    balanceCheck = billing.checkBalance(mandateId, userId, _MIN_AI_BILLING_ESTIMATE_CHF)
    return bool(balanceCheck.allowed)
# ---------------------------------------------------------------------------
# AI Translation helpers
# ---------------------------------------------------------------------------
_aiObjectsSingleton = None
async def _getAiObjects():
    """Lazily create and memoize the process-wide AiObjects instance.

    Same singleton pattern as routeFeatureWorkspace.
    """
    global _aiObjectsSingleton
    if _aiObjectsSingleton is not None:
        return _aiObjectsSingleton
    from modules.interfaces.interfaceAiObjects import AiObjects

    _aiObjectsSingleton = await AiObjects.create()
    return _aiObjectsSingleton
def _makeBillingCallback(currentUser: User, mandateId: str):
    """Build a callback that books the cost of each successful AI response."""
    from modules.serviceCenter.services.serviceBilling.mainServiceBilling import getService as getBillingService

    billingService = getBillingService(currentUser, mandateId)

    def _record(response: AiCallResponse) -> None:
        # Ignore missing or errored responses and zero-cost calls.
        if not response or getattr(response, "errorCount", 0) > 0:
            return
        priceCHF = getattr(response, "priceCHF", 0.0)
        if not priceCHF or priceCHF <= 0:
            return
        modelName = getattr(response, "modelName", None) or "unknown"
        try:
            billingService.recordUsage(
                priceCHF=priceCHF,
                aicoreProvider=getattr(response, "provider", None) or "unknown",
                aicoreModel=modelName,
                description=f"i18n translation ({modelName})",
                processingTime=getattr(response, "processingTime", None),
                bytesSent=getattr(response, "bytesSent", None),
                bytesReceived=getattr(response, "bytesReceived", None),
            )
        except Exception as e:
            # Billing failure must never break the translation pipeline.
            logger.error("i18n billing callback failed: %s", e)

    return _record
async def _translateBatch(
    keysToTranslate: Dict[str, str],
    targetLanguageLabel: str,
    targetCode: str,
    billingCallback=None,
) -> Dict[str, str]:
    """Translate a batch of German-key → German-value pairs into *targetLanguageLabel*.
    Returns dict { germanKey: translatedValue }.
    Splits into sub-batches of _TRANSLATE_BATCH_SIZE to stay within token limits.

    Failed sub-batches are logged and skipped, so the returned dict may be
    missing keys; callers decide how to handle untranslated entries.
    """
    if not keysToTranslate:
        return {}
    aiObjects = await _getAiObjects()
    allKeys = list(keysToTranslate.items())
    totalBatches = math.ceil(len(allKeys) / _TRANSLATE_BATCH_SIZE)
    result: Dict[str, str] = {}
    for batchIdx in range(totalBatches):
        # Slice the current sub-batch out of the flat key/value list.
        chunk = allKeys[batchIdx * _TRANSLATE_BATCH_SIZE : (batchIdx + 1) * _TRANSLATE_BATCH_SIZE]
        payload = {k: v for k, v in chunk}
        jsonPayload = json.dumps(payload, ensure_ascii=False)
        # German system prompt instructing a pure-JSON answer with identical keys.
        systemPrompt = (
            f"Du bist ein professioneller Übersetzer für Software-UI-Texte. "
            f"Übersetze die folgenden deutschen UI-Labels ins {targetLanguageLabel} (ISO {targetCode}). "
            f"Behalte Platzhalter wie {{variable}} exakt bei. "
            f"Antworte NUR mit einem JSON-Objekt — gleiche Keys, übersetzte Values. Kein Markdown, kein Kommentar."
        )
        request = AiCallRequest(
            prompt=f"Übersetze diese UI-Labels:\n{jsonPayload}",
            context=systemPrompt,
            options=AiCallOptions(
                operationType=OperationTypeEnum.DATA_GENERATE,
                priority=PriorityEnum.BALANCED,
                compressPrompt=False,
                compressContext=False,
                resultFormat="json",
                temperature=0.2,
            ),
        )
        # NOTE(review): billingCallback is set on the shared AiObjects singleton
        # and cleared in `finally` — concurrent translations would race on this
        # attribute. TODO confirm whether concurrent use is possible here.
        if billingCallback:
            aiObjects.billingCallback = billingCallback
        try:
            response = await aiObjects.callWithTextContext(request)
            if response and response.content:
                raw = response.content.strip()
                # Strip a Markdown code fence if the model wrapped its JSON anyway.
                if raw.startswith("```"):
                    raw = re.sub(r"^```[a-z]*\n?", "", raw)
                    raw = re.sub(r"\n?```$", "", raw)
                parsed = json.loads(raw)
                if isinstance(parsed, dict):
                    result.update(parsed)
                else:
                    logger.warning("i18n AI batch %d/%d returned non-dict", batchIdx + 1, totalBatches)
            else:
                logger.warning("i18n AI batch %d/%d empty response", batchIdx + 1, totalBatches)
        except json.JSONDecodeError as je:
            logger.error("i18n AI batch %d/%d JSON parse error: %s", batchIdx + 1, totalBatches, je)
        except Exception as e:
            logger.error("i18n AI batch %d/%d failed: %s", batchIdx + 1, totalBatches, e)
        finally:
            # Always detach the callback so later callers are not billed here.
            aiObjects.billingCallback = None
    return result
def _resolveMandateIdForAiI18n(request: Request, currentUser: User) -> str:
    """Pick a member mandate whose AI pool covers the minimum billing estimate.

    A valid ``X-Mandate-Id`` header is preferred; when absent or underfunded,
    the user's memberships are scanned in order. Raises 400 without any
    membership, 403 for a foreign header mandate, 402 when no pool has funds.
    """
    userId = str(currentUser.id)
    memberIds = _userMemberMandateIds(currentUser)
    if not memberIds:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Mindestens eine Mandats-Mitgliedschaft ist für die AI-Nutzung erforderlich.",
        )
    headerValue = (
        request.headers.get("X-Mandate-Id") or request.headers.get("x-mandate-id") or ""
    ).strip()
    if headerValue:
        if headerValue not in memberIds:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail="X-Mandate-Id ist kein Mandat Ihrer Mitgliedschaft.",
            )
        if _mandatePassesAiPoolBilling(currentUser, headerValue, userId):
            return headerValue
    # Fallback: first membership mandate whose pool passes the balance check.
    for candidate in memberIds:
        if _mandatePassesAiPoolBilling(currentUser, candidate, userId):
            return candidate
    raise HTTPException(
        status_code=status.HTTP_402_PAYMENT_REQUIRED,
        detail="Nicht genügend AI-Guthaben (Mandats-Pool) für diese Aktion.",
    )
# ---------------------------------------------------------------------------
# de-Master sync from frontend codebase
# ---------------------------------------------------------------------------
_REPO_ROOT = Path(__file__).resolve().parents[3]
_FRONTEND_SRC = _REPO_ROOT / "frontend_nyla" / "src"
_T_CALL_RE = re.compile(
r"""\bt\(\s*'((?:\\.|[^'])+)'\s*(?:,|\))"""
)
def _scanCodebaseKeys() -> Set[str]:
    """Scan all .tsx/.ts files under frontend_nyla/src for t('...') calls.

    Returns the set of German plaintext keys found in the codebase; empty
    (with a warning) when the frontend source directory is missing.
    """
    found: Set[str] = set()
    if not _FRONTEND_SRC.is_dir():
        logger.warning("i18n codebase scan: %s not found", _FRONTEND_SRC)
        return found
    for pattern in ("*.tsx", "*.ts"):
        for sourceFile in _FRONTEND_SRC.rglob(pattern):
            try:
                text = sourceFile.read_text(encoding="utf-8", errors="replace")
            except OSError:
                continue
            for match in _T_CALL_RE.finditer(text):
                # Un-escape \' back to a literal quote inside the key.
                key = match.group(1).replace("\\'", "'")
                if key:
                    found.add(key)
    return found
def _syncDeMasterFromCodebase(db, userId: Optional[str]) -> Dict[str, Any]:
    """Bring the de master set in line with the t()-keys found in the frontend.

    Codebase keys missing from the DB are added (key == value, German
    plaintext); DB keys no longer referenced are removed. An empty scan aborts
    without touching the DB. Returns a summary dict.
    """
    scannedKeys = _scanCodebaseKeys()
    if not scannedKeys:
        logger.warning("i18n de-sync: codebase scan returned 0 keys — aborting")
        return {"added": [], "removed": [], "keysCount": 0, "error": "Codebase scan returned 0 keys"}
    rows = db.getRecordset(UiLanguageSet, recordFilter={"id": "de"})
    if not rows:
        raise HTTPException(status_code=503, detail="Deutsch-Master nicht in DB vorhanden.")
    masterRow = dict(rows[0])
    keyMap: Dict[str, str] = dict(masterRow.get("keys") or {})
    added = sorted(scannedKeys - set(keyMap))
    removed = sorted(set(keyMap) - scannedKeys)
    for orphan in removed:
        del keyMap[orphan]
    for newKey in added:
        keyMap[newKey] = newKey
    if not added and not removed:
        # Nothing changed — skip the DB write entirely.
        return {"added": [], "removed": [], "keysCount": len(keyMap)}
    masterRow["keys"] = keyMap
    masterRow["sysModifiedAt"] = getUtcTimestamp()
    masterRow["sysModifiedBy"] = userId
    db.recordModify(UiLanguageSet, "de", masterRow)
    logger.info("i18n de-master sync: +%d added, -%d removed, total=%d", len(added), len(removed), len(keyMap))
    return {"added": added, "removed": removed, "keysCount": len(keyMap)}
# --- Public -----------------------------------------------------------------
@router.get("/codes")
async def list_language_codes():
db = _publicMgmtDb()
rows = db.getRecordset(UiLanguageSet)
out = []
for r in rows:
keys = r.get("keys") or {}
out.append(
{
"code": r["id"],
"label": r.get("label"),
"status": r.get("status"),
"isDefault": bool(r.get("isDefault")),
"keysCount": len(keys) if isinstance(keys, dict) else 0,
}
)
return sorted(out, key=lambda x: (not x.get("isDefault"), x["code"]))
@router.get("/sets/{code}")
async def get_language_set(code: str):
db = _publicMgmtDb()
rows = db.getRecordset(UiLanguageSet, recordFilter={"id": code})
if not rows:
raise HTTPException(status_code=404, detail="Sprachset nicht gefunden")
return _row_to_public(rows[0])
# --- Auth user --------------------------------------------------------------
class CreateLanguageBody(BaseModel):
    """Request body for creating a new UI language set."""
    # Language code; further restricted to ISO-639-1 by _validate_iso2_code.
    code: str = Field(..., min_length=2, max_length=10)
    # Human-readable language label shown in the UI.
    label: str = Field(..., min_length=1, max_length=80)
def _validate_iso2_code(code: str) -> str:
c = code.strip().lower()
if not re.fullmatch(r"[a-z]{2}", c):
raise HTTPException(
status_code=400, detail="Nur ISO-639-1 Zwei-Buchstaben-Codes erlaubt."
)
return c
def _run_create_language_job(userId: str, code: str, label: str, currentUser: User, mandateId: str) -> None:
    """Background job: translate all German master keys via AI, persist, notify user.

    Runs the async job on a private event loop (background threads have none).
    """
    jobLoop = asyncio.new_event_loop()
    try:
        jobLoop.run_until_complete(
            _run_create_language_job_async(userId, code, label, currentUser, mandateId)
        )
    finally:
        jobLoop.close()
async def _run_create_language_job_async(userId: str, code: str, label: str, currentUser: User, mandateId: str) -> None:
    """Translate the full de master set into *code* and persist the result.

    Reads the placeholder row created by create_language_set, runs the AI
    translation with billing attached to *mandateId*, writes the final keys,
    and notifies *userId* on success or failure.
    """
    try:
        db = _publicMgmtDb()
        rows = db.getRecordset(UiLanguageSet, recordFilter={"id": code})
        if not rows:
            # Placeholder row vanished (e.g. deleted meanwhile) — nothing to do.
            return
        deKeys = _load_master_de_keys(db)
        if not deKeys:
            logger.error("i18n create job: no de master keys found")
            return
        billingCb = _makeBillingCallback(currentUser, mandateId)
        translated = await _translateBatch(deKeys, label, code, billingCallback=billingCb)
        # Untranslated keys get a visible "[key]" marker instead of being dropped.
        finalKeys: Dict[str, str] = {}
        for k in deKeys:
            finalKeys[k] = translated.get(k, f"[{k}]")
        missingCount = sum(1 for k in deKeys if k not in translated)
        finalStatus = "complete" if missingCount == 0 else "incomplete"
        now = getUtcTimestamp()
        merged = dict(rows[0])
        merged["keys"] = finalKeys
        merged["status"] = finalStatus
        merged["label"] = label
        merged["sysModifiedAt"] = now
        merged["sysModifiedBy"] = userId
        db.recordModify(UiLanguageSet, code, merged)
        statusHint = "" if finalStatus == "complete" else f" ({missingCount} Keys ohne Übersetzung)"
        _createNotification(
            userId,
            NotificationType.SYSTEM,
            title="Sprachset erstellt",
            message=f"Die Sprache «{label}» ({code}) wurde per KI übersetzt{statusHint}.",
        )
        logger.info("i18n create job done: code=%s, translated=%d/%d", code, len(translated), len(deKeys))
    except Exception as e:
        # Background job: no caller to propagate to — log and notify the user.
        logger.exception("create language job failed: %s", e)
        _createNotification(
            userId,
            NotificationType.SYSTEM,
            title="Sprachset fehlgeschlagen",
            message=f"Fehler bei «{code}»: {e}",
        )
@router.post("/sets")
async def create_language_set(
request: Request,
body: CreateLanguageBody,
background: BackgroundTasks,
currentUser: User = Depends(getCurrentUser),
):
mandateId = _resolveMandateIdForAiI18n(request, currentUser)
code = _validate_iso2_code(body.code)
if code == "de":
raise HTTPException(status_code=400, detail="Das Standard-Set «de» kann nicht erneut angelegt werden.")
db = _publicMgmtDb()
existing = db.getRecordset(UiLanguageSet, recordFilter={"id": code})
if existing:
raise HTTPException(status_code=409, detail="Dieses Sprachset existiert bereits.")
deKeys = _load_master_de_keys(db)
if not deKeys:
raise HTTPException(status_code=503, detail="Deutsch-Master nicht geseedet.")
now = getUtcTimestamp()
uid = str(currentUser.id)
rec: dict = {
"id": code,
"label": body.label.strip(),
"keys": {},
"status": "generating",
"isDefault": False,
"sysCreatedAt": now,
"sysCreatedBy": uid,
"sysModifiedAt": now,
"sysModifiedBy": uid,
}
db.recordCreate(UiLanguageSet, rec)
background.add_task(_run_create_language_job, uid, code, body.label.strip(), currentUser, mandateId)
_createNotification(
uid,
NotificationType.SYSTEM,
title="Sprachset wird erzeugt",
message=f"Die Sprache «{code}» wird im Hintergrund per KI übersetzt.",
)
return {"status": "accepted", "code": code}
async def _sync_non_de_set_with_de(db, code: str, userId: Optional[str], adminUser: Optional[User] = None) -> dict:
    """Align a non-de language set with the de master: drop orphans, AI-translate new keys.

    Keys present in the set but absent from the master are removed; keys only
    in the master are translated via AI (falling back to "[key]" markers on
    failure). The row is always rewritten with a recomputed status.
    Returns a summary dict (code, added, removed, translated, keysCount).
    """
    if code == "de":
        raise HTTPException(status_code=400, detail="Das de-Set wird nicht per Update synchronisiert.")
    rows = db.getRecordset(UiLanguageSet, recordFilter={"id": code})
    if not rows:
        raise HTTPException(status_code=404, detail="Sprachset nicht gefunden")
    deKeys = _load_master_de_keys(db)
    row = dict(rows[0])
    cur: Dict[str, str] = dict(row.get("keys") or {})
    masterKeys = set(deKeys.keys())
    currentKeys = set(cur.keys())
    removed = list(currentKeys - masterKeys)
    added = list(masterKeys - currentKeys)
    for k in removed:
        del cur[k]
    translatedCount = 0
    if added:
        toTranslate = {k: deKeys[k] for k in added}
        langLabel = row.get("label") or code
        billingCb = None
        if adminUser:
            # NOTE(review): billing is attached to the admin's FIRST membership
            # mandate without a balance check — presumably acceptable for
            # admin-triggered syncs; confirm this is the intended pool.
            memberIds = _userMemberMandateIds(adminUser)
            if memberIds:
                billingCb = _makeBillingCallback(adminUser, memberIds[0])
        try:
            translated = await _translateBatch(toTranslate, langLabel, code, billingCallback=billingCb)
            for k in added:
                cur[k] = translated.get(k, f"[{k}]")
            translatedCount = sum(1 for k in added if k in translated)
        except Exception as e:
            # Keep the sync alive: mark every new key as untranslated instead.
            logger.error("AI translation during sync failed for %s: %s", code, e)
            for k in added:
                cur[k] = f"[{k}]"
    now = getUtcTimestamp()
    row["keys"] = cur
    untranslated = len(added) - translatedCount
    row["status"] = "complete" if untranslated == 0 else "incomplete"
    row["sysModifiedAt"] = now
    row["sysModifiedBy"] = userId
    db.recordModify(UiLanguageSet, code, row)
    return {"code": code, "added": added, "removed": removed, "translated": translatedCount, "keysCount": len(cur)}
@router.put("/sets/sync-de")
async def sync_de_master_from_codebase(
adminUser: User = Depends(requireSysAdminRole),
):
"""Scan frontend codebase for t() keys and synchronise the de master set.
Adds new keys (key=value=German plaintext), removes orphaned keys.
"""
db = getMgmtInterface(adminUser, mandateId=None).db
return _syncDeMasterFromCodebase(db, str(adminUser.id))
@router.put("/sets/update-all")
async def update_all_language_sets(
adminUser: User = Depends(requireSysAdminRole),
):
"""Sync de-master from codebase, then update all non-de sets via AI."""
db = getMgmtInterface(adminUser, mandateId=None).db
deSync = _syncDeMasterFromCodebase(db, str(adminUser.id))
rows = db.getRecordset(UiLanguageSet)
results = []
for r in rows:
cid = r["id"]
if cid == "de":
continue
res = await _sync_non_de_set_with_de(db, cid, str(adminUser.id), adminUser=adminUser)
results.append(res)
return {"deSync": deSync, "updated": results}
@router.put("/sets/{code}")
async def update_language_set(
code: str,
adminUser: User = Depends(requireSysAdminRole),
):
c = code.strip().lower()
if c in ("update-all", "sync-de"):
raise HTTPException(status_code=400, detail="Ungültiger Sprachcode.")
db = getMgmtInterface(adminUser, mandateId=None).db
deSync = _syncDeMasterFromCodebase(db, str(adminUser.id))
langResult = await _sync_non_de_set_with_de(db, c, str(adminUser.id), adminUser=adminUser)
langResult["deSync"] = deSync
return langResult
@router.delete("/sets/{code}")
async def delete_language_set(
code: str,
adminUser: User = Depends(requireSysAdminRole),
):
c = code.strip().lower()
if c == "de":
raise HTTPException(status_code=400, detail="Das Standard-Set «de» darf nicht gelöscht werden.")
db = getMgmtInterface(adminUser, mandateId=None).db
ok = db.recordDelete(UiLanguageSet, c)
if not ok:
raise HTTPException(status_code=404, detail="Sprachset nicht gefunden")
return {"deleted": c}
@router.get("/sets/{code}/download", dependencies=[Depends(getCurrentUser)])
async def download_language_set(
code: str,
currentUser: User = Depends(getCurrentUser),
):
db = _publicMgmtDb()
rows = db.getRecordset(UiLanguageSet, recordFilter={"id": code.strip().lower()})
if not rows:
raise HTTPException(status_code=404, detail="Sprachset nicht gefunden")
payload = _row_to_public(rows[0])
raw = json.dumps(payload.get("keys", {}), ensure_ascii=False, indent=2)
return Response(
content=raw,
media_type="application/json",
headers={
"Content-Disposition": f'attachment; filename="ui-language-{code}.json"'
},
)
# --- Export / Import (full DB) -----------------------------------------------
@router.get("/export")
async def export_all_language_sets(
adminUser: User = Depends(requireSysAdminRole),
):
"""Export the complete language database as a JSON array (all sets with full metadata)."""
db = getMgmtInterface(adminUser, mandateId=None).db
rows = db.getRecordset(UiLanguageSet)
payload = []
for r in rows:
payload.append({
"id": r["id"],
"label": r.get("label", ""),
"keys": dict(r.get("keys") or {}),
"status": r.get("status", "complete"),
"isDefault": bool(r.get("isDefault", False)),
})
payload.sort(key=lambda x: (not x.get("isDefault"), x["id"]))
raw = json.dumps(payload, ensure_ascii=False, indent=2)
return Response(
content=raw,
media_type="application/json",
headers={
"Content-Disposition": 'attachment; filename="ui-languages-export.json"'
},
)
@router.post("/import")
async def import_language_sets(
file: UploadFile = File(...),
adminUser: User = Depends(requireSysAdminRole),
):
"""Import a previously exported language database JSON.
Behaviour per set in the uploaded array:
- If the set already exists in DB overwrite keys, label, status, isDefault
- If the set does not exist create it
Existing sets NOT present in the upload are left untouched (no deletion).
"""
if not file.filename or not file.filename.endswith(".json"):
raise HTTPException(status_code=400, detail="Nur .json-Dateien erlaubt.")
try:
raw = await file.read()
data = json.loads(raw.decode("utf-8"))
except (json.JSONDecodeError, UnicodeDecodeError) as e:
raise HTTPException(status_code=400, detail=f"Ungültiges JSON: {e}")
if not isinstance(data, list):
raise HTTPException(status_code=400, detail="JSON muss ein Array von Sprachsets sein.")
db = getMgmtInterface(adminUser, mandateId=None).db
now = getUtcTimestamp()
uid = str(adminUser.id)
created = []
updated = []
for entry in data:
if not isinstance(entry, dict):
continue
code = str(entry.get("id", "")).strip().lower()
if not code or len(code) < 2:
continue
keys = entry.get("keys")
if not isinstance(keys, dict):
continue
label = str(entry.get("label", code))
entryStatus = str(entry.get("status", "complete"))
isDefault = bool(entry.get("isDefault", False))
existing = db.getRecordset(UiLanguageSet, recordFilter={"id": code})
if existing:
row = dict(existing[0])
row["keys"] = keys
row["label"] = label
row["status"] = entryStatus
row["isDefault"] = isDefault
row["sysModifiedAt"] = now
row["sysModifiedBy"] = uid
db.recordModify(UiLanguageSet, code, row)
updated.append(code)
else:
rec = {
"id": code,
"label": label,
"keys": keys,
"status": entryStatus,
"isDefault": isDefault,
"sysCreatedAt": now,
"sysCreatedBy": uid,
"sysModifiedAt": now,
"sysModifiedBy": uid,
}
db.recordCreate(UiLanguageSet, rec)
created.append(code)
logger.info("i18n import: created=%s, updated=%s", created, updated)
return {"created": created, "updated": updated, "totalProcessed": len(created) + len(updated)}

View file

@ -357,6 +357,34 @@ def _buildEnrichedSubscriptions() -> List[Dict[str, Any]]:
operativeValues = {s.value for s in OPERATIVE_STATUSES} operativeValues = {s.value for s in OPERATIVE_STATUSES}
operativeMandateIds = list({
sub.get("mandateId") for sub in allSubs
if sub.get("mandateId") and sub.get("status") in operativeValues
})
userCountMap: Dict[str, int] = {}
instanceCountMap: Dict[str, int] = {}
if operativeMandateIds:
try:
from modules.datamodels.datamodelMembership import UserMandate
from modules.datamodels.datamodelFeatures import FeatureInstance
from modules.security.rootAccess import getRootDbAppConnector
appDb = getRootDbAppConnector()
allUM = appDb.getRecordset(UserMandate, recordFilter={"mandateId": operativeMandateIds})
for um in (allUM or []):
mid = um.get("mandateId") if isinstance(um, dict) else getattr(um, "mandateId", None)
if mid:
userCountMap[mid] = userCountMap.get(mid, 0) + 1
allFI = appDb.getRecordset(FeatureInstance, recordFilter={"mandateId": operativeMandateIds})
for fi in (allFI or []):
fid = fi if isinstance(fi, dict) else fi.__dict__
if fid.get("enabled"):
mid = fid.get("mandateId")
if mid:
instanceCountMap[mid] = instanceCountMap.get(mid, 0) + 1
except Exception as e:
logger.warning("Batch count for subscriptions failed: %s", e)
enriched = [] enriched = []
for sub in allSubs: for sub in allSubs:
mid = sub.get("mandateId", "") mid = sub.get("mandateId", "")
@ -369,12 +397,8 @@ def _buildEnrichedSubscriptions() -> List[Dict[str, Any]]:
if sub.get("status") in operativeValues: if sub.get("status") in operativeValues:
userPrice = sub.get("snapshotPricePerUserCHF", 0) or 0 userPrice = sub.get("snapshotPricePerUserCHF", 0) or 0
instPrice = sub.get("snapshotPricePerInstanceCHF", 0) or 0 instPrice = sub.get("snapshotPricePerInstanceCHF", 0) or 0
try: userCount = userCountMap.get(mid, 0)
userCount = subInterface.countActiveUsers(mid) instanceCount = instanceCountMap.get(mid, 0)
instanceCount = subInterface.countActiveFeatureInstances(mid)
except Exception:
userCount = 0
instanceCount = 0
sub["monthlyRevenueCHF"] = round(userPrice * userCount + instPrice * instanceCount, 2) sub["monthlyRevenueCHF"] = round(userPrice * userCount + instPrice * instanceCount, 2)
sub["activeUsers"] = userCount sub["activeUsers"] = userCount
sub["activeInstances"] = instanceCount sub["activeInstances"] = instanceCount
@ -492,13 +516,13 @@ def _getDataVolumeUsage(
mgmtDb = getMgmtInterface().db mgmtDb = getMgmtInterface().db
totalFileBytes = 0 totalFileBytes = 0
for instId in instIds: if instIds:
files = mgmtDb.getRecordset(FileItem, recordFilter={"featureInstanceId": instId}) files = mgmtDb.getRecordset(FileItem, recordFilter={"featureInstanceId": instIds})
for f in files: for f in (files or []):
size = f.get("fileSize") if isinstance(f, dict) else getattr(f, "fileSize", 0) size = f.get("fileSize") if isinstance(f, dict) else getattr(f, "fileSize", 0)
totalFileBytes += (size or 0) totalFileBytes += (size or 0)
mandateFiles = mgmtDb.getRecordset(FileItem, recordFilter={"mandateId": mandateId}) mandateFiles = mgmtDb.getRecordset(FileItem, recordFilter={"mandateId": mandateId})
for f in mandateFiles: for f in (mandateFiles or []):
size = f.get("fileSize") if isinstance(f, dict) else getattr(f, "fileSize", 0) size = f.get("fileSize") if isinstance(f, dict) else getattr(f, "fileSize", 0)
totalFileBytes += (size or 0) totalFileBytes += (size or 0)
filesMB = round(totalFileBytes / (1024 * 1024), 2) filesMB = round(totalFileBytes / (1024 * 1024), 2)

View file

@ -187,8 +187,6 @@ def _buildDynamicBlock(
# Convert Pydantic model to dict if needed # Convert Pydantic model to dict if needed
if hasattr(featureLabel, 'model_dump'): if hasattr(featureLabel, 'model_dump'):
featureLabel = featureLabel.model_dump() featureLabel = featureLabel.model_dump()
elif hasattr(featureLabel, 'dict'):
featureLabel = featureLabel.dict()
elif not isinstance(featureLabel, dict): elif not isinstance(featureLabel, dict):
# Fallback: try to access as attributes # Fallback: try to access as attributes
featureLabel = {"de": getattr(featureLabel, 'de', instance.featureCode), "en": getattr(featureLabel, 'en', instance.featureCode)} featureLabel = {"de": getattr(featureLabel, 'de', instance.featureCode), "en": getattr(featureLabel, 'en', instance.featureCode)}
@ -392,8 +390,10 @@ def _buildStaticBlocks(
if section.get("adminOnly") and not isSysAdmin: if section.get("adminOnly") and not isSysAdmin:
continue continue
# Handle sections with subgroups hasSubgroups = "subgroups" in section
if "subgroups" in section: hasItems = "items" in section and len(section["items"]) > 0
if hasSubgroups:
filteredSubgroups = [] filteredSubgroups = []
for subgroup in section["subgroups"]: for subgroup in section["subgroups"]:
subItems = _filterItems( subItems = _filterItems(
@ -409,17 +409,22 @@ def _buildStaticBlocks(
filteredSubgroups.sort(key=lambda s: s["order"]) filteredSubgroups.sort(key=lambda s: s["order"])
if filteredSubgroups: topLevelItems = []
if hasItems:
topLevelItems = _filterItems(
section["items"], language, isSysAdmin, roleIds, hasGlobalPermission
)
if filteredSubgroups or topLevelItems:
blocks.append({ blocks.append({
"type": "static", "type": "static",
"id": section["id"], "id": section["id"],
"title": section["title"].get(language, section["title"].get("en", section["id"])), "title": section["title"].get(language, section["title"].get("en", section["id"])),
"order": section.get("order", 50), "order": section.get("order", 50),
"items": [], "items": topLevelItems,
"subgroups": filteredSubgroups, "subgroups": filteredSubgroups,
}) })
else: else:
# Standard flat section
filteredItems = _filterItems( filteredItems = _filterItems(
section.get("items", []), language, isSysAdmin, roleIds, hasGlobalPermission section.get("items", []), language, isSysAdmin, roleIds, hasGlobalPermission
) )

View file

@ -0,0 +1,267 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
System-level Workflow Runs Dashboard API.
Provides cross-feature, cross-mandate access to workflow runs
with RBAC scoping: user sees own runs, mandate admin sees mandate runs,
sysadmin sees all runs.
"""
import logging
import math
from typing import Optional
from fastapi import APIRouter, Depends, Request, Query, Path, HTTPException
from slowapi import Limiter
from slowapi.util import get_remote_address
from modules.auth.authentication import getRequestContext, RequestContext
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.connectors.connectorDbPostgre import DatabaseConnector
from modules.shared.configuration import APP_CONFIG
from modules.datamodels.datamodelPagination import PaginationParams
from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import (
AutoRun, AutoStepLog, AutoWorkflow, AutoTask,
)
# Module-level logger for this router.
logger = logging.getLogger(__name__)
# Per-client rate limiter (keyed on the remote address) applied to every endpoint below.
limiter = Limiter(key_func=get_remote_address)
# All routes in this module live under /api/system/workflow-runs.
router = APIRouter(prefix="/api/system/workflow-runs", tags=["WorkflowDashboard"])
# Name of the dedicated graphical-editor ("greenfield") database that holds
# the workflow/run tables queried by this dashboard.
_GREENFIELD_DB = "poweron_graphicaleditor"
def _getDb() -> DatabaseConnector:
    """Open a connector to the graphical-editor (greenfield) database.

    Connection settings come from APP_CONFIG; the password secret entry
    takes precedence over the plain password entry when both are set.
    """
    cfg = APP_CONFIG
    password = cfg.get("DB_PASSWORD_SECRET") or cfg.get("DB_PASSWORD")
    return DatabaseConnector(
        dbHost=cfg.get("DB_HOST", "localhost"),
        dbPort=int(cfg.get("DB_PORT", 5432)),
        dbDatabase=_GREENFIELD_DB,
        dbUser=cfg.get("DB_USER"),
        dbPassword=password,
        userId=None,
    )
def _getUserMandateIds(userId: str) -> list[str]:
    """Return the IDs of all enabled mandates the user is a member of."""
    memberships = getRootInterface().getUserMandates(userId)
    result: list[str] = []
    for membership in memberships:
        # Skip disabled memberships and entries without a mandate ID.
        if membership.enabled and membership.mandateId:
            result.append(membership.mandateId)
    return result
def _getAdminMandateIds(userId: str, mandateIds: list) -> list:
    """Batch-check which mandates the user is admin for (2 SQL queries total).

    Query 1 fetches the user's role assignments across the given mandates;
    query 2 fetches the role records to see which carry the isAdmin flag.
    Returns the subset of ``mandateIds`` (in the caller's order) where the
    user holds at least one admin role.
    """
    if not mandateIds:
        return []
    rootIface = getRootInterface()
    from modules.datamodels.datamodelMembership import UserMandateRole
    roleRows = rootIface.db.getRecordset(UserMandateRole, recordFilter={
        "userId": userId, "mandateId": mandateIds,
    })
    if not roleRows:
        return []
    # Map each roleId to the set of mandates it was granted in.
    mandatesByRole: dict = {}
    for rec in roleRows:
        data = rec if isinstance(rec, dict) else rec.__dict__
        roleId = data.get("roleId")
        if roleId:
            mandatesByRole.setdefault(roleId, set()).add(data.get("mandateId"))
    if not mandatesByRole:
        return []
    from modules.datamodels.datamodelRbac import MandateRole
    roleRecords = rootIface.db.getRecordset(
        MandateRole, recordFilter={"id": list(mandatesByRole.keys())}
    )
    adminMandates: set = set()
    for role in (roleRecords or []):
        data = role if isinstance(role, dict) else role.__dict__
        if data.get("isAdmin") and data.get("id") in mandatesByRole:
            adminMandates.update(mandatesByRole[data.get("id")])
    # Preserve the caller's ordering of mandateIds.
    return [mid for mid in mandateIds if mid in adminMandates]
def _scopedRunFilter(context: RequestContext) -> Optional[dict]:
    """
    Build a DB filter dict based on RBAC:
    - sysadmin: None (no filter)
    - mandate admin: mandateId IN user's admin mandates
    - normal user: ownerId = userId
    """
    if context.hasSysAdminRole:
        # Sysadmins see everything -- no scoping filter at all.
        return None
    if not context.user:
        # Unauthenticated caller: filter that can never match any run.
        return {"ownerId": "__impossible__"}
    userId = str(context.user.id)
    adminMandateIds = _getAdminMandateIds(userId, _getUserMandateIds(userId))
    if adminMandateIds:
        return {"mandateId": adminMandateIds}
    return {"ownerId": userId}
@router.get("")
@limiter.limit("60/minute")
def get_workflow_runs(
    request: Request,
    limit: int = Query(50, ge=1, le=200),
    offset: int = Query(0, ge=0),
    status: Optional[str] = Query(None, description="Filter by status"),
    mandateId: Optional[str] = Query(None, description="Filter by mandate"),
    context: RequestContext = Depends(getRequestContext),
) -> dict:
    """List workflow runs with RBAC scoping (SQL-paginated).

    Query params:
        limit/offset: window into the result set. NOTE(review): the backend
            paginates by page number, so an offset that is not a multiple of
            `limit` is effectively rounded down to the containing page --
            confirm callers always use aligned offsets.
        status: optional exact-match status filter.
        mandateId: optional mandate filter (applied on top of RBAC scope).

    Returns:
        dict with `runs` (each run enriched with `workflowLabel`),
        `total`, `limit` and `offset`.
    """
    db = _getDb()
    if not db._ensureTableExists(AutoRun):
        # Greenfield DB without the runs table yet: empty result, not an error.
        return {"runs": [], "total": 0, "limit": limit, "offset": offset}
    baseFilter = _scopedRunFilter(context)
    recordFilter = dict(baseFilter) if baseFilter else {}
    if status:
        recordFilter["status"] = status
    if mandateId:
        recordFilter["mandateId"] = mandateId
    # limit is guaranteed >= 1 by Query(ge=1), so no zero-division guard needed.
    page = (offset // limit) + 1
    pagination = PaginationParams(
        page=page,
        pageSize=limit,
        sort=[{"field": "sysCreatedAt", "direction": "desc"}],
    )
    result = db.getRecordsetPaginated(
        AutoRun,
        pagination=pagination,
        recordFilter=recordFilter if recordFilter else None,
    )
    pageRuns = result.get("items", []) if isinstance(result, dict) else result.items
    total = result.get("totalItems", 0) if isinstance(result, dict) else result.totalItems
    # Normalize records to plain dicts (the connector may yield dicts or
    # objects), consistent with the handling in _getAdminMandateIds.
    rows = [r if isinstance(r, dict) else r.__dict__ for r in (pageRuns or [])]
    # Batch-resolve workflow labels for the runs on this page (single query).
    wfIds = list({r.get("workflowId") for r in rows if r.get("workflowId")})
    wfLabelMap = {}
    if wfIds and db._ensureTableExists(AutoWorkflow):
        wfs = db.getRecordset(AutoWorkflow, recordFilter={"id": wfIds})
        for wf in (wfs or []):
            wfRow = wf if isinstance(wf, dict) else wf.__dict__
            wfLabelMap[wfRow.get("id")] = wfRow.get("label") or wfRow.get("id")
    runs = []
    for row in rows:
        run = dict(row)
        run["workflowLabel"] = wfLabelMap.get(run.get("workflowId"), run.get("workflowId") or "")
        runs.append(run)
    return {"runs": runs, "total": total, "limit": limit, "offset": offset}
@router.get("/metrics")
@limiter.limit("60/minute")
def get_workflow_metrics(
    request: Request,
    context: RequestContext = Depends(getRequestContext),
) -> dict:
    """Aggregated metrics across all accessible workflow runs (SQL COUNT)."""
    db = _getDb()
    if not db._ensureTableExists(AutoRun):
        return {"totalRuns": 0, "runsByStatus": {}, "totalTokens": 0, "totalCredits": 0}
    baseFilter = _scopedRunFilter(context)

    def _countRows(model, flt):
        # One-row page: only totalItems from the paginated result is needed.
        res = db.getRecordsetPaginated(
            model, pagination=PaginationParams(page=1, pageSize=1), recordFilter=flt
        )
        return res.get("totalItems", 0) if isinstance(res, dict) else res.totalItems

    totalRuns = _countRows(AutoRun, baseFilter)
    # Per-status counts: one COUNT query per distinct status value.
    runsByStatus = {}
    for statusValue in db.getDistinctColumnValues(AutoRun, "status", recordFilter=baseFilter):
        perStatusFilter = dict(baseFilter) if baseFilter else {}
        perStatusFilter["status"] = statusValue
        runsByStatus[statusValue] = _countRows(AutoRun, perStatusFilter)
    # Token/credit totals: only aggregated for reasonably small result sets
    # to avoid loading an unbounded number of rows.
    totalTokens = 0
    totalCredits = 0.0
    if 0 < totalRuns <= 10000:
        costRows = db.getRecordset(
            AutoRun, recordFilter=baseFilter, fieldFilter=["costTokens", "costCredits"]
        ) or []
        for row in costRows:
            totalTokens += row.get("costTokens", 0) or 0
            totalCredits += row.get("costCredits", 0.0) or 0.0
    # Workflow counts (non-template), scoped to the user's mandates unless sysadmin.
    workflowCount = 0
    activeWorkflows = 0
    if db._ensureTableExists(AutoWorkflow):
        wfFilter: dict = {"isTemplate": False}
        if not context.hasSysAdminRole:
            userId = str(context.user.id) if context.user else None
            mandateIds = _getUserMandateIds(userId) if userId else []
            wfFilter["mandateId"] = mandateIds if mandateIds else "__impossible__"
        workflowCount = _countRows(AutoWorkflow, wfFilter)
        activeFilter = dict(wfFilter)
        activeFilter["active"] = True
        activeWorkflows = _countRows(AutoWorkflow, activeFilter)
    return {
        "totalRuns": totalRuns,
        "runsByStatus": runsByStatus,
        "totalTokens": totalTokens,
        "totalCredits": round(totalCredits, 4),
        "workflowCount": workflowCount,
        "activeWorkflows": activeWorkflows,
    }
@router.get("/{runId}/steps")
@limiter.limit("60/minute")
def get_run_steps(
    request: Request,
    runId: str = Path(..., description="Run ID"),
    context: RequestContext = Depends(getRequestContext),
) -> dict:
    """Get step logs for a specific run (with access check).

    Access rules: sysadmin sees any run; otherwise the caller must be the
    run's owner or an admin of the run's mandate.

    Raises:
        HTTPException 404: runs table missing or run not found.
        HTTPException 403: caller has no access to this run.
    """
    db = _getDb()
    if not db._ensureTableExists(AutoRun):
        raise HTTPException(status_code=404, detail="Run not found")
    runs = db.getRecordset(AutoRun, recordFilter={"id": runId})
    if not runs:
        raise HTTPException(status_code=404, detail="Run not found")
    run = runs[0] if isinstance(runs[0], dict) else runs[0].__dict__
    if not context.hasSysAdminRole:
        userId = str(context.user.id) if context.user else None
        runOwner = run.get("ownerId")
        runMandate = run.get("mandateId")
        # Require a real userId for the ownership match: a bare `runOwner ==
        # userId` would grant access when both are None (anonymous caller
        # hitting an ownerless run).
        isOwner = userId is not None and runOwner == userId
        # NOTE(review): _isUserMandateAdmin is not defined in this module's
        # visible scope -- confirm it exists (defined later in the file or
        # imported); otherwise this branch raises NameError at request time.
        isMandateAdmin = bool(
            runMandate and userId and _isUserMandateAdmin(userId, runMandate)
        )
        if not (isOwner or isMandateAdmin):
            raise HTTPException(status_code=403, detail="Access denied")
    if not db._ensureTableExists(AutoStepLog):
        return {"steps": []}
    records = db.getRecordset(AutoStepLog, recordFilter={"runId": runId})
    steps = [dict(r) if isinstance(r, dict) else dict(r.__dict__) for r in (records or [])]
    # Sort by start time, entries without startedAt last. Keeping the raw
    # value in the key avoids mixing int 0 with string timestamps (the
    # previous `or 0` fallback raised TypeError for string startedAt values).
    steps.sort(key=lambda s: (s.get("startedAt") is None, s.get("startedAt")))
    return {"steps": steps}

View file

@ -304,11 +304,40 @@ def _registerDocumentTools(registry: ToolRegistry, services):
imageData = fileContent.get("data", "") imageData = fileContent.get("data", "")
mimeType = fileMimeType mimeType = fileMimeType
# 4) PDF page rendering: render the requested page as an image via PyMuPDF
if not imageData: if not imageData:
chatService = services.chat chatService = services.chat
fileInfo = chatService.getFileInfo(fileId) if hasattr(chatService, "getFileInfo") else None fileInfo = chatService.getFileInfo(fileId) if hasattr(chatService, "getFileInfo") else None
fileName = fileInfo.get("fileName", fileId) if fileInfo else fileId fileMime = (fileInfo.get("mimeType", "") if fileInfo else "").lower()
fileMime = fileInfo.get("mimeType", "unknown") if fileInfo else "unknown" if fileMime == "application/pdf" or (fileInfo and (fileInfo.get("fileName", "") or "").lower().endswith(".pdf")):
try:
import fitz as _fitz
rawContent = chatService.getFileContent(fileId) if not fileContent else fileContent
rawData = rawContent.get("data", "") if rawContent else ""
if isinstance(rawData, str) and len(rawData) > 100:
pdfBytes = _b64.b64decode(rawData)
elif isinstance(rawData, bytes):
pdfBytes = rawData
else:
pdfBytes = None
if pdfBytes:
doc = _fitz.open(stream=pdfBytes, filetype="pdf")
targetPage = pageIndex if pageIndex is not None else 0
if 0 <= targetPage < len(doc):
page = doc[targetPage]
pix = page.get_pixmap(dpi=200)
imageData = _b64.b64encode(pix.tobytes("png")).decode("ascii")
mimeType = "image/png"
logger.info("describeImage: rendered PDF page %d as image (%dx%d)", targetPage, pix.width, pix.height)
doc.close()
except Exception as pdfErr:
logger.warning("describeImage: PDF page rendering failed: %s", pdfErr)
if not imageData:
chatService = services.chat
_errFileInfo = chatService.getFileInfo(fileId) if hasattr(chatService, "getFileInfo") else None
fileName = _errFileInfo.get("fileName", fileId) if _errFileInfo else fileId
fileMime = _errFileInfo.get("mimeType", "unknown") if _errFileInfo else "unknown"
return ToolResult(toolCallId="", toolName="describeImage", success=False, return ToolResult(toolCallId="", toolName="describeImage", success=False,
error=f"No image data found in '{fileName}' (type: {fileMime}). " error=f"No image data found in '{fileName}' (type: {fileMime}). "
f"This file likely contains text, not images. Use readFile(fileId=\"{fileId}\") to access its text content.") f"This file likely contains text, not images. Use readFile(fileId=\"{fileId}\") to access its text content.")

View file

@ -49,6 +49,19 @@ def _getOrCreateFeatureDbConnector(featureDbName: str, userId: str):
return conn return conn
def clearFeatureQueryCache(featureInstanceId: Optional[str] = None) -> int:
"""Clear the feature data query cache. If featureInstanceId given, only for that instance."""
if featureInstanceId:
prefix = f"{featureInstanceId}:"
keys = [k for k in _featureQueryCache if k.startswith(prefix)]
else:
keys = list(_featureQueryCache.keys())
for k in keys:
del _featureQueryCache[k]
logger.info(f"Feature query cache cleared: {len(keys)} entries removed (instance={featureInstanceId or 'all'})")
return len(keys)
def _registerFeatureSubAgentTools(registry: ToolRegistry, services): def _registerFeatureSubAgentTools(registry: ToolRegistry, services):
"""Auto-extracted from registerCoreTools.""" """Auto-extracted from registerCoreTools."""
# ---- Feature Data Sub-Agent tool ---- # ---- Feature Data Sub-Agent tool ----

View file

@ -283,18 +283,8 @@ def _buildSchemaContext(
selectedTables: List[Dict[str, Any]], selectedTables: List[Dict[str, Any]],
) -> str: ) -> str:
"""Build a system-level context block describing available tables.""" """Build a system-level context block describing available tables."""
parts = [ tableNames = []
f"You are a data query assistant for the '{featureCode}' feature", tableBlocks = []
]
if instanceLabel:
parts[0] += f' (instance: "{instanceLabel}")'
parts[0] += "."
parts.append(
"You have access to the following data tables. "
"Use browseTable to list rows, queryTable to filter/search, "
"and aggregateTable for SUM/COUNT/AVG/MIN/MAX with optional GROUP BY."
)
parts.append("")
for obj in selectedTables: for obj in selectedTables:
meta = obj.get("meta", {}) meta = obj.get("meta", {})
@ -302,11 +292,38 @@ def _buildSchemaContext(
fields = meta.get("fields", []) fields = meta.get("fields", [])
label = obj.get("label", {}) label = obj.get("label", {})
labelStr = label.get("en") or label.get("de") or tbl labelStr = label.get("en") or label.get("de") or tbl
parts.append(f"Table: {tbl} ({labelStr})") tableNames.append(tbl)
block = f" Table: {tbl} ({labelStr})"
if fields: if fields:
parts.append(f" Fields: {', '.join(fields)}") block += f"\n Fields: {', '.join(fields)}"
parts.append("") tableBlocks.append(block)
parts = [
f"You are a data query assistant for the '{featureCode}' feature",
]
if instanceLabel:
parts[0] += f' (instance: "{instanceLabel}")'
parts[0] += "."
parts.append("")
parts.append("AVAILABLE TABLES (use EXACTLY these names as tableName parameter):")
parts.extend(tableBlocks)
parts.append("")
parts.append(
"IMPORTANT RULES:\n"
f"- The ONLY valid tableName values are: {tableNames}\n"
"- Do NOT invent table names, do NOT use UUIDs or IDs as table names.\n"
"- Field names are plain column names (e.g. 'accountNumber', 'periodYear').\n"
" Do NOT prefix field names with UUIDs, table names, or dots.\n"
"- If unsure about column names, call browseTable with only tableName (no fields)\n"
" to see actual columns first."
)
parts.append("")
parts.append(
"Tools: browseTable (list rows), queryTable (filter/search), "
"aggregateTable (SUM/COUNT/AVG/MIN/MAX with optional GROUP BY)."
)
parts.append("")
parts.append( parts.append(
"Answer the user's question using the data from these tables. " "Answer the user's question using the data from these tables. "
"Be precise, cite row counts, and format data clearly." "Be precise, cite row counts, and format data clearly."

View file

@ -78,6 +78,15 @@ class FeatureDataProvider:
""" """
_validateTableName(tableName) _validateTableName(tableName)
conn = self._db.connection conn = self._db.connection
if fields:
invalid = [f for f in fields if not _isValidIdentifier(f)]
if invalid:
return {
"rows": [], "total": 0, "limit": limit, "offset": offset,
"error": f"Invalid field name(s): {', '.join(invalid)}. Use getActualColumns to discover valid column names.",
}
scopeFilter = _buildScopeFilter(tableName, featureInstanceId, mandateId, dbConnection=conn) scopeFilter = _buildScopeFilter(tableName, featureInstanceId, mandateId, dbConnection=conn)
extraWhere, extraParams = _buildFilterClauses(extraFilters) extraWhere, extraParams = _buildFilterClauses(extraFilters)
@ -105,6 +114,10 @@ class FeatureDataProvider:
return {"rows": rows, "total": total, "limit": limit, "offset": offset} return {"rows": rows, "total": total, "limit": limit, "offset": offset}
except Exception as e: except Exception as e:
logger.error(f"browseTable({tableName}) failed: {e}") logger.error(f"browseTable({tableName}) failed: {e}")
try:
conn.rollback()
except Exception:
pass
return {"rows": [], "total": 0, "limit": limit, "offset": offset, "error": str(e)} return {"rows": [], "total": 0, "limit": limit, "offset": offset, "error": str(e)}
def aggregateTable( def aggregateTable(
@ -164,6 +177,10 @@ class FeatureDataProvider:
} }
except Exception as e: except Exception as e:
logger.error(f"aggregateTable({tableName}, {aggregate}({field})) failed: {e}") logger.error(f"aggregateTable({tableName}, {aggregate}({field})) failed: {e}")
try:
conn.rollback()
except Exception:
pass
return {"rows": [], "error": str(e), "aggregate": aggregate, "field": field, "groupBy": groupBy} return {"rows": [], "error": str(e), "aggregate": aggregate, "field": field, "groupBy": groupBy}
def queryTable( def queryTable(
@ -185,6 +202,15 @@ class FeatureDataProvider:
""" """
_validateTableName(tableName) _validateTableName(tableName)
conn = self._db.connection conn = self._db.connection
if fields:
invalid = [f for f in fields if not _isValidIdentifier(f)]
if invalid:
return {
"rows": [], "total": 0, "limit": limit, "offset": offset,
"error": f"Invalid field name(s): {', '.join(invalid)}. Use getActualColumns to discover valid column names.",
}
scopeFilter = _buildScopeFilter(tableName, featureInstanceId, mandateId, dbConnection=conn) scopeFilter = _buildScopeFilter(tableName, featureInstanceId, mandateId, dbConnection=conn)
combinedFilters = list(filters or []) + list(extraFilters or []) combinedFilters = list(filters or []) + list(extraFilters or [])
@ -214,6 +240,10 @@ class FeatureDataProvider:
return {"rows": rows, "total": total, "limit": limit, "offset": offset} return {"rows": rows, "total": total, "limit": limit, "offset": offset}
except Exception as e: except Exception as e:
logger.error(f"queryTable({tableName}) failed: {e}") logger.error(f"queryTable({tableName}) failed: {e}")
try:
conn.rollback()
except Exception:
pass
return {"rows": [], "total": 0, "limit": limit, "offset": offset, "error": str(e)} return {"rows": [], "total": 0, "limit": limit, "offset": offset, "error": str(e)}

View file

@ -312,9 +312,13 @@ class AgentService:
if tb.id == "workflow": if tb.id == "workflow":
try: try:
from modules.serviceCenter.services.serviceAgent.workflowTools import getWorkflowToolDefinitions from modules.serviceCenter.services.serviceAgent.workflowTools import getWorkflowToolDefinitions
from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolDefinition
wfDefs = getWorkflowToolDefinitions() wfDefs = getWorkflowToolDefinitions()
for toolDef in wfDefs: for rawDef in wfDefs:
registry.registerFromDefinition(toolDef, toolDef._handler if hasattr(toolDef, "_handler") else None) handler = rawDef.get("handler")
defFields = {k: v for k, v in rawDef.items() if k != "handler"}
toolDef = ToolDefinition(**defFields)
registry.registerFromDefinition(toolDef, handler)
logger.info("Registered %d workflow tools from toolbox", len(wfDefs)) logger.info("Registered %d workflow tools from toolbox", len(wfDefs))
except Exception as e: except Exception as e:
logger.warning("Could not register workflow tools: %s", e) logger.warning("Could not register workflow tools: %s", e)

View file

@ -430,7 +430,7 @@ class ContentExtractor:
# Debug-Log (harmonisiert) # Debug-Log (harmonisiert)
self.services.utils.writeDebugFile( self.services.utils.writeDebugFile(
json.dumps([part.dict() for part in allContentParts], indent=2, default=str), json.dumps([part.model_dump() for part in allContentParts], indent=2, default=str),
"content_extraction_result" "content_extraction_result"
) )

View file

@ -105,7 +105,7 @@ class DocumentIntentAnalyzer:
# Debug-Log (harmonisiert) # Debug-Log (harmonisiert)
self.services.utils.writeDebugFile( self.services.utils.writeDebugFile(
json.dumps([intent.dict() for intent in documentIntents], indent=2), json.dumps([intent.model_dump() for intent in documentIntents], indent=2),
"document_intent_analysis_result" "document_intent_analysis_result"
) )

View file

@ -99,11 +99,7 @@ class GenerationService:
if mime_type == "application/json": if mime_type == "application/json":
# Erstelle ActionDocument-Format mit validationMetadata und documentData # Erstelle ActionDocument-Format mit validationMetadata und documentData
if hasattr(document_data, 'model_dump'): if hasattr(document_data, 'model_dump'):
# Pydantic v2
document_data_dict = document_data.model_dump() document_data_dict = document_data.model_dump()
elif hasattr(document_data, 'dict'):
# Pydantic v1
document_data_dict = document_data.dict()
elif isinstance(document_data, dict): elif isinstance(document_data, dict):
document_data_dict = document_data document_data_dict = document_data
elif isinstance(document_data, str): elif isinstance(document_data, str):

View file

@ -130,8 +130,8 @@ class StructureGenerator:
# Convert ContentParts to dict format for JSON serialization # Convert ContentParts to dict format for JSON serialization
contentPartsList = [] contentPartsList = []
for part in contentParts: for part in contentParts:
if hasattr(part, 'dict'): if hasattr(part, 'model_dump'):
partDict = part.dict() partDict = part.model_dump()
elif isinstance(part, dict): elif isinstance(part, dict):
partDict = part partDict = part
else: else:

View file

@ -57,18 +57,47 @@ class FrontendType(str, Enum):
SHAREPOINT_FOLDER = "sharepointFolder" SHAREPOINT_FOLDER = "sharepointFolder"
"""SharePoint folder selector - requires connectionReference parameter in same action to load folders""" """SharePoint folder selector - requires connectionReference parameter in same action to load folders"""
# Additional custom types can be added here as needed SHAREPOINT_FILE = "sharepointFile"
# Examples: """SharePoint file selector - requires connectionReference parameter"""
# OUTLOOK_FOLDER = "outlookFolder"
# JIRA_PROJECT = "jiraProject" CLICKUP_LIST = "clickupList"
"""ClickUp list selector - requires connectionReference parameter"""
CLICKUP_TASK = "clickupTask"
"""ClickUp task selector - requires connectionReference parameter"""
# Complex Structure Types (for graph editor node configs)
CASE_LIST = "caseList"
"""Case list editor for flow.switch cases"""
FIELD_BUILDER = "fieldBuilder"
"""Field builder for input.form field definitions"""
KEY_VALUE_ROWS = "keyValueRows"
"""Key-value row editor for task update entries"""
CRON = "cron"
"""Cron expression builder"""
CONDITION = "condition"
"""Structured condition builder for flow.ifElse"""
MAPPING_TABLE = "mappingTable"
"""Mapping table editor for data.transform"""
FILTER_EXPRESSION = "filterExpression"
"""Filter expression builder for data.filter"""
# Mapping of custom types to their API endpoint for dynamic options # Mapping of custom types to their API endpoint for dynamic options
CUSTOM_TYPE_OPTIONS_API: Dict[FrontendType, str] = { CUSTOM_TYPE_OPTIONS_API: Dict[FrontendType, str] = {
FrontendType.USER_CONNECTION: "user.connection", FrontendType.USER_CONNECTION: "user.connection",
FrontendType.DOCUMENT_REFERENCE: "workflow.documentReference", # To be implemented FrontendType.DOCUMENT_REFERENCE: "workflow.documentReference",
FrontendType.WORKFLOW_ACTION: "workflow.action", # To be implemented FrontendType.WORKFLOW_ACTION: "workflow.action",
FrontendType.SHAREPOINT_FOLDER: "sharepoint.folder", # Dynamic - requires connectionReference FrontendType.SHAREPOINT_FOLDER: "sharepoint.folder",
FrontendType.SHAREPOINT_FILE: "sharepoint.file",
FrontendType.CLICKUP_LIST: "clickup.list",
FrontendType.CLICKUP_TASK: "clickup.task",
} }
# Mapping of custom types to their description # Mapping of custom types to their description
@ -93,6 +122,21 @@ CUSTOM_TYPE_DESCRIPTIONS: Dict[FrontendType, Dict[str, str]] = {
"fr": "Dossier SharePoint", "fr": "Dossier SharePoint",
"de": "SharePoint-Ordner" "de": "SharePoint-Ordner"
}, },
FrontendType.SHAREPOINT_FILE: {
"en": "SharePoint File",
"fr": "Fichier SharePoint",
"de": "SharePoint-Datei"
},
FrontendType.CLICKUP_LIST: {
"en": "ClickUp List",
"fr": "Liste ClickUp",
"de": "ClickUp-Liste"
},
FrontendType.CLICKUP_TASK: {
"en": "ClickUp Task",
"fr": "Tâche ClickUp",
"de": "ClickUp-Aufgabe"
},
} }

View file

@ -35,9 +35,10 @@ FEATURE_ICON = "mdi-cog"
# icon: Wird intern gehalten aber NICHT in der API Response zurückgegeben # icon: Wird intern gehalten aber NICHT in der API Response zurückgegeben
NAVIGATION_SECTIONS = [ NAVIGATION_SECTIONS = [
# ─── Meine Sicht (with top-level item + subgroups) ───
{ {
"id": "system", "id": "system",
"title": {"en": "SYSTEM", "de": "SYSTEM", "fr": "SYSTÈME"}, "title": {"en": "MY VIEW", "de": "MEINE SICHT", "fr": "MA VUE"},
"order": 10, "order": 10,
"items": [ "items": [
{ {
@ -49,37 +50,20 @@ NAVIGATION_SECTIONS = [
"order": 10, "order": 10,
"public": True, "public": True,
}, },
{
"id": "store",
"objectKey": "ui.system.store",
"label": {"en": "Store", "de": "Store", "fr": "Store"},
"icon": "FaStore",
"path": "/store",
"order": 15,
"public": True,
},
{
"id": "settings",
"objectKey": "ui.system.settings",
"label": {"en": "Settings", "de": "Einstellungen", "fr": "Paramètres"},
"icon": "FaCog",
"path": "/settings",
"order": 20,
"public": True,
},
], ],
}, "subgroups": [
# ── Basisdaten ──
{ {
"id": "basedata", "id": "system-basedata",
"title": {"en": "BASE DATA", "de": "BASISDATEN", "fr": "DONNÉES DE BASE"}, "title": {"en": "Base Data", "de": "Basisdaten", "fr": "Données de base"},
"order": 30, "order": 20,
"items": [ "items": [
{ {
"id": "prompts", "id": "connections",
"objectKey": "ui.system.prompts", "objectKey": "ui.system.connections",
"label": {"en": "Prompts", "de": "Prompts", "fr": "Prompts"}, "label": {"en": "Connections", "de": "Verbindungen", "fr": "Connexions"},
"icon": "FaLightbulb", "icon": "FaLink",
"path": "/basedata/prompts", "path": "/basedata/connections",
"order": 10, "order": 10,
}, },
{ {
@ -91,33 +75,68 @@ NAVIGATION_SECTIONS = [
"order": 20, "order": 20,
}, },
{ {
"id": "connections", "id": "prompts",
"objectKey": "ui.system.connections", "objectKey": "ui.system.prompts",
"label": {"en": "Connections", "de": "Verbindungen", "fr": "Connexions"}, "label": {"en": "Prompts", "de": "Prompts", "fr": "Prompts"},
"icon": "FaLink", "icon": "FaLightbulb",
"path": "/basedata/connections", "path": "/basedata/prompts",
"order": 30, "order": 30,
}, },
], ],
}, },
# ── Nutzung ──
{ {
"id": "billing", "id": "system-usage",
"title": {"en": "BILLING", "de": "BILLING", "fr": "FACTURATION"}, "title": {"en": "Usage", "de": "Nutzung", "fr": "Utilisation"},
"order": 35, "order": 30,
"items": [ "items": [
{ {
"id": "billing-transactions", "id": "billing-admin",
"objectKey": "ui.billing.transactions", "objectKey": "ui.system.billingAdmin",
"label": {"en": "Billing", "de": "Billing", "fr": "Facturation"}, "label": {"en": "Billing", "de": "Abrechnung", "fr": "Facturation"},
"icon": "FaWallet", "icon": "FaMoneyBillAlt",
"path": "/billing/transactions", "path": "/billing/admin",
"order": 10, "order": 10,
}, },
{
"id": "statistics",
"objectKey": "ui.system.statistics",
"label": {"en": "Statistics", "de": "Statistiken", "fr": "Statistiques"},
"icon": "FaChartBar",
"path": "/billing/transactions",
"order": 20,
},
{
"id": "automations",
"objectKey": "ui.system.automations",
"label": {"en": "Automations", "de": "Automations", "fr": "Automations"},
"icon": "FaRobot",
"path": "/automations",
"order": 30,
},
{
"id": "store",
"objectKey": "ui.system.store",
"label": {"en": "Store", "de": "Store", "fr": "Store"},
"icon": "FaStore",
"path": "/store",
"order": 40,
"public": True,
},
{
"id": "settings",
"objectKey": "ui.system.settings",
"label": {"en": "Settings", "de": "Einstellungen", "fr": "Paramètres"},
"icon": "FaCog",
"path": "/settings",
"order": 50,
"public": True,
},
],
},
], ],
}, },
# ─── Administration (with subgroups) ─── # ─── Administration (with subgroups) ───
# Access control is at item level, NOT section level.
# Groups auto-hide if 0 visible pages for the user.
{ {
"id": "admin", "id": "admin",
"title": {"en": "ADMINISTRATION", "de": "ADMINISTRATION", "fr": "ADMINISTRATION"}, "title": {"en": "ADMINISTRATION", "de": "ADMINISTRATION", "fr": "ADMINISTRATION"},
@ -182,22 +201,13 @@ NAVIGATION_SECTIONS = [
"order": 30, "order": 30,
"adminOnly": True, "adminOnly": True,
}, },
{
"id": "admin-billing",
"objectKey": "ui.admin.billing",
"label": {"en": "Billing Administration", "de": "Billing-Verwaltung", "fr": "Administration de facturation"},
"icon": "FaMoneyBillAlt",
"path": "/admin/billing",
"order": 40,
"adminOnly": True,
},
{ {
"id": "admin-subscriptions", "id": "admin-subscriptions",
"objectKey": "ui.admin.subscriptions", "objectKey": "ui.admin.subscriptions",
"label": {"en": "Subscriptions", "de": "Abonnements", "fr": "Abonnements"}, "label": {"en": "Subscriptions", "de": "Abonnements", "fr": "Abonnements"},
"icon": "FaFileContract", "icon": "FaFileContract",
"path": "/admin/subscriptions", "path": "/admin/subscriptions",
"order": 50, "order": 40,
"adminOnly": True, "adminOnly": True,
}, },
], ],
@ -282,6 +292,16 @@ NAVIGATION_SECTIONS = [
"adminOnly": True, "adminOnly": True,
"sysAdminOnly": True, "sysAdminOnly": True,
}, },
{
"id": "admin-languages",
"objectKey": "ui.admin.languages",
"label": {"en": "UI Languages", "de": "UI-Sprachen", "fr": "Langues UI"},
"icon": "FaGlobe",
"path": "/admin/languages",
"order": 95,
"adminOnly": True,
"sysAdminOnly": True,
},
], ],
}, },
], ],

View file

@ -22,9 +22,11 @@ from modules.workflows.automation2.executors import (
FlowExecutor, FlowExecutor,
ActionNodeExecutor, ActionNodeExecutor,
InputExecutor, InputExecutor,
DataExecutor,
PauseForHumanTaskError, PauseForHumanTaskError,
PauseForEmailWaitError, PauseForEmailWaitError,
) )
from modules.features.graphicalEditor.portTypes import _normalizeToSchema
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
from modules.workflows.automation2.runEnvelope import normalize_run_envelope from modules.workflows.automation2.runEnvelope import normalize_run_envelope
@ -43,10 +45,8 @@ def _is_node_on_active_path(
) -> bool: ) -> bool:
""" """
Return True if this node receives input only from active branches. Return True if this node receives input only from active branches.
- flow.ifElse: only one output (0=yes, 1=no) is active; uses "branch". Transit envelopes: routing metadata is in out["_meta"] (branch/match).
- flow.switch: only one output (0, 1, 2, ...) is active; uses "match". Legacy format: branch/match directly on out.
Nodes connected to inactive outputs must be skipped.
Also skip when a predecessor was skipped (not in nodeOutputs).
""" """
for src, source_output, _ in connectionMap.get(nodeId, []): for src, source_output, _ in connectionMap.get(nodeId, []):
out = nodeOutputs.get(src) out = nodeOutputs.get(src)
@ -54,14 +54,18 @@ def _is_node_on_active_path(
return False return False
if not isinstance(out, dict): if not isinstance(out, dict):
continue continue
branch = out.get("branch")
match = out.get("match") # Transit envelope: metadata in _meta
meta = out.get("_meta", {}) if out.get("_transit") else out
branch = meta.get("branch")
match = meta.get("match")
active_output = None active_output = None
if branch is not None: if branch is not None:
active_output = branch active_output = branch
elif match is not None: elif match is not None:
if match < 0: if match < 0:
return False # switch: no case matched, skip all downstream return False
active_output = match active_output = match
if active_output is not None and source_output != active_output: if active_output is not None and source_output != active_output:
return False return False
@ -78,7 +82,11 @@ def _getExecutor(
return TriggerExecutor() return TriggerExecutor()
if nodeType.startswith("flow."): if nodeType.startswith("flow."):
return FlowExecutor() return FlowExecutor()
if nodeType.startswith("ai.") or nodeType.startswith("email.") or nodeType.startswith("sharepoint.") or nodeType.startswith("clickup.") or nodeType.startswith("file."): if nodeType.startswith("data."):
return DataExecutor()
if (nodeType.startswith("ai.") or nodeType.startswith("email.")
or nodeType.startswith("sharepoint.") or nodeType.startswith("clickup.")
or nodeType.startswith("file.") or nodeType.startswith("trustee.")):
return ActionNodeExecutor(services) return ActionNodeExecutor(services)
if nodeType.startswith("input.") and automation2_interface: if nodeType.startswith("input.") and automation2_interface:
return InputExecutor(automation2_interface) return InputExecutor(automation2_interface)
@ -88,6 +96,11 @@ def _getExecutor(
_stepMeta: Dict[str, Dict[str, str]] = {} _stepMeta: Dict[str, Dict[str, str]] = {}
def _serializableOutputs(nodeOutputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return a shallow copy of nodeOutputs without the circular _context reference."""
return {k: v for k, v in nodeOutputs.items() if k != "_context"}
def _emitStepEvent(runId: str, stepData: Dict[str, Any]) -> None: def _emitStepEvent(runId: str, stepData: Dict[str, Any]) -> None:
"""Emit a step-log SSE event to any listening client for this run.""" """Emit a step-log SSE event to any listening client for this run."""
try: try:
@ -283,9 +296,12 @@ async def executeGraph(
"_orderedNodes": ordered, "_orderedNodes": ordered,
"runEnvelope": env_for_run, "runEnvelope": env_for_run,
} }
# _context key in nodeOutputs for system variable resolution
nodeOutputs["_context"] = context
skip_until_passed = bool(startAfterNodeId) skip_until_passed = bool(startAfterNodeId)
processed_in_loop: Set[str] = set() processed_in_loop: Set[str] = set()
_aggregateAccumulators: Dict[str, list] = {}
# Check for loop resume: run was paused inside a loop, we're resuming for next iteration # Check for loop resume: run was paused inside a loop, we're resuming for next iteration
run = automation2_interface.getRun(runId) if (runId and automation2_interface) else None run = automation2_interface.getRun(runId) if (runId and automation2_interface) else None
@ -323,6 +339,11 @@ async def executeGraph(
_rStepId = _createStepLog(automation2_interface, runId, bnid, body_node.get("type", ""), "running", _rInputSnap) _rStepId = _createStepLog(automation2_interface, runId, bnid, body_node.get("type", ""), "running", _rInputSnap)
try: try:
result, _rRetry = await _executeWithRetry(executor, body_node, context) result, _rRetry = await _executeWithRetry(executor, body_node, context)
if body_node.get("type") == "data.aggregate":
if bnid not in _aggregateAccumulators:
_aggregateAccumulators[bnid] = []
accItems = result.get("items", [result]) if isinstance(result, dict) else [result]
_aggregateAccumulators[bnid].extend(accItems)
nodeOutputs[bnid] = result nodeOutputs[bnid] = result
_rDur = int((time.time() - _rStepStart) * 1000) _rDur = int((time.time() - _rStepStart) * 1000)
_updateStepLog(automation2_interface, _rStepId, "completed", _updateStepLog(automation2_interface, _rStepId, "completed",
@ -335,8 +356,8 @@ async def executeGraph(
if automation2_interface: if automation2_interface:
run_ctx = dict(run.get("context") or {}) run_ctx = dict(run.get("context") or {})
run_ctx["_loopState"] = {"loopNodeId": loop_node_id, "currentIndex": next_index, "items": items} run_ctx["_loopState"] = {"loopNodeId": loop_node_id, "currentIndex": next_index, "items": items}
automation2_interface.updateRun(e.runId, status="paused", nodeOutputs=dict(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx) automation2_interface.updateRun(e.runId, status="paused", nodeOutputs=_serializableOutputs(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx)
return {"success": False, "paused": True, "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, "nodeOutputs": dict(nodeOutputs)} return {"success": False, "paused": True, "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, "nodeOutputs": _serializableOutputs(nodeOutputs)}
except PauseForEmailWaitError as e: except PauseForEmailWaitError as e:
_updateStepLog(automation2_interface, _rStepId, "completed", _updateStepLog(automation2_interface, _rStepId, "completed",
durationMs=int((time.time() - _rStepStart) * 1000)) durationMs=int((time.time() - _rStepStart) * 1000))
@ -347,11 +368,14 @@ async def executeGraph(
logger.exception("executeGraph loop body node %s FAILED: %s", bnid, ex) logger.exception("executeGraph loop body node %s FAILED: %s", bnid, ex)
nodeOutputs[bnid] = {"error": str(ex), "success": False} nodeOutputs[bnid] = {"error": str(ex), "success": False}
if runId and automation2_interface: if runId and automation2_interface:
automation2_interface.updateRun(runId, status="failed", nodeOutputs=nodeOutputs) automation2_interface.updateRun(runId, status="failed", nodeOutputs=_serializableOutputs(nodeOutputs))
return {"success": False, "error": str(ex), "nodeOutputs": nodeOutputs, "failedNode": bnid, "runId": runId} return {"success": False, "error": str(ex), "nodeOutputs": _serializableOutputs(nodeOutputs), "failedNode": bnid, "runId": runId}
next_index += 1 next_index += 1
if loop_node_id: if loop_node_id:
nodeOutputs[loop_node_id] = {"items": items, "count": len(items)} nodeOutputs[loop_node_id] = {"items": items, "count": len(items)}
for aggId, accItems in _aggregateAccumulators.items():
nodeOutputs[aggId] = {"items": accItems, "count": len(accItems), "_success": True}
_aggregateAccumulators.clear()
processed_in_loop = set(body_ids) | {loop_node_id} processed_in_loop = set(body_ids) | {loop_node_id}
for i, node in enumerate(ordered): for i, node in enumerate(ordered):
@ -425,6 +449,12 @@ async def executeGraph(
_bStepId = _createStepLog(automation2_interface, runId, bnid, body_node.get("type", ""), "running", _bInputSnap) _bStepId = _createStepLog(automation2_interface, runId, bnid, body_node.get("type", ""), "running", _bInputSnap)
try: try:
bres, _bRetry = await _executeWithRetry(bexec, body_node, context) bres, _bRetry = await _executeWithRetry(bexec, body_node, context)
# data.aggregate: accumulate instead of overwrite
if body_node.get("type") == "data.aggregate":
if bnid not in _aggregateAccumulators:
_aggregateAccumulators[bnid] = []
accItems = bres.get("items", [bres]) if isinstance(bres, dict) else [bres]
_aggregateAccumulators[bnid].extend(accItems)
nodeOutputs[bnid] = bres nodeOutputs[bnid] = bres
_bDur = int((time.time() - _bStepStart) * 1000) _bDur = int((time.time() - _bStepStart) * 1000)
_updateStepLog(automation2_interface, _bStepId, "completed", _updateStepLog(automation2_interface, _bStepId, "completed",
@ -438,8 +468,8 @@ async def executeGraph(
run = automation2_interface.getRun(runId) or {} run = automation2_interface.getRun(runId) or {}
run_ctx = dict(run.get("context") or {}) run_ctx = dict(run.get("context") or {})
run_ctx["_loopState"] = {"loopNodeId": nodeId, "currentIndex": idx, "items": items} run_ctx["_loopState"] = {"loopNodeId": nodeId, "currentIndex": idx, "items": items}
automation2_interface.updateRun(e.runId, status="paused", nodeOutputs=dict(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx) automation2_interface.updateRun(e.runId, status="paused", nodeOutputs=_serializableOutputs(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx)
return {"success": False, "paused": True, "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, "nodeOutputs": dict(nodeOutputs)} return {"success": False, "paused": True, "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, "nodeOutputs": _serializableOutputs(nodeOutputs)}
except PauseForEmailWaitError as e: except PauseForEmailWaitError as e:
_updateStepLog(automation2_interface, _bStepId, "completed", _updateStepLog(automation2_interface, _bStepId, "completed",
durationMs=int((time.time() - _bStepStart) * 1000)) durationMs=int((time.time() - _bStepStart) * 1000))
@ -450,9 +480,13 @@ async def executeGraph(
logger.exception("executeGraph loop body node %s FAILED: %s", bnid, ex) logger.exception("executeGraph loop body node %s FAILED: %s", bnid, ex)
nodeOutputs[bnid] = {"error": str(ex), "success": False} nodeOutputs[bnid] = {"error": str(ex), "success": False}
if runId and automation2_interface: if runId and automation2_interface:
automation2_interface.updateRun(runId, status="failed", nodeOutputs=nodeOutputs) automation2_interface.updateRun(runId, status="failed", nodeOutputs=_serializableOutputs(nodeOutputs))
return {"success": False, "error": str(ex), "nodeOutputs": nodeOutputs, "failedNode": bnid, "runId": runId} return {"success": False, "error": str(ex), "nodeOutputs": _serializableOutputs(nodeOutputs), "failedNode": bnid, "runId": runId}
nodeOutputs[nodeId] = {"items": items, "count": len(items)} nodeOutputs[nodeId] = {"items": items, "count": len(items)}
# Finalize aggregate accumulators after loop
for aggId, accItems in _aggregateAccumulators.items():
nodeOutputs[aggId] = {"items": accItems, "count": len(accItems), "_success": True}
_aggregateAccumulators.clear()
_updateStepLog(automation2_interface, _stepId, "completed", _updateStepLog(automation2_interface, _stepId, "completed",
output={"iterationCount": len(items), "items": len(items)}, output={"iterationCount": len(items), "items": len(items)},
durationMs=int((time.time() - _stepStartMs) * 1000)) durationMs=int((time.time() - _stepStartMs) * 1000))
@ -489,13 +523,12 @@ async def executeGraph(
"taskId": e.taskId, "taskId": e.taskId,
"runId": e.runId, "runId": e.runId,
"nodeId": e.nodeId, "nodeId": e.nodeId,
"nodeOutputs": dict(nodeOutputs), "nodeOutputs": _serializableOutputs(nodeOutputs),
} }
except PauseForEmailWaitError as e: except PauseForEmailWaitError as e:
_updateStepLog(automation2_interface, _stepId, "completed", _updateStepLog(automation2_interface, _stepId, "completed",
durationMs=int((time.time() - _stepStartMs) * 1000)) durationMs=int((time.time() - _stepStartMs) * 1000))
logger.info("executeGraph paused for email wait (run %s, node %s)", e.runId, e.nodeId) logger.info("executeGraph paused for email wait (run %s, node %s)", e.runId, e.nodeId)
# Start email poller on-demand (only runs while workflows wait for email)
try: try:
from modules.interfaces.interfaceDbApp import getRootInterface from modules.interfaces.interfaceDbApp import getRootInterface
from modules.features.graphicalEditor.emailPoller import ensureRunning from modules.features.graphicalEditor.emailPoller import ensureRunning
@ -521,7 +554,7 @@ async def executeGraph(
automation2_interface.updateRun( automation2_interface.updateRun(
e.runId, e.runId,
status="paused", status="paused",
nodeOutputs=dict(nodeOutputs), nodeOutputs=_serializableOutputs(nodeOutputs),
currentNodeId=e.nodeId, currentNodeId=e.nodeId,
context=run_ctx, context=run_ctx,
) )
@ -531,7 +564,7 @@ async def executeGraph(
"waitReason": "email", "waitReason": "email",
"runId": e.runId, "runId": e.runId,
"nodeId": e.nodeId, "nodeId": e.nodeId,
"nodeOutputs": dict(nodeOutputs), "nodeOutputs": _serializableOutputs(nodeOutputs),
} }
except Exception as e: except Exception as e:
logger.exception("executeGraph node %s (%s) FAILED: %s", nodeId, nodeType, e) logger.exception("executeGraph node %s (%s) FAILED: %s", nodeId, nodeType, e)
@ -539,7 +572,7 @@ async def executeGraph(
_durMs = int((time.time() - _stepStartMs) * 1000) _durMs = int((time.time() - _stepStartMs) * 1000)
_updateStepLog(automation2_interface, _stepId, "failed", error=str(e), durationMs=_durMs) _updateStepLog(automation2_interface, _stepId, "failed", error=str(e), durationMs=_durMs)
if runId and automation2_interface: if runId and automation2_interface:
automation2_interface.updateRun(runId, status="failed", nodeOutputs=nodeOutputs) automation2_interface.updateRun(runId, status="failed", nodeOutputs=_serializableOutputs(nodeOutputs))
if runId: if runId:
_emitStepEvent(runId, {"type": "run_failed", "runId": runId, "status": "failed", "error": str(e), "failedNode": nodeId}) _emitStepEvent(runId, {"type": "run_failed", "runId": runId, "status": "failed", "error": str(e), "failedNode": nodeId})
try: try:
@ -560,13 +593,15 @@ async def executeGraph(
return { return {
"success": False, "success": False,
"error": str(e), "error": str(e),
"nodeOutputs": nodeOutputs, "nodeOutputs": _serializableOutputs(nodeOutputs),
"failedNode": nodeId, "failedNode": nodeId,
"runId": runId, "runId": runId,
} }
_safeOutputs = _serializableOutputs(nodeOutputs)
if runId and automation2_interface: if runId and automation2_interface:
automation2_interface.updateRun(runId, status="completed", nodeOutputs=nodeOutputs) automation2_interface.updateRun(runId, status="completed", nodeOutputs=_safeOutputs)
if runId: if runId:
_emitStepEvent(runId, {"type": "run_complete", "runId": runId, "status": "completed"}) _emitStepEvent(runId, {"type": "run_complete", "runId": runId, "status": "completed"})
logger.info( logger.info(
@ -576,7 +611,7 @@ async def executeGraph(
) )
return { return {
"success": True, "success": True,
"nodeOutputs": nodeOutputs, "nodeOutputs": _safeOutputs,
"stopped": context.get("_stopped", False), "stopped": context.get("_stopped", False),
"runId": runId, "runId": runId,
} }

View file

@ -5,12 +5,14 @@ from .triggerExecutor import TriggerExecutor
from .flowExecutor import FlowExecutor from .flowExecutor import FlowExecutor
from .actionNodeExecutor import ActionNodeExecutor from .actionNodeExecutor import ActionNodeExecutor
from .inputExecutor import InputExecutor, PauseForHumanTaskError, PauseForEmailWaitError from .inputExecutor import InputExecutor, PauseForHumanTaskError, PauseForEmailWaitError
from .dataExecutor import DataExecutor
__all__ = [ __all__ = [
"TriggerExecutor", "TriggerExecutor",
"FlowExecutor", "FlowExecutor",
"ActionNodeExecutor", "ActionNodeExecutor",
"InputExecutor", "InputExecutor",
"DataExecutor",
"PauseForHumanTaskError", "PauseForHumanTaskError",
"PauseForEmailWaitError", "PauseForEmailWaitError",
] ]

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,214 @@
# Copyright (c) 2025 Patrick Motsch
# Data manipulation node executor: data.aggregate, data.transform, data.filter.
import logging
from typing import Any, Dict
from modules.features.graphicalEditor.portTypes import _unwrapTransit, _wrapTransit
logger = logging.getLogger(__name__)
class DataExecutor:
    """Execute data-manipulation nodes: data.aggregate, data.transform, data.filter.

    Dispatches on the node ``type`` and returns the node's output value.
    Transit envelopes (dicts carrying ``_transit``) are unwrapped before use.
    """

    async def execute(
        self,
        node: Dict[str, Any],
        context: Dict[str, Any],
    ) -> Any:
        """Dispatch *node* to the matching handler.

        Returns the handler's result, or None for an unhandled node type.
        """
        nodeType = node.get("type", "")
        nodeId = node.get("id", "")
        nodeOutputs = context.get("nodeOutputs", {})
        inputSources = context.get("inputSources", {}).get(nodeId, {})
        logger.info("DataExecutor node %s type=%s", nodeId, nodeType)
        if nodeType == "data.aggregate":
            return await self._aggregate(node, nodeOutputs, nodeId, inputSources, context)
        if nodeType == "data.transform":
            return await self._transform(node, nodeOutputs, nodeId, inputSources)
        if nodeType == "data.filter":
            return await self._filter(node, nodeOutputs, nodeId, inputSources)
        logger.debug("DataExecutor node %s unhandled type %s", nodeId, nodeType)
        return None

    async def _aggregate(
        self,
        node: Dict,
        nodeOutputs: Dict,
        nodeId: str,
        inputSources: Dict,
        context: Dict,
    ) -> Any:
        """Aggregate the single upstream input into an ``items`` envelope.

        In loop context the cross-iteration accumulation is handled by the
        engine (``_aggregateAccumulators``); this method only shapes one
        iteration's contribution. Outside a loop it collects the single input.

        Modes (parameter ``mode``, default "collect"):
          * collect -- wrap the input as a one-element list
          * concat  -- flatten a list input, otherwise wrap it
          * sum     -- contribute the numeric value (non-numeric -> 0)
          * count   -- contribute 1 per non-None input
        """
        inp = self._getInput(inputSources, nodeOutputs)
        mode = (node.get("parameters") or {}).get("mode", "collect")
        if inp is None:
            return {"items": [], "count": 0, "_success": True}
        data = _unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp
        if mode == "collect":
            items = [data] if data is not None else []
        elif mode == "concat":
            items = data if isinstance(data, list) else [data] if data is not None else []
        elif mode == "sum":
            # Non-numeric inputs contribute 0 rather than failing the run.
            val = data if isinstance(data, (int, float)) else 0
            items = [val]
        elif mode == "count":
            items = [1] if data is not None else []
        else:
            # Unknown mode: fall back to collect semantics.
            items = [data] if data is not None else []
        return {"items": items, "count": len(items), "_success": True}

    async def _transform(
        self,
        node: Dict,
        nodeOutputs: Dict,
        nodeId: str,
        inputSources: Dict,
    ) -> Any:
        """Restructure data according to the node's ``mappings`` parameter.

        Each mapping is ``{outputField, source?, sourceField?}``:
          * source {type: "ref"}   -> resolved against upstream node outputs
          * source {type: "value"} -> literal value
          * sourceField            -> picked off the (dict) input data
          * otherwise              -> the raw ``source`` value is passed through
        """
        from modules.workflows.automation2.graphUtils import resolveParameterReferences

        inp = self._getInput(inputSources, nodeOutputs)
        data = _unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp
        mappings = (node.get("parameters") or {}).get("mappings", [])
        result = {}
        for mapping in mappings:
            if not isinstance(mapping, dict):
                continue
            outputField = mapping.get("outputField")
            if not outputField:
                continue
            source = mapping.get("source")
            if source and isinstance(source, dict) and source.get("type") == "ref":
                result[outputField] = resolveParameterReferences(source, nodeOutputs)
            elif source and isinstance(source, dict) and source.get("type") == "value":
                result[outputField] = source.get("value")
            elif isinstance(data, dict) and mapping.get("sourceField"):
                result[outputField] = data.get(mapping["sourceField"])
            else:
                result[outputField] = source
        result["_success"] = True
        return result

    async def _filter(
        self,
        node: Dict,
        nodeOutputs: Dict,
        nodeId: str,
        inputSources: Dict,
    ) -> Any:
        """Filter items by the node's ``condition``; returns a Transit envelope.

        The input's main list (see _extractItems/_findListKey) is filtered in
        place of the original list; the envelope metadata reports both the
        original and the filtered counts.
        """
        inp = self._getInput(inputSources, nodeOutputs)
        data = _unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp
        condition = (node.get("parameters") or {}).get("condition", "")
        items = self._extractItems(data)
        originalCount = len(items)
        if not condition:
            filtered = items
        else:
            filtered = [item for item in items if self._evalFilterCondition(item, condition)]
        filteredData = data
        if isinstance(data, dict):
            filteredData = dict(data)
            listKey = self._findListKey(data)
            if listKey:
                filteredData[listKey] = filtered
        elif isinstance(data, list):
            filteredData = filtered
        return _wrapTransit(filteredData, {
            "originalCount": originalCount,
            "filteredCount": len(filtered),
        })

    def _getInput(self, inputSources: Dict, nodeOutputs: Dict) -> Any:
        """Return the upstream output of the lowest-numbered connected input port.

        Fix: previously only port 0 was consulted, so a node wired solely on a
        higher port index silently received None despite having a connection.
        """
        if not inputSources:
            return None
        srcId, _ = inputSources[min(inputSources)]
        return nodeOutputs.get(srcId)

    def _extractItems(self, data: Any) -> list:
        """Extract the main list of items from various data shapes.

        Lists pass through; dicts are probed for well-known list keys;
        anything else yields an empty list.
        """
        if isinstance(data, list):
            return data
        if isinstance(data, dict):
            for key in ("items", "tasks", "emails", "files", "documents", "documentList"):
                val = data.get(key)
                if isinstance(val, list):
                    return val
        return []

    def _findListKey(self, data: Dict) -> str:
        """Return the well-known key holding the main list in *data*, or ""."""
        for key in ("items", "tasks", "emails", "files", "documents", "documentList"):
            if isinstance(data.get(key), list):
                return key
        return ""

    def _evalFilterCondition(self, item: Any, condition: Any) -> bool:
        """Evaluate a filter condition against a single item.

        Supports structured conditions ``{field, operator, value}`` or simple
        string expressions evaluated against a dict item's fields.
        """
        if isinstance(condition, dict):
            field = condition.get("field", "")
            operator = condition.get("operator", "eq")
            value = condition.get("value")
            left = item.get(field) if isinstance(item, dict) else item
            return self._compareValues(left, operator, value)
        if isinstance(condition, str) and condition.strip():
            # SECURITY NOTE: eval() on a workflow-authored expression. Builtins
            # are blanked out, but this kind of sandbox is known to be escapable;
            # prefer the structured {field, operator, value} form for graphs
            # authored by untrusted users.
            try:
                if isinstance(item, dict):
                    return bool(eval(condition, {"__builtins__": {}}, item))
                return bool(item)
            except Exception:
                # Best-effort: an unevaluable expression keeps the item.
                return True
        return True

    def _compareValues(self, left: Any, operator: str, right: Any) -> bool:
        """Compare two values with the given operator; unknown operators pass.

        Numeric operators coerce both sides to float (None -> 0) and return
        False on coercion failure.
        """
        if operator == "eq":
            return left == right
        if operator == "neq":
            return left != right
        if operator == "contains":
            return right is not None and str(right) in str(left or "")
        if operator == "startsWith":
            return str(left or "").startswith(str(right or ""))
        if operator == "isEmpty":
            return left is None or left == "" or (isinstance(left, (list, dict)) and len(left) == 0)
        if operator == "isNotEmpty":
            return left is not None and left != "" and (not isinstance(left, (list, dict)) or len(left) > 0)
        if operator in ("lt", "lte", "gt", "gte"):
            try:
                l = float(left) if left is not None else 0
                r = float(right) if right is not None else 0
                if operator == "lt":
                    return l < r
                if operator == "lte":
                    return l <= r
                if operator == "gt":
                    return l > r
                return l >= r
            except (TypeError, ValueError):
                return False
        return True

View file

@ -1,9 +1,11 @@
# Copyright (c) 2025 Patrick Motsch # Copyright (c) 2025 Patrick Motsch
# Flow control node executor (ifElse, switch, loop). # Flow control node executor (ifElse, switch, loop, merge).
import logging import logging
from typing import Any, Dict from typing import Any, Dict
from modules.features.graphicalEditor.portTypes import _wrapTransit, _unwrapTransit
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -30,16 +32,20 @@ class FlowExecutor:
if nodeType == "flow.ifElse": if nodeType == "flow.ifElse":
out = await self._ifElse(node, nodeOutputs, nodeId, inputSources) out = await self._ifElse(node, nodeOutputs, nodeId, inputSources)
logger.info("FlowExecutor node %s ifElse -> %s", nodeId, out) logger.info("FlowExecutor node %s ifElse -> branch=%s", nodeId, out.get("_meta", {}).get("branch"))
return out return out
if nodeType == "flow.switch": if nodeType == "flow.switch":
out = await self._switch(node, nodeOutputs, nodeId, inputSources) out = await self._switch(node, nodeOutputs, nodeId, inputSources)
logger.info("FlowExecutor node %s switch -> %s", nodeId, out) logger.info("FlowExecutor node %s switch -> match=%s", nodeId, out.get("_meta", {}).get("match"))
return out return out
if nodeType == "flow.loop": if nodeType == "flow.loop":
out = await self._loop(node, nodeOutputs, nodeId, inputSources) out = await self._loop(node, nodeOutputs, nodeId, inputSources)
logger.info("FlowExecutor node %s loop -> %s", nodeId, out) logger.info("FlowExecutor node %s loop -> %s", nodeId, out)
return out return out
if nodeType == "flow.merge":
out = await self._merge(node, nodeOutputs, nodeId, inputSources, context)
logger.info("FlowExecutor node %s merge -> keys=%s", nodeId, list(out.keys()) if isinstance(out, dict) else None)
return out
logger.debug("FlowExecutor node %s unhandled type %s -> None", nodeId, nodeType) logger.debug("FlowExecutor node %s unhandled type %s -> None", nodeId, nodeType)
return None return None
@ -62,7 +68,10 @@ class FlowExecutor:
condParam = (node.get("parameters") or {}).get("condition") condParam = (node.get("parameters") or {}).get("condition")
inp = self._getInputData(nodeId, {nodeId: inputSources}, nodeOutputs) inp = self._getInputData(nodeId, {nodeId: inputSources}, nodeOutputs)
ok = self._evalConditionParam(condParam, nodeOutputs) ok = self._evalConditionParam(condParam, nodeOutputs)
return {"branch": 0 if ok else 1, "conditionResult": ok, "input": inp} return _wrapTransit(
_unwrapTransit(inp) if inp else inp,
{"branch": 0 if ok else 1, "conditionResult": ok},
)
def _evalConditionParam(self, condParam: Any, nodeOutputs: Dict) -> bool: def _evalConditionParam(self, condParam: Any, nodeOutputs: Dict) -> bool:
"""Evaluate condition: structured {type,ref,operator,value} or legacy string/ref.""" """Evaluate condition: structured {type,ref,operator,value} or legacy string/ref."""
@ -201,10 +210,17 @@ class FlowExecutor:
from modules.workflows.automation2.graphUtils import resolveParameterReferences from modules.workflows.automation2.graphUtils import resolveParameterReferences
value = resolveParameterReferences(valueExpr, nodeOutputs) value = resolveParameterReferences(valueExpr, nodeOutputs)
cases = (node.get("parameters") or {}).get("cases", []) cases = (node.get("parameters") or {}).get("cases", [])
inp = self._getInputData(nodeId, {nodeId: inputSources}, nodeOutputs)
for i, c in enumerate(cases): for i, c in enumerate(cases):
if self._evalSwitchCase(value, c): if self._evalSwitchCase(value, c):
return {"match": i, "value": value} return _wrapTransit(
return {"match": -1, "value": value} _unwrapTransit(inp) if inp else inp,
{"match": i, "value": value},
)
return _wrapTransit(
_unwrapTransit(inp) if inp else inp,
{"match": -1, "value": value},
)
def _evalSwitchCase(self, left: Any, case: Any) -> bool: def _evalSwitchCase(self, left: Any, case: Any) -> bool:
""" """
@ -265,8 +281,47 @@ class FlowExecutor:
if isinstance(items, list): if isinstance(items, list):
pass pass
elif isinstance(items, dict): elif isinstance(items, dict):
# Convert form payload / object to list of {name, value} for "for each field"
items = [{"name": k, "value": v} for k, v in items.items()] items = [{"name": k, "value": v} for k, v in items.items()]
else: else:
items = [items] if items is not None else [] items = [items] if items is not None else []
return {"items": items, "count": len(items)} return {"items": items, "count": len(items)}
async def _merge(self, node: Dict, nodeOutputs: Dict, nodeId: str, inputSources: Dict, context: Dict) -> Any:
"""Merge multiple branch inputs. mode: first | all | append."""
mode = (node.get("parameters") or {}).get("mode", "first")
inputs: Dict[int, Any] = {}
for portIdx, (srcId, srcOut) in inputSources.items():
out = nodeOutputs.get(srcId)
if out is not None:
inputs[portIdx] = _unwrapTransit(out)
first = None
merged: Dict = {}
for idx in sorted(inputs.keys()):
val = inputs[idx]
if first is None:
first = val
if isinstance(val, dict):
merged.update(val)
if mode == "first":
pass
elif mode == "all":
pass
elif mode == "append":
allItems = []
for val in inputs.values():
if isinstance(val, list):
allItems.extend(val)
elif isinstance(val, dict) and "items" in val:
allItems.extend(val["items"])
elif val is not None:
allItems.append(val)
merged["items"] = allItems
return {
"inputs": inputs,
"first": first,
"merged": merged,
"_success": True,
}

View file

@ -113,6 +113,11 @@ def validateGraph(graph: Dict[str, Any], nodeTypeIds: Set[str]) -> List[str]:
if nid not in nodeIds: if nid not in nodeIds:
errors.append(f"Connection references non-existent node {nid}") errors.append(f"Connection references non-existent node {nid}")
# Soft port compatibility check (warnings, not errors)
warnings = _checkPortCompatibility(nodes, connMap)
if warnings:
logger.info("validateGraph port warnings: %s", warnings)
if errors: if errors:
logger.debug("validateGraph errors: %s", errors) logger.debug("validateGraph errors: %s", errors)
else: else:
@ -120,6 +125,55 @@ def validateGraph(graph: Dict[str, Any], nodeTypeIds: Set[str]) -> List[str]:
return errors return errors
def _checkPortCompatibility(
    nodes: List[Dict],
    connMap: Dict[str, List[Tuple[str, int, int]]],
) -> List[str]:
    """Collect warnings for connections whose port schemas do not line up.

    Purely advisory: the caller only logs the result, it never blocks
    execution. A connection is flagged when the source port declares a
    schema, the target port declares an "accepts" list without the
    universal "Transit" type, and the schema is not in that list.
    """
    from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES

    defsByTypeId = {d["id"]: d for d in STATIC_NODE_TYPES}
    nodesById = {n["id"]: n for n in nodes if n.get("id")}
    warnings: List[str] = []

    for targetId, edges in connMap.items():
        targetNode = nodesById.get(targetId)
        targetDef = defsByTypeId.get(targetNode.get("type", "")) if targetNode else None
        if not targetDef:
            # Unknown node or node type: nothing to check against.
            continue
        inputPorts = targetDef.get("inputPorts", {})
        for sourceId, sourceOut, targetIn in edges:
            sourceNode = nodesById.get(sourceId)
            sourceDef = defsByTypeId.get(sourceNode.get("type", "")) if sourceNode else None
            if not sourceDef:
                continue
            sourceSchema = sourceDef.get("outputPorts", {}).get(sourceOut, {}).get("schema", "")
            accepts = inputPorts.get(targetIn, {}).get("accepts", [])
            # Missing metadata or a universal "Transit" port is never a mismatch.
            if not accepts or not sourceSchema or "Transit" in accepts:
                continue
            if sourceSchema not in accepts:
                warnings.append(
                    f"Port mismatch: {sourceId}[out:{sourceOut}] ({sourceSchema}) -> {targetId}[in:{targetIn}] (accepts: {accepts})"
                )
    return warnings
def topoSort(nodes: List[Dict], connectionMap: Dict[str, List[Tuple[str, int, int]]]) -> List[Dict]: def topoSort(nodes: List[Dict], connectionMap: Dict[str, List[Tuple[str, int, int]]]) -> List[Dict]:
""" """
Topological sort: start from trigger nodes, then BFS by connections. Topological sort: start from trigger nodes, then BFS by connections.
@ -198,9 +252,11 @@ def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any:
path = value.get("path") path = value.get("path")
if node_id is not None and isinstance(path, (list, tuple)): if node_id is not None and isinstance(path, (list, tuple)):
data = nodeOutputs.get(node_id) data = nodeOutputs.get(node_id)
# Unwrap transit envelopes to access the real data
if isinstance(data, dict) and data.get("_transit"):
data = data.get("data", data)
plist = list(path) plist = list(path)
resolved = _get_by_path(data, plist) resolved = _get_by_path(data, plist)
# input.form historically stored flat field dict; refs use payload.<field>
if ( if (
resolved is None resolved is None
and isinstance(data, dict) and isinstance(data, dict)
@ -214,6 +270,10 @@ def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any:
if value.get("type") == "value": if value.get("type") == "value":
inner = value.get("value") inner = value.get("value")
return resolveParameterReferences(inner, nodeOutputs) return resolveParameterReferences(inner, nodeOutputs)
if value.get("type") == "system":
variable = value.get("variable", "")
from modules.features.graphicalEditor.portTypes import _resolveSystemVariable
return _resolveSystemVariable(variable, nodeOutputs.get("_context", {}))
return {k: resolveParameterReferences(v, nodeOutputs) for k, v in value.items()} return {k: resolveParameterReferences(v, nodeOutputs) for k, v in value.items()}
if isinstance(value, str): if isinstance(value, str):

View file

@ -15,7 +15,7 @@ import io
from datetime import datetime, timezone from datetime import datetime, timezone
from typing import Dict, Any, List, Optional, Tuple from typing import Dict, Any, List, Optional, Tuple
from modules.datamodels.datamodelChat import ActionResult, ActionDocument, ChatDocument from modules.datamodels.datamodelChat import ActionResult, ActionDocument, ChatDocument, ChatMessage
from modules.datamodels.datamodelDocref import DocumentReferenceList, DocumentItemReference from modules.datamodels.datamodelDocref import DocumentReferenceList, DocumentItemReference
from modules.datamodels.datamodelAi import AiCallOptions, AiCallRequest, OperationTypeEnum from modules.datamodels.datamodelAi import AiCallOptions, AiCallRequest, OperationTypeEnum
@ -500,10 +500,13 @@ async def extractFromFiles(self, parameters: Dict[str, Any]) -> ActionResult:
if not filesToProcess: if not filesToProcess:
return ActionResult.isSuccess(documents=[]) return ActionResult.isSuccess(documents=[])
# Attach all files as ChatDocuments to the workflow so AI can resolve them # Attach all files as ChatDocuments so AI can resolve them via DocumentReferenceList.
chatDocDumps = [] # When running inside the graph engine there is no real ChatWorkflow (workflow.id is None),
# so we create in-memory ChatDocument objects and inject them directly into the placeholder
# workflow's messages list instead of going through storeMessageWithDocuments.
chatDocs = []
for f in filesToProcess: for f in filesToProcess:
chatDoc = ChatDocument( chatDocs.append(ChatDocument(
id=str(uuid.uuid4()), id=str(uuid.uuid4()),
mandateId=self.services.mandateId or "", mandateId=self.services.mandateId or "",
featureInstanceId=featureInstanceId or "", featureInstanceId=featureInstanceId or "",
@ -512,8 +515,14 @@ async def extractFromFiles(self, parameters: Dict[str, Any]) -> ActionResult:
fileName=f["fileName"], fileName=f["fileName"],
fileSize=0, fileSize=0,
mimeType=f["mimeType"], mimeType=f["mimeType"],
) ))
chatDocDumps.append(chatDoc.model_dump())
workflow = self.services.workflow
_wfId = getattr(workflow, "id", None) or ""
hasRealWorkflow = workflow is not None and bool(_wfId) and not str(_wfId).startswith("transient-")
if hasRealWorkflow:
chatDocDumps = [d.model_dump() for d in chatDocs]
messageData = { messageData = {
"id": f"msg_extract_{uuid.uuid4().hex[:12]}", "id": f"msg_extract_{uuid.uuid4().hex[:12]}",
"documentsLabel": "extract_files", "documentsLabel": "extract_files",
@ -522,17 +531,30 @@ async def extractFromFiles(self, parameters: Dict[str, Any]) -> ActionResult:
"message": f"Extract from {len(filesToProcess)} file(s)", "message": f"Extract from {len(filesToProcess)} file(s)",
} }
createdMessage = self.services.chat.storeMessageWithDocuments( createdMessage = self.services.chat.storeMessageWithDocuments(
self.services.workflow, workflow, messageData, chatDocDumps,
messageData,
chatDocDumps,
) )
if not createdMessage or not createdMessage.documents: if not createdMessage or not createdMessage.documents:
return ActionResult.isFailure(error="Failed to attach documents to workflow") return ActionResult.isFailure(error="Failed to attach documents to workflow")
# Map fileId -> ChatDocument id for AI reference
fileIdToChatDocId = {} fileIdToChatDocId = {}
for i, f in enumerate(filesToProcess): for i, f in enumerate(filesToProcess):
if i < len(createdMessage.documents): if i < len(createdMessage.documents):
fileIdToChatDocId[f["fileId"]] = createdMessage.documents[i].id fileIdToChatDocId[f["fileId"]] = createdMessage.documents[i].id
else:
# Graph-engine path: inject documents into the placeholder workflow so
# getChatDocumentsFromDocumentList can find them via workflow.messages.
msgId = f"msg_extract_{uuid.uuid4().hex[:12]}"
placeholderMsg = ChatMessage(
id=msgId,
workflowId=getattr(workflow, "id", None) or "transient",
documentsLabel="extract_files",
role="user",
status="step",
message=f"Extract from {len(filesToProcess)} file(s)",
documents=chatDocs,
)
if workflow is not None and hasattr(workflow, "messages"):
workflow.messages.append(placeholderMsg)
fileIdToChatDocId = {f["fileId"]: chatDocs[i].id for i, f in enumerate(filesToProcess)}
expenseList, bankList = await _getAccountLists(self, featureInstanceId) expenseList, bankList = await _getAccountLists(self, featureInstanceId)

View file

@ -917,7 +917,7 @@ class DynamicMode(BaseMode):
'success': observation.success, 'success': observation.success,
'resultLabel': observation.resultLabel, 'resultLabel': observation.resultLabel,
'documentsCount': observation.documentsCount, 'documentsCount': observation.documentsCount,
'previews': [p.model_dump(exclude_none=True) if hasattr(p, 'model_dump') else p.dict() for p in observation.previews] if observation.previews else [], 'previews': [p.model_dump(exclude_none=True) for p in observation.previews] if observation.previews else [],
'notes': observation.notes, 'notes': observation.notes,
'contentAnalysis': observation.contentAnalysis if observation.contentAnalysis else {} 'contentAnalysis': observation.contentAnalysis if observation.contentAnalysis else {}
} }

View file

@ -48,8 +48,6 @@ def _observationToDict(obs) -> dict:
return obs.copy() return obs.copy()
if hasattr(obs, 'model_dump'): if hasattr(obs, 'model_dump'):
return obs.model_dump(exclude_none=True) return obs.model_dump(exclude_none=True)
if hasattr(obs, 'dict'):
return obs.dict()
return {"raw": str(obs)} return {"raw": str(obs)}

View file

@ -0,0 +1,100 @@
"""Build ui_language_seed.json from frontend_nyla locale TS files (one-off / CI)."""
from __future__ import annotations
import json
import re
from pathlib import Path
# Repository root — two directory levels above this script
# (assumes the script lives at <repo>/gateway/scripts/ — TODO confirm).
_REPO = Path(__file__).resolve().parents[2]
# Frontend locale sources the seed is built from (de.ts / en.ts / fr.ts).
_SRC = _REPO / "frontend_nyla" / "src" / "locales"
# Destination seed file consumed by the migration layer.
_OUT = _REPO / "gateway" / "modules" / "migration" / "seedData" / "ui_language_seed.json"
def _unescape_ts_single_quoted(raw: str) -> str:
out: list[str] = []
i = 0
while i < len(raw):
c = raw[i]
if c == "\\" and i + 1 < len(raw):
n = raw[i + 1]
if n == "n":
out.append("\n")
i += 2
continue
if n == "r":
out.append("\r")
i += 2
continue
if n == "t":
out.append("\t")
i += 2
continue
out.append(n)
i += 2
continue
out.append(c)
i += 1
return "".join(out)
def _parse_locale(path: Path) -> dict[str, str]:
    """Extract {'dot.key': 'translated text'} pairs from a flat TS locale file.

    Only single-line, single-quoted entries (optional trailing comma and
    // comment) are recognised; any other line is silently skipped.
    """
    entry_re = re.compile(
        r"^\s*'((?:\\.|[^'])*)':\s*'((?:\\.|[^'])*)'\s*,?\s*(//.*)?$"
    )
    result: dict[str, str] = {}
    for raw_line in path.read_text(encoding="utf-8").splitlines():
        match = entry_re.match(raw_line.strip())
        if match is None:
            continue
        dot_key = _unescape_ts_single_quoted(match.group(1))
        result[dot_key] = _unescape_ts_single_quoted(match.group(2))
    return result
def main() -> None:
    """Build ui_language_seed.json from the frontend de/en/fr locale files.

    German plaintext serves as the key in every language map: de maps the
    text to itself, en/fr map the German text of the same dot-notation key
    to their translation.
    """
    de_map = _parse_locale(_SRC / "de.ts")
    en_map = _parse_locale(_SRC / "en.ts")
    fr_map = _parse_locale(_SRC / "fr.ts")

    de_plain = {text: text for text in de_map.values()}
    en_plain: dict[str, str] = {}
    fr_plain: dict[str, str] = {}
    for dot_key, german in de_map.items():
        if dot_key in en_map:
            en_plain[german] = en_map[dot_key]
        if dot_key in fr_map:
            fr_plain[german] = fr_map[dot_key]

    payload = [
        {
            "id": "de",
            "label": "Deutsch",
            "keys": de_plain,
            "status": "complete",
            "isDefault": True,
        },
        {
            "id": "en",
            "label": "English",
            "keys": en_plain,
            "status": "complete",
            "isDefault": False,
        },
        {
            "id": "fr",
            "label": "Français",
            "keys": fr_plain,
            "status": "complete",
            "isDefault": False,
        },
    ]

    _OUT.parent.mkdir(parents=True, exist_ok=True)
    _OUT.write_text(json.dumps(payload, ensure_ascii=False, indent=2), encoding="utf-8")
    print("Wrote", _OUT, "keys de/en/fr", len(de_plain), len(en_plain), len(fr_plain))
if __name__ == "__main__":
main()

View file

@ -0,0 +1,136 @@
"""
Rekey frontend t('dot.notation') -> t('Deutscher Klartext') using locales/de.ts mapping.
Usage (from repo root):
python gateway/scripts/i18n_rekey_plaintext_keys.py
Excludes: src/locales/, this script's output is in-place file edits.
"""
from __future__ import annotations
import re
import sys
from pathlib import Path
# Repository root: the script sits two directory levels below it
# (gateway/scripts/, per the usage note in the module docstring).
_REPO = Path(__file__).resolve().parents[2]
# Frontend source tree whose files get rewritten in place.
_SRC = _REPO / "frontend_nyla" / "src"
# German locale file providing the dot-key -> plaintext mapping.
_DE_FILE = _SRC / "locales" / "de.ts"
def _unescape_ts_single_quoted(raw: str) -> str:
out: list[str] = []
i = 0
while i < len(raw):
c = raw[i]
if c == "\\" and i + 1 < len(raw):
n = raw[i + 1]
if n == "n":
out.append("\n")
i += 2
continue
if n == "r":
out.append("\r")
i += 2
continue
if n == "t":
out.append("\t")
i += 2
continue
out.append(n)
i += 2
continue
out.append(c)
i += 1
return "".join(out)
def _escape_for_ts_single_quoted(s: str) -> str:
return (
s.replace("\\", "\\\\")
.replace("'", "\\'")
.replace("\n", "\\n")
.replace("\r", "\\r")
.replace("\t", "\\t")
)
def _parse_de_ts(path: Path) -> dict[str, str]:
    """Read locales/de.ts and return {dot.notation.key: German plaintext}.

    Recognises only flat, single-line entries of the form 'k': 'v' with an
    optional trailing comma and // comment; everything else is ignored.
    """
    pair_pattern = re.compile(
        r"^\s*'((?:\\.|[^'])*)':\s*'((?:\\.|[^'])*)'\s*,?\s*(//.*)?$"
    )
    entries: dict[str, str] = {}
    for source_line in path.read_text(encoding="utf-8").splitlines():
        hit = pair_pattern.match(source_line.strip())
        if hit is None:
            continue
        dot_key = _unescape_ts_single_quoted(hit.group(1))
        entries[dot_key] = _unescape_ts_single_quoted(hit.group(2))
    return entries
def _iter_source_files():
    """Yield every .tsx/.ts file under src/, skipping the locales directory."""
    for pattern in ("*.tsx", "*.ts"):
        for candidate in _SRC.rglob(pattern):
            relative = candidate.relative_to(_SRC).as_posix()
            if not relative.startswith("locales/"):
                yield candidate
def _rekey_content(content: str, mapping: dict[str, str]) -> tuple[str, int]:
    """Rewrite t('dot.key') calls in *content* to t('<German plaintext>').

    Handles three call shapes: t('key', "fallback"), t('key', 'fallback')
    and t('key'). Keys are processed longest-first so a key that is a
    prefix of another cannot steal its matches. Returns the rewritten
    content together with the number of replacements made.
    """

    def _verbatim(replacement: str):
        # BUGFIX: a plain-string repl is parsed by re as a *template*, so the
        # TS escapes produced by _escape_for_ts_single_quoted would be
        # collapsed ("\\n" -> a real newline, "\\\\" -> "\\"), corrupting the
        # generated source. A callable repl is inserted verbatim.
        return lambda _match: replacement

    # NOTE(review): dot-notation keys are assumed to contain no ' or \
    # (they would appear escaped in the source and miss the pre-check).
    changes = 0
    for key in sorted(mapping.keys(), key=len, reverse=True):
        # Cheap substring pre-check before paying for three regex passes.
        if f"'{key}'" not in content:
            continue
        repl = _verbatim(f"t('{_escape_for_ts_single_quoted(mapping[key])}')")
        key_re = re.escape(key)
        # t('key', "fallback")
        content, c = re.subn(
            rf"t\(\s*'{key_re}'\s*,\s*\"(?:\\.|[^\"])*\"\s*\)",
            repl,
            content,
        )
        changes += c
        # t('key', 'fallback')
        content, c = re.subn(
            rf"t\(\s*'{key_re}'\s*,\s*'(?:\\.|[^'])*'\s*\)",
            repl,
            content,
        )
        changes += c
        # t('key')
        content, c = re.subn(rf"t\(\s*'{key_re}'\s*\)", repl, content)
        changes += c
    return content, changes
def main() -> int:
    """Rekey all frontend sources in place; return a process exit code."""
    if not _DE_FILE.is_file():
        print("Missing", _DE_FILE, file=sys.stderr)
        return 1
    mapping = _parse_de_ts(_DE_FILE)
    print("Loaded", len(mapping), "entries from de.ts")

    total = 0
    for source_path in _iter_source_files():
        original = source_path.read_text(encoding="utf-8")
        rewritten, count = _rekey_content(original, mapping)
        # Only touch files whose content actually changed.
        if count and rewritten != original:
            source_path.write_text(rewritten, encoding="utf-8", newline="\n")
            print(source_path.relative_to(_REPO), count, "replacements")
            total += count

    print("Done. Total replacements:", total)
    return 0
if __name__ == "__main__":
raise SystemExit(main())

View file

@ -41,7 +41,7 @@ from modules.shared.configuration import APP_CONFIG
DATABASE_CONFIG = { DATABASE_CONFIG = {
"poweron_app": ("DB_APP", ["datamodelUam", "datamodelRbac", "datamodelSecurity"]), "poweron_app": ("DB_APP", ["datamodelUam", "datamodelRbac", "datamodelSecurity"]),
"poweron_chat": ("DB_CHAT", ["datamodelChat"]), "poweron_chat": ("DB_CHAT", ["datamodelChat"]),
"poweron_management": ("DB_MANAGEMENT", ["datamodelWorkflow", "datamodelFiles"]), "poweron_management": ("DB_MANAGEMENT", ["datamodelWorkflow", "datamodelFiles", "datamodelUiLanguage"]),
} }
# Python-Typ → PostgreSQL-Typ Mapping # Python-Typ → PostgreSQL-Typ Mapping