Compare commits

...

8 commits

77 changed files with 4352 additions and 1399 deletions

app.py
View file

@@ -600,6 +600,9 @@ app.include_router(promptRouter)
 from modules.routes.routeDataConnections import router as connectionsRouter
 app.include_router(connectionsRouter)

+from modules.routes.routeTableViews import router as tableViewsRouter
+app.include_router(tableViewsRouter)
+
 from modules.routes.routeSecurityLocal import router as localRouter
 app.include_router(localRouter)

View file

@@ -351,6 +351,7 @@ class AiAnthropic(BaseConnectorAi):
         # Parse response
         anthropicResponse = response.json()
+        stop_reason = anthropicResponse.get("stop_reason")

         # Extract content and tool_use blocks from response
         content = ""
@@ -374,9 +375,25 @@ class AiAnthropic(BaseConnectorAi):
         if not content and not toolCalls:
             logger.warning(f"Anthropic API returned empty content. Full response: {anthropicResponse}")
-            content = "[Anthropic API returned empty response]"
+            err = (
+                "Anthropic refused the request (content policy) — try another model or adjust the prompt."
+                if stop_reason == "refusal"
+                else f"Anthropic returned no assistant text (stop_reason={stop_reason or 'unknown'})."
+            )
+            return AiModelResponse(
+                content="",
+                success=False,
+                error=err,
+                modelId=model.name,
+                metadata={
+                    "response_id": anthropicResponse.get("id", ""),
+                    "stop_reason": stop_reason,
+                },
+            )

         metadata = {"response_id": anthropicResponse.get("id", "")}
+        if stop_reason:
+            metadata["stop_reason"] = stop_reason

         if toolCalls:
             metadata["toolCalls"] = toolCalls
@@ -492,6 +509,19 @@ class AiAnthropic(BaseConnectorAi):
                 f"Anthropic stream returned empty response: model={model.name}, "
                 f"stopReason={stopReason}"
             )
+            err = (
+                "Anthropic refused the request (content policy) — try another model or adjust the prompt."
+                if stopReason == "refusal"
+                else f"Anthropic returned no assistant text (stop_reason={stopReason or 'unknown'})."
+            )
+            yield AiModelResponse(
+                content="",
+                success=False,
+                error=err,
+                modelId=model.name,
+                metadata={"stopReason": stopReason} if stopReason else {},
+            )
+            return

         metadata: Dict[str, Any] = {}
         if stopReason:

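How a caller is meant to consume the new failure path, as a minimal sketch: the AiModelResponse fields (content, success, error, modelId, metadata) mirror the hunks above, but the dataclass stand-in and the retry policy are illustrative assumptions, not the shipped types.

# Hypothetical caller-side handling of the non-success AiModelResponse.
from dataclasses import dataclass, field
from typing import Any, Dict

@dataclass
class AiModelResponse:  # stand-in matching the fields used in the diff above
    content: str
    success: bool = True
    error: str = ""
    modelId: str = ""
    metadata: Dict[str, Any] = field(default_factory=dict)

def handle_response(resp: AiModelResponse) -> str:
    if resp.success:
        return resp.content
    # stop_reason lets callers distinguish refusals from truncation/empty output
    if resp.metadata.get("stop_reason") == "refusal":
        raise PermissionError(resp.error)  # surface to the user; retrying will not help
    raise RuntimeError(resp.error)         # empty/transient: a retry or fallback model may help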
View file

@@ -834,7 +834,10 @@ class DatabaseConnector:
             createdTs = record.get("sysCreatedAt")
             if createdTs is None or createdTs == 0 or createdTs == 0.0:
                 record["sysCreatedAt"] = currentTime
-            if effective_user_id:
+            # Do not wipe caller-provided sysCreatedBy (e.g. FileItem from createFile with
+            # real user). ContextVar can be "system" for the DB pool while the business
+            # user is set on the record from model_dump().
+            if effective_user_id and not record.get("sysCreatedBy"):
                 record["sysCreatedBy"] = effective_user_id
             elif not record.get("sysCreatedBy"):
                 if effective_user_id:
@@ -1531,7 +1534,7 @@ class DatabaseConnector:
             createdTs = rec.get("sysCreatedAt")
             if createdTs is None or createdTs == 0 or createdTs == 0.0:
                 rec["sysCreatedAt"] = currentTime
-            if effectiveUserId:
+            if effectiveUserId and not rec.get("sysCreatedBy"):
                 rec["sysCreatedBy"] = effectiveUserId
             elif not rec.get("sysCreatedBy") and effectiveUserId:
                 rec["sysCreatedBy"] = effectiveUserId

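The precedence rule the new comment describes can be stated as one small pure function; a sketch assuming records are plain dicts, as in the hunks above.

def resolve_created_by(record: dict, effective_user_id: str | None) -> dict:
    # Caller-provided sysCreatedBy (e.g. set via model_dump()) always wins;
    # the ContextVar-derived user is only a fallback, never an overwrite.
    if effective_user_id and not record.get("sysCreatedBy"):
        record["sysCreatedBy"] = effective_user_id
    return record

# The business user survives even when the DB pool runs as "system":
rec = resolve_created_by({"sysCreatedBy": "user-42"}, "system")
assert rec["sysCreatedBy"] == "user-42"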
View file

@@ -210,6 +210,9 @@ class ClickupListsAdapter(ServiceAdapter):
         data = await self._svc.getTask(task_id)
         if isinstance(data, dict) and data.get("error"):
             return json.dumps(data).encode("utf-8")
+        returnedId = data.get("id", "") if isinstance(data, dict) else ""
+        if returnedId and returnedId != task_id:
+            logger.warning(f"ClickUp download: requested task_id={task_id} but API returned id={returnedId}")
         payload = json.dumps(data, indent=2).encode("utf-8")
         return DownloadResult(data=payload, fileName=f"task-{task_id}.json", mimeType="application/json")

View file

@@ -155,9 +155,12 @@ def coerceDocumentReferenceList(value: Any) -> DocumentReferenceList:
             return coerceDocumentReferenceList(value[innerKey])
         docId = value.get("documentId") or value.get("id")
         if docId:
+            docIdStr = str(docId)
+            if docIdStr.startswith("docItem:") or docIdStr.startswith("docList:"):
+                return DocumentReferenceList.from_string_list([docIdStr])
             return DocumentReferenceList(references=[
                 DocumentItemReference(
-                    documentId=str(docId),
+                    documentId=docIdStr,
                     fileName=value.get("fileName") or value.get("name"),
                 )
             ])
@@ -180,10 +183,15 @@ def coerceDocumentReferenceList(value: Any) -> DocumentReferenceList:
                 continue
             docId = item.get("documentId") or item.get("id")
             if docId:
-                references.append(DocumentItemReference(
-                    documentId=str(docId),
-                    fileName=item.get("fileName") or item.get("name"),
-                ))
+                docIdStr = str(docId)
+                if docIdStr.startswith("docItem:") or docIdStr.startswith("docList:"):
+                    parsed = DocumentReferenceList.from_string_list([docIdStr])
+                    references.extend(parsed.references)
+                else:
+                    references.append(DocumentItemReference(
+                        documentId=docIdStr,
+                        fileName=item.get("fileName") or item.get("name"),
+                    ))
             elif item.get("label"):
                 references.append(DocumentListReference(
                     label=str(item["label"]),

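A sketch of the dispatch this change introduces: ids that already carry a docItem:/docList: prefix are routed through from_string_list instead of being wrapped as bare item references. The stand-in dict return shape below is a simplifying assumption; only the prefix check mirrors the diff.

# Simplified stand-in; the real DocumentReferenceList/DocumentItemReference
# classes live in the codebase. Only the prefix dispatch mirrors the diff.
_TYPED_PREFIXES = ("docItem:", "docList:")

def coerce_id(doc_id: object, file_name: str | None = None) -> dict:
    doc_id_str = str(doc_id)
    if doc_id_str.startswith(_TYPED_PREFIXES):
        # Typed reference string: preserve its kind instead of forcing an item ref.
        kind, _, ident = doc_id_str.partition(":")
        return {"kind": kind, "id": ident}
    return {"kind": "docItem", "id": doc_id_str, "fileName": file_name}

assert coerce_id("docList:abc")["kind"] == "docList"
assert coerce_id(123)["id"] == "123"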
View file

@@ -10,6 +10,69 @@ import uuid
 import base64

+
+@i18nModel("Ordner")
+class FileFolder(PowerOnModel):
+    """Persistenter Datei-Ordner im Management-DB-Kontext (RBAC wie FileItem)."""
+    id: str = Field(
+        default_factory=lambda: str(uuid.uuid4()),
+        description="Primary key",
+        json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+    )
+    name: str = Field(
+        description="Display name of the folder",
+        json_schema_extra={"label": "Name", "frontend_type": "text", "frontend_readonly": False, "frontend_required": True},
+    )
+    parentId: Optional[str] = Field(
+        default=None,
+        description="Parent folder id; empty or None for root",
+        json_schema_extra={
+            "label": "Uebergeordneter Ordner",
+            "frontend_type": "text",
+            "frontend_readonly": False,
+            "frontend_required": False,
+            "fk_target": {"db": "poweron_management", "table": "FileFolder", "labelField": "name"},
+        },
+    )
+    mandateId: Optional[str] = Field(
+        default="",
+        description="ID of the mandate this folder belongs to",
+        json_schema_extra={
+            "label": "Mandant",
+            "frontend_type": "text",
+            "frontend_readonly": True,
+            "frontend_required": False,
+            "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"},
+        },
+    )
+    featureInstanceId: Optional[str] = Field(
+        default="",
+        description="ID of the feature instance this folder belongs to",
+        json_schema_extra={
+            "label": "Feature-Instanz",
+            "frontend_type": "text",
+            "frontend_readonly": True,
+            "frontend_required": False,
+            "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"},
+        },
+    )
+    scope: str = Field(
+        default="personal",
+        description="Data visibility scope: personal, featureInstance, mandate, global",
+        json_schema_extra={"label": "Sichtbarkeit", "frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
+            {"value": "personal", "label": "Persönlich"},
+            {"value": "featureInstance", "label": "Feature-Instanz"},
+            {"value": "mandate", "label": "Mandant"},
+            {"value": "global", "label": "Global"},
+        ]},
+    )
+    neutralize: bool = Field(
+        default=False,
+        description="Whether files in this folder should be neutralized before AI processing",
+        json_schema_extra={"label": "Neutralisieren", "frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False},
+    )
+
+
 @i18nModel("Datei")
 class FileItem(PowerOnModel):
     """Metadaten einer gespeicherten Datei."""
@@ -44,6 +107,17 @@ class FileItem(PowerOnModel):
             "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"},
         },
     )
+    folderId: Optional[str] = Field(
+        default=None,
+        description="ID of the folder containing this file (if any)",
+        json_schema_extra={
+            "label": "Ordner",
+            "frontend_type": "text",
+            "frontend_readonly": False,
+            "frontend_required": False,
+            "fk_target": {"db": "poweron_management", "table": "FileFolder", "labelField": "name"},
+        },
+    )
     mimeType: str = Field(
         description="MIME type of the file",
         json_schema_extra={"label": "MIME-Typ", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},

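A usage sketch of the new folder linkage: FileFolder and FileItem are reduced here to the fields this changeset touches, with plain pydantic stand-ins; PowerOnModel, RBAC, and persistence are assumptions left out.

# Stand-in sketch, not the shipped models.
import uuid
from pydantic import BaseModel, Field

class FileFolder(BaseModel):
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    name: str
    parentId: str | None = None
    scope: str = "personal"   # personal | featureInstance | mandate | global
    neutralize: bool = False

class FileItem(BaseModel):
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    folderId: str | None = None  # new linkage added in this changeset

contracts = FileFolder(name="Contracts", scope="mandate", neutralize=True)
doc = FileItem(folderId=contracts.id)
assert doc.folderId == contracts.id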
View file

@@ -9,50 +9,95 @@ All models use camelStyle naming convention for consistency with frontend.
 from typing import List, Dict, Any, Optional, Generic, TypeVar
 from pydantic import BaseModel, Field, ConfigDict
 import math
+import uuid

 T = TypeVar('T')

 # ---------------------------------------------------------------------------
-# Table Grouping models
+# Group layout models (Strategy B — derived from Views, purely presentational)
 # ---------------------------------------------------------------------------

-class TableGroupNode(BaseModel):
-    """
-    A single node in a user-defined group tree for a FormGeneratorTable.
-    Items belong to exactly one group (no multi-membership).
-    Groups can be nested to arbitrary depth via subGroups.
-    """
-    id: str
-    name: str
-    itemIds: List[str] = Field(default_factory=list)
-    subGroups: List['TableGroupNode'] = Field(default_factory=list)
-    order: int = 0
-    isExpanded: bool = True
-
-TableGroupNode.model_rebuild()
-
-class TableGrouping(BaseModel):
-    """
-    Persisted grouping configuration for one (user, contextKey) pair.
-    Stored in table_groupings in poweron_app (auto-created).
+class GroupByLevel(BaseModel):
+    """One level of a multi-level grouping definition, stored inside a TableListView config."""
+    field: str = Field(..., description="Field key to group by")
+    nullLabel: str = Field(default="", description="Display label for null/empty values")
+    direction: str = Field(
+        default="asc",
+        description="Order of group bands at this level: 'asc' or 'desc'",
+    )
+
+class GroupBand(BaseModel):
+    """
+    A contiguous block of rows that share the same group path, intersecting the current page.
+    startRowIndex and rowCount are 0-based indices relative to the current page's items[].
+    """
+    path: List[str] = Field(..., description="Hierarchical group key (one entry per level)")
+    label: str = Field(..., description="Display label for this band (last path element)")
+    startRowIndex: int = Field(..., description="0-based start index within items[] on this page")
+    rowCount: int = Field(..., description="Number of items in this band on this page")
+
+class GroupLayout(BaseModel):
+    """
+    Grouping structure for the current response page.
+    Included only when the effective view has groupByLevels configured.
+    The frontend renders group header rows by iterating bands and inserting
+    headers before each startRowIndex.
+    """
+    levels: List[str] = Field(..., description="Ordered field keys that define the grouping hierarchy")
+    bands: List[GroupBand] = Field(..., description="Bands intersecting the current page, in order")
+
+class AppliedViewMeta(BaseModel):
+    """Minimal metadata about the view that was applied to this response."""
+    viewKey: Optional[str] = None
+    displayName: Optional[str] = None
+
+# ---------------------------------------------------------------------------
+# Persisted view model
+# ---------------------------------------------------------------------------
+
+class TableListView(BaseModel):
+    """
+    A saved table view for one (userId, contextKey) pair.
+
+    config schema (schemaVersion=1):
+    {
+        "schemaVersion": 1,
+        "filters": {},              # same structure as PaginationParams.filters
+        "sort": [],                 # same structure as PaginationParams.sort
+        "groupByLevels": [          # ordered grouping levels
+            {"field": "scope", "nullLabel": "", "direction": "asc"}
+        ],
+        "collapsedSectionKeys": [], # optional: section UI (stable group keys)
+        "collapsedGroupKeys": [],   # optional: inline group bands (path.join('///'))
+    }

     contextKey convention: API path without /api/ prefix and without trailing slash.
-    Examples: "connections", "prompts", "admin/users", "trustee/{instanceId}/documents"
+    Examples: "connections", "prompts", "admin/users", "files/list"
+    viewKey is a user-defined slug, unique per (userId, mandateId, contextKey).
     """
-    id: str
+    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
     userId: str
+    mandateId: Optional[str] = None
     contextKey: str
-    rootGroups: List[TableGroupNode] = Field(default_factory=list)
+    viewKey: str
+    displayName: str
+    config: Dict[str, Any] = Field(default_factory=dict)
     updatedAt: Optional[float] = None

+# ---------------------------------------------------------------------------
+# Sort and pagination models
+# ---------------------------------------------------------------------------
+
 class SortField(BaseModel):
-    """
-    Single sort field configuration.
-    """
+    """Single sort field configuration."""
     field: str = Field(..., description="Field name to sort by")
     direction: str = Field(..., description="Sort direction: 'asc' or 'desc'")
@@ -61,16 +106,13 @@ class PaginationParams(BaseModel):
     """
     Complete pagination state including page, sorting, and filters.

-    Grouping extensions (both optional — omit when not using grouping):
-    groupId        Scope the request to items belonging to this group.
-                   The backend resolves it to an itemIds IN-filter before
-                   applying normal pagination/search/filter logic.
-                   Also applied for mode=ids and mode=filterValues so that
-                   bulk-select and filter-dropdowns respect the group scope.
-    saveGroupTree  If present the backend persists this tree for the current
-                   (user, contextKey) pair *before* fetching, then returns
-                   the confirmed tree in the response groupTree field.
-                   Omit on every request that does not change the group tree.
+    View extension (optional):
+    viewKey   Slug of a saved TableListView for this (user, contextKey) pair.
+              The server loads the view, merges its filters/sort/groupByLevels
+              into the effective query (request fields take priority over view
+              defaults for explicitly provided fields), and returns groupLayout
+              in the response when groupByLevels is non-empty.
+              Omit or set to None for the default (ungrouped) view.
     """
     page: int = Field(ge=1, description="Current page number (1-based)")
     pageSize: int = Field(ge=1, le=1000, description="Number of items per page")
@@ -85,13 +127,16 @@ class PaginationParams(BaseModel):
         - Supported operators: equals/eq, contains, startsWith, endsWith, gt, gte, lt, lte, in, notIn
         - Multiple filters are combined with AND logic"""
     )
-    groupId: Optional[str] = Field(
+    viewKey: Optional[str] = Field(
         default=None,
-        description="Scope request to items of this group (resolved server-side to itemIds IN-filter)",
+        description="Slug of a saved view to load; server merges view config into effective query",
     )
-    saveGroupTree: Optional[List[Dict[str, Any]]] = Field(
+    groupByLevels: Optional[List[GroupByLevel]] = Field(
        default=None,
-        description="If set, persist this group tree before fetching (optimistic save)",
+        description=(
+            "When set (including an empty list), replaces the saved view's groupByLevels for this request. "
+            "Omit entirely to use grouping from the view only."
+        ),
     )
@@ -130,16 +175,22 @@ class PaginatedResponse(BaseModel, Generic[T]):
     """
     Response containing paginated data and metadata.

-    groupTree is included when the endpoint supports table grouping and the
-    current user has a saved group tree for the requested contextKey.
-    It is None when grouping is not configured for the endpoint or the user
-    has not created any groups yet. Frontend must treat None as an empty tree.
+    groupLayout is included when the effective view has groupByLevels configured.
+    It describes how to render group header rows in the current page's items[].
+    Omitted (None) when no grouping is active.
+
+    appliedView describes which saved view was merged into this response,
+    allowing the frontend to synchronise its view selector.
     """
     items: List[T] = Field(..., description="Array of items for current page")
     pagination: Optional[PaginationMetadata] = Field(..., description="Pagination metadata (None if pagination not applied)")
-    groupTree: Optional[List[TableGroupNode]] = Field(
+    groupLayout: Optional[GroupLayout] = Field(
         default=None,
-        description="Current group tree for this (user, contextKey) pair — None if no grouping configured",
+        description="Group band structure for this page (None if no grouping active)",
+    )
+    appliedView: Optional[AppliedViewMeta] = Field(
+        default=None,
+        description="Metadata about the view applied to this response",
     )

     model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -148,34 +199,30 @@ class PaginatedResponse(BaseModel, Generic[T]):
 def normalize_pagination_dict(pagination_dict: Dict[str, Any]) -> Dict[str, Any]:
     """
     Normalize pagination dictionary to handle frontend variations.

-    Moves top-level "search" field into filters if present.
-    Grouping fields (groupId, saveGroupTree) are passed through as-is.
-
-    Args:
-        pagination_dict: Raw pagination dictionary from frontend
-
-    Returns:
-        Normalized pagination dictionary ready for PaginationParams parsing
+    - Moves top-level "search" field into filters if present.
+    - Silently drops legacy fields (groupId, saveGroupTree) that were part of the
+      old tree-grouping implementation so old clients do not cause validation errors.
+    - Passes viewKey through unchanged.
     """
     if not pagination_dict:
         return pagination_dict

-    # Create a copy to avoid modifying the original
     normalized = dict(pagination_dict)

-    # Ensure required fields have sensible defaults
     if "page" not in normalized:
         normalized["page"] = 1
     if "pageSize" not in normalized:
         normalized["pageSize"] = 25

-    # Move top-level "search" into filters if present
+    # Move top-level "search" into filters
     if "search" in normalized:
         if "filters" not in normalized or normalized["filters"] is None:
             normalized["filters"] = {}
         normalized["filters"]["search"] = normalized.pop("search")

-    # groupId / saveGroupTree are valid PaginationParams fields — pass through unchanged.
-    # No transformation needed; Pydantic will validate them.
+    # Drop legacy tree-grouping fields — harmless if already absent
+    normalized.pop("groupId", None)
+    normalized.pop("saveGroupTree", None)

     return normalized

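How the server might derive GroupLayout bands for one page: compute the group path for each item on the (already sorted) page and collapse contiguous runs into bands, matching the startRowIndex/rowCount contract documented above. The helper below is an illustrative assumption, not the shipped implementation.

from itertools import groupby
from typing import Any, Dict, List

def compute_bands(page_items: List[Dict[str, Any]], levels: List[str],
                  null_label: str = "") -> List[dict]:
    """Collapse contiguous runs of equal group paths into GroupBand-shaped dicts.

    Assumes page_items are already sorted by the grouping fields, which the
    effective view's merged sort is expected to guarantee.
    """
    bands, row = [], 0
    keyfn = lambda item: [str(item.get(f) or null_label) for f in levels]
    for path, run in groupby(page_items, key=keyfn):
        count = sum(1 for _ in run)
        bands.append({"path": path, "label": path[-1],
                      "startRowIndex": row, "rowCount": count})
        row += count
    return bands

items = [{"scope": "personal"}, {"scope": "personal"}, {"scope": "mandate"}]
assert compute_bands(items, ["scope"]) == [
    {"path": ["personal"], "label": "personal", "startRowIndex": 0, "rowCount": 2},
    {"path": ["mandate"], "label": "mandate", "startRowIndex": 2, "rowCount": 1},
]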
View file

@@ -12,17 +12,30 @@ import uuid
 from typing import Dict, Any, List, Optional

-def _make_json_serializable(obj: Any) -> Any:
+_INTERNAL_SKIP_KEYS = frozenset({"_context", "_orderedNodes"})
+
+
+def _make_json_serializable(obj: Any, _depth: int = 0) -> Any:
     """
     Recursively convert bytes to base64 strings so structures can be JSON-serialized
     for storage in JSONB columns.
+
+    Internal runtime keys (_context, _orderedNodes) are skipped — they hold live
+    Python objects (including back-references to nodeOutputs) and must never be
+    stored. A depth guard prevents runaway recursion on unexpected circular refs.
     """
+    if _depth > 50:
+        return None
     if isinstance(obj, bytes):
         return base64.b64encode(obj).decode("ascii")
     if isinstance(obj, dict):
-        return {k: _make_json_serializable(v) for k, v in obj.items()}
+        return {
+            k: _make_json_serializable(v, _depth + 1)
+            for k, v in obj.items()
+            if k not in _INTERNAL_SKIP_KEYS
+        }
     if isinstance(obj, list):
-        return [_make_json_serializable(v) for v in obj]
+        return [_make_json_serializable(v, _depth + 1) for v in obj]
     return obj

 from modules.datamodels.datamodelUam import User

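A quick demonstration of the new behavior, assuming _make_json_serializable as defined in the hunk above is in scope: bytes become base64 text, internal keys disappear, and a self-referencing structure bottoms out at the depth guard instead of recursing forever.

payload = {
    "blob": b"\x00\x01",
    "_context": object(),        # live runtime object: must not be persisted
    "nested": {"_orderedNodes": [], "ok": True},
}
out = _make_json_serializable(payload)
assert out == {"blob": "AAE=", "nested": {"ok": True}}

loop = {}
loop["self"] = loop              # unexpected circular reference
_make_json_serializable(loop)    # terminates; the deep tail is replaced with None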
View file

@@ -4,7 +4,7 @@
 from modules.shared.i18nRegistry import t

 _AI_COMMON_PARAMS = [
-    {"name": "requireNeutralization", "type": "boolean", "required": False,
+    {"name": "requireNeutralization", "type": "bool", "required": False,
      "frontendType": "checkbox", "default": False,
      "description": t("Eingaben fuer diesen Call neutralisieren")},
     {"name": "allowedModels", "type": "array", "required": False,
@@ -19,25 +19,25 @@ AI_NODES = [
         "label": t("Prompt"),
         "description": t("Prompt eingeben und KI führt aus"),
         "parameters": [
-            {"name": "aiPrompt", "type": "string", "required": True, "frontendType": "templateTextarea",
+            {"name": "aiPrompt", "type": "str", "required": True, "frontendType": "templateTextarea",
              "description": t("KI-Prompt")},
-            {"name": "resultType", "type": "string", "required": False, "frontendType": "select",
+            {"name": "resultType", "type": "str", "required": False, "frontendType": "select",
              "frontendOptions": {"options": ["txt", "json", "md", "csv", "xml", "html", "pdf", "docx", "xlsx", "pptx", "png", "jpg"]},
              "description": t("Ausgabeformat"), "default": "txt"},
-            {"name": "documentList", "type": "DocumentList", "required": False, "frontendType": "dataRef",
-             "description": t("Dokumentenliste (Upstream-Output binden)"), "default": ""},
+            {"name": "documentList", "type": "DocumentList", "required": False, "frontendType": "hidden",
+             "description": t("Dokumente aus vorherigen Schritten"), "default": ""},
-            {"name": "context", "type": "string", "required": False, "frontendType": "dataRef",
-             "description": t("Kontextdaten fuer den Prompt (Upstream-Output binden)"), "default": ""},
+            {"name": "context", "type": "Any", "required": False, "frontendType": "contextBuilder",
+             "description": t("Daten aus vorherigen Schritten"), "default": ""},
-            {"name": "documentTheme", "type": "string", "required": False, "frontendType": "select",
+            {"name": "documentTheme", "type": "str", "required": False, "frontendType": "select",
              "frontendOptions": {"options": ["general", "finance", "legal", "technical", "hr"]},
              "description": t("Dokument-Thema (Style-Hinweis fuer den Renderer)"), "default": "general"},
-            {"name": "simpleMode", "type": "boolean", "required": False, "frontendType": "checkbox",
+            {"name": "simpleMode", "type": "bool", "required": False, "frontendType": "checkbox",
              "description": t("Einfacher Modus"), "default": True},
         ] + _AI_COMMON_PARAMS,
         "inputs": 1,
         "outputs": 1,
         "inputPorts": {0: {"accepts": [
-            "DocumentList", "AiResult", "TextResult", "Transit", "LoopItem", "ActionResult", "FormPayload",
+            "DocumentList", "AiResult", "TextResult", "Transit", "LoopItem", "ActionResult",
         ]}},
         "outputPorts": {0: {"schema": "AiResult"}},
         "meta": {"icon": "mdi-robot", "color": "#9C27B0", "usesAi": True},
@@ -50,12 +50,18 @@ AI_NODES = [
         "label": t("Web-Recherche"),
         "description": t("Recherche im Web"),
         "parameters": [
-            {"name": "prompt", "type": "string", "required": True, "frontendType": "textarea",
+            {"name": "prompt", "type": "str", "required": True, "frontendType": "textarea",
              "description": t("Recherche-Anfrage")},
+            {"name": "context", "type": "Any", "required": False, "frontendType": "contextBuilder",
+             "description": t("Daten aus vorherigen Schritten"), "default": ""},
+            {"name": "documentList", "type": "DocumentList", "required": False, "frontendType": "hidden",
+             "description": t("Dokumente aus vorherigen Schritten"), "default": ""},
         ] + _AI_COMMON_PARAMS,
         "inputs": 1,
         "outputs": 1,
-        "inputPorts": {0: {"accepts": ["Transit"]}},
+        "inputPorts": {0: {"accepts": [
+            "FormPayload", "Transit", "AiResult", "DocumentList", "ActionResult", "LoopItem", "TextResult",
+        ]}},
         "outputPorts": {0: {"schema": "AiResult"}},
         "meta": {"icon": "mdi-magnify", "color": "#9C27B0", "usesAi": True},
         "_method": "ai",
@@ -68,14 +74,14 @@ AI_NODES = [
         "description": t("Dokumentinhalt zusammenfassen"),
         "parameters": [
             {"name": "documentList", "type": "DocumentList", "required": True, "frontendType": "dataRef",
-             "description": t("Dokumentenliste (Upstream-Output binden)"), "default": ""},
+             "description": t("Dokumente aus vorherigen Schritten")},
-            {"name": "summaryLength", "type": "string", "required": False, "frontendType": "select",
+            {"name": "summaryLength", "type": "str", "required": False, "frontendType": "select",
              "frontendOptions": {"options": ["brief", "medium", "detailed"]},
              "description": t("Kurz, mittel oder ausführlich"), "default": "medium"},
         ] + _AI_COMMON_PARAMS,
         "inputs": 1,
         "outputs": 1,
-        "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
+        "inputPorts": {0: {"accepts": ["DocumentList", "Transit", "LoopItem"]}},
         "outputPorts": {0: {"schema": "AiResult"}},
         "meta": {"icon": "mdi-file-document-outline", "color": "#9C27B0", "usesAi": True},
         "_method": "ai",
@@ -88,13 +94,13 @@ AI_NODES = [
         "description": t("Dokument in Zielsprache übersetzen"),
         "parameters": [
             {"name": "documentList", "type": "DocumentList", "required": True, "frontendType": "dataRef",
-             "description": t("Dokumentenliste (Upstream-Output binden)"), "default": ""},
+             "description": t("Dokumente aus vorherigen Schritten")},
-            {"name": "targetLanguage", "type": "string", "required": True, "frontendType": "text",
+            {"name": "targetLanguage", "type": "str", "required": True, "frontendType": "text",
              "description": t("Zielsprache (z.B. de, en, French)")},
         ] + _AI_COMMON_PARAMS,
         "inputs": 1,
         "outputs": 1,
-        "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
+        "inputPorts": {0: {"accepts": ["DocumentList", "Transit", "LoopItem"]}},
         "outputPorts": {0: {"schema": "AiResult"}},
         "meta": {"icon": "mdi-translate", "color": "#9C27B0", "usesAi": True},
         "_method": "ai",
@@ -107,14 +113,14 @@ AI_NODES = [
         "description": t("Dokument in anderes Format konvertieren"),
         "parameters": [
             {"name": "documentList", "type": "DocumentList", "required": True, "frontendType": "dataRef",
-             "description": t("Dokumentenliste (Upstream-Output binden)"), "default": ""},
+             "description": t("Dokumente aus vorherigen Schritten")},
-            {"name": "targetFormat", "type": "string", "required": True, "frontendType": "select",
+            {"name": "targetFormat", "type": "str", "required": True, "frontendType": "select",
              "frontendOptions": {"options": ["docx", "pdf", "xlsx", "csv", "txt", "html", "json", "md"]},
              "description": t("Zielformat")},
         ] + _AI_COMMON_PARAMS,
         "inputs": 1,
         "outputs": 1,
-        "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
+        "inputPorts": {0: {"accepts": ["DocumentList", "Transit", "LoopItem"]}},
         "outputPorts": {0: {"schema": "DocumentList"}},
         "meta": {"icon": "mdi-file-convert", "color": "#9C27B0", "usesAi": True},
         "_method": "ai",
@@ -126,12 +132,26 @@ AI_NODES = [
         "label": t("Dokument generieren"),
         "description": t("Dokument aus Prompt generieren"),
         "parameters": [
-            {"name": "prompt", "type": "string", "required": True, "frontendType": "textarea",
+            {"name": "prompt", "type": "str", "required": True, "frontendType": "textarea",
              "description": t("Generierungs-Prompt")},
+            {"name": "outputFormat", "type": "str", "required": False, "frontendType": "select",
+             "frontendOptions": {"options": ["docx", "pdf", "txt", "html", "md"]},
+             "description": t("Ausgabeformat"), "default": "docx"},
+            {"name": "title", "type": "str", "required": False, "frontendType": "text",
+             "description": t("Dokumenttitel (Metadaten / Dateiname)"), "default": ""},
+            {"name": "documentType", "type": "str", "required": False, "frontendType": "select",
+             "frontendOptions": {"options": ["letter", "memo", "proposal", "contract", "report", "email"]},
+             "description": t("Dokumentart (Inhaltshinweis fuer die KI)"), "default": "proposal"},
+            {"name": "context", "type": "Any", "required": False, "frontendType": "contextBuilder",
+             "description": t("Daten aus vorherigen Schritten"), "default": ""},
+            {"name": "documentList", "type": "DocumentList", "required": False, "frontendType": "hidden",
+             "description": t("Dokumente aus vorherigen Schritten"), "default": ""},
         ] + _AI_COMMON_PARAMS,
         "inputs": 1,
         "outputs": 1,
-        "inputPorts": {0: {"accepts": ["Transit"]}},
+        "inputPorts": {0: {"accepts": [
+            "FormPayload", "Transit", "AiResult", "DocumentList", "ActionResult", "LoopItem", "TextResult",
+        ]}},
         "outputPorts": {0: {"schema": "DocumentList"}},
         "meta": {"icon": "mdi-file-plus", "color": "#9C27B0", "usesAi": True},
         "_method": "ai",
@@ -143,15 +163,21 @@ AI_NODES = [
         "label": t("Code generieren"),
         "description": t("Code aus Beschreibung generieren"),
         "parameters": [
-            {"name": "prompt", "type": "string", "required": True, "frontendType": "textarea",
+            {"name": "prompt", "type": "str", "required": True, "frontendType": "textarea",
              "description": t("Code-Generierungs-Prompt")},
-            {"name": "resultType", "type": "string", "required": False, "frontendType": "select",
+            {"name": "resultType", "type": "str", "required": False, "frontendType": "select",
              "frontendOptions": {"options": ["py", "js", "ts", "html", "java", "cpp", "txt", "json", "csv", "xml"]},
              "description": t("Datei-Endung der erzeugten Code-Datei"), "default": "py"},
+            {"name": "context", "type": "Any", "required": False, "frontendType": "contextBuilder",
+             "description": t("Daten aus vorherigen Schritten"), "default": ""},
+            {"name": "documentList", "type": "DocumentList", "required": False, "frontendType": "hidden",
+             "description": t("Dokumente aus vorherigen Schritten"), "default": ""},
         ] + _AI_COMMON_PARAMS,
         "inputs": 1,
         "outputs": 1,
-        "inputPorts": {0: {"accepts": ["Transit"]}},
+        "inputPorts": {0: {"accepts": [
+            "FormPayload", "Transit", "AiResult", "DocumentList", "ActionResult", "LoopItem", "TextResult",
+        ]}},
         "outputPorts": {0: {"schema": "AiResult"}},
         "meta": {"icon": "mdi-code-tags", "color": "#9C27B0", "usesAi": True},
         "_method": "ai",
@@ -163,10 +189,10 @@ AI_NODES = [
         "label": t("KI-Konsolidierung"),
         "description": t("Gesammelte Ergebnisse mit KI zusammenfassen, klassifizieren oder semantisch zusammenführen"),
         "parameters": [
-            {"name": "mode", "type": "string", "required": False, "frontendType": "select",
+            {"name": "mode", "type": "str", "required": False, "frontendType": "select",
              "frontendOptions": {"options": ["summarize", "classify", "semanticMerge"]},
              "description": t("Konsolidierungsmodus"), "default": "summarize"},
-            {"name": "prompt", "type": "string", "required": False, "frontendType": "textarea",
+            {"name": "prompt", "type": "str", "required": False, "frontendType": "textarea",
              "description": t("Optionaler Prompt für die Konsolidierung"), "default": ""},
         ] + _AI_COMMON_PARAMS,
         "inputs": 1,

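Since this changeset renames parameter types wholesale (string to str, number to int, boolean to bool), a small registry lint can catch stragglers; a sketch assuming node definitions shaped like the dicts above, where the node's own identifier key ("type") is an assumption.

# Hypothetical consistency check over node registries such as AI_NODES.
_ALLOWED_TYPES = {"str", "int", "bool", "float", "object", "array", "json",
                  "Any", "DocumentList"}

def find_legacy_types(nodes: list[dict]) -> list[str]:
    """Return 'node.param: type' entries still using legacy type names."""
    bad = []
    for node in nodes:
        for param in node.get("parameters", []):
            if param.get("type") not in _ALLOWED_TYPES:
                bad.append(f"{node.get('type', '?')}.{param['name']}: {param['type']}")
    return bad

# e.g. assert not find_legacy_types(AI_NODES + CLICKUP_NODES + DATA_NODES)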
View file

@@ -11,23 +11,23 @@ CLICKUP_NODES = [
         "label": t("Aufgaben suchen"),
         "description": t("Aufgaben in einem Workspace suchen"),
         "parameters": [
-            {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+            {"name": "connectionReference", "type": "str", "required": True, "frontendType": "userConnection",
              "frontendOptions": {"authority": "clickup"},
              "description": t("ClickUp-Verbindung")},
-            {"name": "teamId", "type": "string", "required": True, "frontendType": "text",
+            {"name": "teamId", "type": "str", "required": True, "frontendType": "text",
              "description": t("Team-/Workspace-ID")},
-            {"name": "query", "type": "string", "required": True, "frontendType": "text",
+            {"name": "query", "type": "str", "required": True, "frontendType": "text",
              "description": t("Suchbegriff")},
-            {"name": "page", "type": "number", "required": False, "frontendType": "number",
+            {"name": "page", "type": "int", "required": False, "frontendType": "number",
              "description": t("Seite"), "default": 0},
-            {"name": "listId", "type": "string", "required": False, "frontendType": "clickupList",
+            {"name": "listId", "type": "str", "required": False, "frontendType": "clickupList",
              "frontendOptions": {"dependsOn": "connectionReference"},
              "description": t("In dieser Liste suchen")},
-            {"name": "includeClosed", "type": "boolean", "required": False, "frontendType": "checkbox",
+            {"name": "includeClosed", "type": "bool", "required": False, "frontendType": "checkbox",
              "description": t("Erledigte einbeziehen"), "default": False},
-            {"name": "fullTaskData", "type": "boolean", "required": False, "frontendType": "checkbox",
+            {"name": "fullTaskData", "type": "bool", "required": False, "frontendType": "checkbox",
              "description": t("Vollständige Daten"), "default": False},
-            {"name": "matchNameOnly", "type": "boolean", "required": False, "frontendType": "checkbox",
+            {"name": "matchNameOnly", "type": "bool", "required": False, "frontendType": "checkbox",
              "description": t("Nur Titel"), "default": True},
         ],
         "inputs": 1,
@@ -44,15 +44,15 @@ CLICKUP_NODES = [
         "label": t("Aufgaben auflisten"),
         "description": t("Aufgaben einer Liste auflisten"),
         "parameters": [
-            {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+            {"name": "connectionReference", "type": "str", "required": True, "frontendType": "userConnection",
              "frontendOptions": {"authority": "clickup"},
              "description": t("ClickUp-Verbindung")},
-            {"name": "pathQuery", "type": "string", "required": True, "frontendType": "clickupList",
+            {"name": "pathQuery", "type": "str", "required": True, "frontendType": "clickupList",
              "frontendOptions": {"dependsOn": "connectionReference"},
              "description": t("Pfad zur Liste")},
-            {"name": "page", "type": "number", "required": False, "frontendType": "number",
+            {"name": "page", "type": "int", "required": False, "frontendType": "number",
              "description": t("Seite"), "default": 0},
-            {"name": "includeClosed", "type": "boolean", "required": False, "frontendType": "checkbox",
+            {"name": "includeClosed", "type": "bool", "required": False, "frontendType": "checkbox",
              "description": t("Erledigte einbeziehen"), "default": False},
         ],
         "inputs": 1,
@@ -69,12 +69,12 @@ CLICKUP_NODES = [
         "label": t("Aufgabe abrufen"),
         "description": t("Eine Aufgabe abrufen"),
         "parameters": [
-            {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+            {"name": "connectionReference", "type": "str", "required": True, "frontendType": "userConnection",
              "frontendOptions": {"authority": "clickup"},
              "description": t("ClickUp-Verbindung")},
-            {"name": "taskId", "type": "string", "required": False, "frontendType": "text",
+            {"name": "taskId", "type": "str", "required": False, "frontendType": "text",
              "description": t("Task-ID")},
-            {"name": "pathQuery", "type": "string", "required": False, "frontendType": "text",
+            {"name": "pathQuery", "type": "str", "required": False, "frontendType": "text",
              "description": t("Oder Pfad")},
         ],
         "inputs": 1,
@@ -91,34 +91,34 @@ CLICKUP_NODES = [
         "label": t("Aufgabe erstellen"),
         "description": t("Aufgabe erstellen"),
         "parameters": [
-            {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+            {"name": "connectionReference", "type": "str", "required": True, "frontendType": "userConnection",
              "frontendOptions": {"authority": "clickup"},
              "description": t("ClickUp-Verbindung")},
-            {"name": "pathQuery", "type": "string", "required": False, "frontendType": "clickupList",
+            {"name": "pathQuery", "type": "str", "required": False, "frontendType": "clickupList",
              "frontendOptions": {"dependsOn": "connectionReference"},
              "description": t("Pfad zur Liste")},
-            {"name": "listId", "type": "string", "required": False, "frontendType": "text",
+            {"name": "listId", "type": "str", "required": False, "frontendType": "text",
              "description": t("Listen-ID")},
-            {"name": "name", "type": "string", "required": True, "frontendType": "text",
+            {"name": "name", "type": "str", "required": True, "frontendType": "text",
              "description": t("Name")},
-            {"name": "description", "type": "string", "required": False, "frontendType": "textarea",
+            {"name": "description", "type": "str", "required": False, "frontendType": "textarea",
              "description": t("Beschreibung")},
-            {"name": "taskStatus", "type": "string", "required": False, "frontendType": "text",
+            {"name": "taskStatus", "type": "str", "required": False, "frontendType": "text",
              "description": t("Status")},
-            {"name": "taskPriority", "type": "string", "required": False, "frontendType": "select",
+            {"name": "taskPriority", "type": "str", "required": False, "frontendType": "select",
              "frontendOptions": {"options": ["1", "2", "3", "4"]},
              "description": t("Priorität 1-4")},
-            {"name": "taskDueDateMs", "type": "string", "required": False, "frontendType": "text",
+            {"name": "taskDueDateMs", "type": "str", "required": False, "frontendType": "text",
              "description": t("Fälligkeit (ms)")},
             {"name": "taskAssigneeIds", "type": "object", "required": False, "frontendType": "json",
              "description": t("Zugewiesene")},
-            {"name": "taskTimeEstimateMs", "type": "string", "required": False, "frontendType": "text",
+            {"name": "taskTimeEstimateMs", "type": "str", "required": False, "frontendType": "text",
              "description": t("Zeitschätzung (ms)")},
-            {"name": "taskTimeEstimateHours", "type": "string", "required": False, "frontendType": "text",
+            {"name": "taskTimeEstimateHours", "type": "str", "required": False, "frontendType": "text",
              "description": t("Zeitschätzung (h)")},
             {"name": "customFieldValues", "type": "object", "required": False, "frontendType": "json",
              "description": t("Benutzerdefinierte Felder")},
-            {"name": "taskFields", "type": "string", "required": False, "frontendType": "json",
+            {"name": "taskFields", "type": "str", "required": False, "frontendType": "json",
              "description": t("Zusätzliches JSON")},
         ],
         "inputs": 1,
@@ -135,14 +135,14 @@ CLICKUP_NODES = [
         "label": t("Aufgabe aktualisieren"),
         "description": t("Felder der Aufgabe ändern"),
         "parameters": [
-            {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+            {"name": "connectionReference", "type": "str", "required": True, "frontendType": "userConnection",
              "frontendOptions": {"authority": "clickup"},
              "description": t("ClickUp-Verbindung")},
-            {"name": "taskId", "type": "string", "required": False, "frontendType": "text",
+            {"name": "taskId", "type": "str", "required": False, "frontendType": "text",
              "description": t("Task-ID")},
-            {"name": "path", "type": "string", "required": False, "frontendType": "text",
+            {"name": "path", "type": "str", "required": False, "frontendType": "text",
              "description": t("Oder Pfad")},
-            {"name": "taskUpdate", "type": "string", "required": False, "frontendType": "json",
+            {"name": "taskUpdate", "type": "str", "required": False, "frontendType": "json",
              "description": t("JSON-Body für PUT /task/{id}, z.B. {\"name\":\"...\",\"status\":\"...\"}")},
         ],
         "inputs": 1,
@@ -159,16 +159,16 @@ CLICKUP_NODES = [
         "label": t("Anhang hochladen"),
         "description": t("Datei an Task anhängen"),
         "parameters": [
-            {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+            {"name": "connectionReference", "type": "str", "required": True, "frontendType": "userConnection",
              "frontendOptions": {"authority": "clickup"},
              "description": t("ClickUp-Verbindung")},
-            {"name": "taskId", "type": "string", "required": False, "frontendType": "text",
+            {"name": "taskId", "type": "str", "required": False, "frontendType": "text",
              "description": t("Task-ID")},
-            {"name": "path", "type": "string", "required": False, "frontendType": "text",
+            {"name": "path", "type": "str", "required": False, "frontendType": "text",
              "description": t("Oder Pfad")},
-            {"name": "fileName", "type": "string", "required": False, "frontendType": "text",
+            {"name": "fileName", "type": "str", "required": False, "frontendType": "text",
              "description": t("Dateiname")},
-            {"name": "content", "type": "string", "required": True, "frontendType": "hidden",
+            {"name": "content", "type": "str", "required": True, "frontendType": "hidden",
              "description": t("Datei-Inhalt aus Upstream-Node (via Wire oder DataRef)"), "default": ""},
         ],
         "inputs": 1,

View file

@@ -10,7 +10,7 @@ CONTEXT_NODES = [
         "label": t("Inhalt extrahieren"),
         "description": t("Dokumentstruktur extrahieren ohne KI (Seiten, Abschnitte, Bilder, Tabellen)"),
         "parameters": [
-            {"name": "documentList", "type": "string", "required": True, "frontendType": "hidden",
+            {"name": "documentList", "type": "str", "required": True, "frontendType": "hidden",
              "description": t("Dokumentenliste (via Wire oder DataRef)"), "default": ""},
             {"name": "extractionOptions", "type": "object", "required": False, "frontendType": "json",
              "description": t(

View file

@@ -10,7 +10,7 @@ DATA_NODES = [
         "label": t("Sammeln"),
         "description": t("Ergebnisse aus Schleifen-Iterationen sammeln"),
         "parameters": [
-            {"name": "mode", "type": "string", "required": False, "frontendType": "select",
+            {"name": "mode", "type": "str", "required": False, "frontendType": "select",
              "frontendOptions": {"options": ["collect", "concat", "sum", "count"]},
              "description": t("Aggregationsmodus"), "default": "collect"},
         ],
@@ -27,9 +27,9 @@ DATA_NODES = [
         "label": t("Filtern"),
         "description": t("Elemente nach Bedingung filtern"),
         "parameters": [
-            {"name": "condition", "type": "string", "required": True, "frontendType": "filterExpression",
+            {"name": "condition", "type": "str", "required": True, "frontendType": "filterExpression",
              "description": t("Filterbedingung")},
-            {"name": "udmContentType", "type": "string", "required": False, "frontendType": "select",
+            {"name": "udmContentType", "type": "str", "required": False, "frontendType": "select",
              "frontendOptions": {"options": ["", "text", "image", "table", "code", "media", "link", "formula"]},
              "description": t("UDM-ContentType-Filter (optional, leer = kein UDM-Filter)"), "default": ""},
         ],
@@ -46,10 +46,10 @@ DATA_NODES = [
         "label": t("Konsolidieren"),
         "description": t("Gesammelte Ergebnisse deterministisch zusammenführen (Tabelle, CSV, Merge)"),
         "parameters": [
-            {"name": "mode", "type": "string", "required": False, "frontendType": "select",
+            {"name": "mode", "type": "str", "required": False, "frontendType": "select",
              "frontendOptions": {"options": ["table", "concat", "merge", "csvJoin"]},
              "description": t("Konsolidierungsmodus"), "default": "table"},
-            {"name": "separator", "type": "string", "required": False, "frontendType": "text",
+            {"name": "separator", "type": "str", "required": False, "frontendType": "text",
              "description": t("Trennzeichen (für concat/csvJoin)"), "default": "\n"},
         ],
         "inputs": 1,

View file

@@ -10,14 +10,14 @@ EMAIL_NODES = [
         "label": t("E-Mail prüfen"),
         "description": t("Neue E-Mails prüfen"),
         "parameters": [
-            {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+            {"name": "connectionReference", "type": "str", "required": True, "frontendType": "userConnection",
              "frontendOptions": {"authority": "msft"},
              "description": t("E-Mail-Konto Verbindung")},
-            {"name": "folder", "type": "string", "required": False, "frontendType": "text",
+            {"name": "folder", "type": "str", "required": False, "frontendType": "text",
              "description": t("Ordner"), "default": "Inbox"},
-            {"name": "limit", "type": "number", "required": False, "frontendType": "number",
+            {"name": "limit", "type": "int", "required": False, "frontendType": "number",
              "description": t("Max E-Mails"), "default": 100},
-            {"name": "filter", "type": "string", "required": False, "frontendType": "text",
+            {"name": "filter", "type": "str", "required": False, "frontendType": "text",
              "description": t("Filter-Ausdruck (z.B. 'from:max@example.com hasAttachment:true betreff')"), "default": ""},
         ],
         "inputs": 1,
@@ -34,14 +34,14 @@ EMAIL_NODES = [
         "label": t("E-Mail suchen"),
         "description": t("E-Mails suchen"),
         "parameters": [
-            {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+            {"name": "connectionReference", "type": "str", "required": True, "frontendType": "userConnection",
              "frontendOptions": {"authority": "msft"},
              "description": t("E-Mail-Konto Verbindung")},
-            {"name": "query", "type": "string", "required": True, "frontendType": "text",
+            {"name": "query", "type": "str", "required": True, "frontendType": "text",
              "description": t("Suchausdruck (z.B. 'from:max@example.com hasAttachments:true Rechnung')")},
-            {"name": "folder", "type": "string", "required": False, "frontendType": "text",
+            {"name": "folder", "type": "str", "required": False, "frontendType": "text",
              "description": t("Ordner"), "default": "All"},
-            {"name": "limit", "type": "number", "required": False, "frontendType": "number",
+            {"name": "limit", "type": "int", "required": False, "frontendType": "number",
              "description": t("Max E-Mails"), "default": 100},
         ],
         "inputs": 1,
@@ -59,19 +59,19 @@ EMAIL_NODES = [
         "description": t(
             "AI-gestützt einen E-Mail-Entwurf aus Kontext und optionalen Dokumenten erstellen"),
         "parameters": [
-            {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+            {"name": "connectionReference", "type": "str", "required": True, "frontendType": "userConnection",
              "frontendOptions": {"authority": "msft"},
              "description": t("E-Mail-Konto")},
-            {"name": "context", "type": "string", "required": False, "frontendType": "templateTextarea",
-             "description": t("Kontext / Brief-Beschreibung für die KI-Komposition"), "default": ""},
+            {"name": "context", "type": "Any", "required": False, "frontendType": "templateTextarea",
+             "description": t("Daten aus vorherigen Schritten (oder direkte Beschreibung)"), "default": ""},
-            {"name": "to", "type": "string", "required": False, "frontendType": "text",
+            {"name": "to", "type": "str", "required": False, "frontendType": "text",
              "description": t("Empfänger (komma-separiert, optional für Entwurf)"), "default": ""},
-            {"name": "documentList", "type": "string", "required": False, "frontendType": "hidden",
+            {"name": "documentList", "type": "str", "required": False, "frontendType": "hidden",
              "description": t("Anhang-Dokumente (via Wire oder DataRef)"), "default": ""},
-            {"name": "emailContent", "type": "string", "required": False, "frontendType": "hidden",
+            {"name": "emailContent", "type": "str", "required": False, "frontendType": "hidden",
              "description": t("Direkt vorbereiteter Inhalt {subject, body, to} (via Wire — überspringt KI)"),
              "default": ""},
-            {"name": "emailStyle", "type": "string", "required": False, "frontendType": "select",
+            {"name": "emailStyle", "type": "str", "required": False, "frontendType": "select",
              "frontendOptions": {"options": ["formal", "casual", "business"]},
              "description": t("Stil"), "default": "business"},
         ],

View file

@ -10,25 +10,21 @@ FILE_NODES = [
"label": t("Datei erstellen"), "label": t("Datei erstellen"),
"description": t("Erstellt eine Datei aus Kontext (Text/Markdown von KI)."), "description": t("Erstellt eine Datei aus Kontext (Text/Markdown von KI)."),
"parameters": [ "parameters": [
{"name": "contentSources", "type": "json", "required": False, "frontendType": "json", {"name": "outputFormat", "type": "str", "required": True, "frontendType": "select",
"description": t("Kontext-Quellen"), "default": []},
{"name": "outputFormat", "type": "string", "required": True, "frontendType": "select",
"frontendOptions": {"options": ["docx", "pdf", "txt", "html", "md"]}, "frontendOptions": {"options": ["docx", "pdf", "txt", "html", "md"]},
"description": t("Ausgabeformat"), "default": "docx"}, "description": t("Ausgabeformat"), "default": "docx"},
{"name": "title", "type": "string", "required": False, "frontendType": "text", {"name": "title", "type": "str", "required": False, "frontendType": "text",
"description": t("Dokumenttitel")}, "description": t("Dokumenttitel")},
{"name": "templateName", "type": "string", "required": False, "frontendType": "select", {"name": "context", "type": "Any", "required": False, "frontendType": "contextBuilder",
"frontendOptions": {"options": ["default", "corporate", "minimal"]}, "description": t("Daten aus vorherigen Schritten"), "default": ""},
"description": t("Stil-Vorlage")},
{"name": "language", "type": "string", "required": False, "frontendType": "select",
"frontendOptions": {"options": ["de", "en", "fr"]},
"description": t("Sprache"), "default": "de"},
{"name": "context", "type": "string", "required": False, "frontendType": "hidden",
"description": t("Inhalt (via Wire oder DataRef)"), "default": ""},
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["AiResult", "TextResult", "Transit"]}}, <<<<<<< HEAD
"inputPorts": {0: {"accepts": ["AiResult", "TextResult", "Transit", "FormPayload", "LoopItem", "ActionResult"]}},
=======
"inputPorts": {0: {"accepts": ["AiResult", "TextResult", "Transit", "FormPayload"]}},
>>>>>>> 875f8252 (ValueOn Lead to Offer durchgespielt, bugfixes in Dateigenerierung und ai nodes)
"outputPorts": {0: {"schema": "DocumentList"}}, "outputPorts": {0: {"schema": "DocumentList"}},
"meta": {"icon": "mdi-file-plus-outline", "color": "#2196F3", "usesAi": False}, "meta": {"icon": "mdi-file-plus-outline", "color": "#2196F3", "usesAi": False},
"_method": "file", "_method": "file",

View file

@ -3,25 +3,46 @@
from modules.shared.i18nRegistry import t from modules.shared.i18nRegistry import t
# Ports that pass through typical step outputs (not just an empty Transit).
_FLOW_INPUT_SCHEMAS = [
"Transit",
"FormPayload",
"AiResult",
"TextResult",
"ActionResult",
"DocumentList",
"FileList",
"EmailList",
"TaskList",
"QueryResult",
"MergeResult",
"LoopItem",
"BoolResult",
"UdmDocument",
]
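Why widening matters: the flow ports previously accepted only `Transit`, so an `AiResult` or `FormPayload` producer could not be wired into `flow.ifElse` directly. A minimal sketch of the membership check a graph validator presumably runs against these accept lists (`wireIsCompatible` is a hypothetical name, not from this changeset):

def wireIsCompatible(producerSchema: str, accepts: list) -> bool:
    # A wire is valid when the producer's declared output schema is in the consumer port's accept list.
    return producerSchema in accepts

assert wireIsCompatible("AiResult", _FLOW_INPUT_SCHEMAS)   # now allowed into flow nodes
assert not wireIsCompatible("AiResult", ["Transit"])       # the old accept list rejected it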
FLOW_NODES = [ FLOW_NODES = [
{ {
"id": "flow.ifElse", "id": "flow.ifElse",
"category": "flow", "category": "flow",
"label": t("Wenn / Sonst"), "label": t("Wenn / Sonst"),
"description": t("Verzweigung nach Bedingung"), "description": t(
"Verzweigt anhand einer Bedingung auf ein vorheriges Feld oder einen Ausdruck. "
"Die Daten vom Eingangskanal werden an den gewählten Ausgang durchgereicht."
),
"parameters": [ "parameters": [
{ {
"name": "condition", "name": "condition",
"type": "string", "type": "json",
"required": True, "required": True,
"frontendType": "condition", "frontendType": "condition",
"description": t("Bedingung"), "description": t("Bedingung: Feld aus einem vorherigen Schritt und Vergleich"),
}, },
], ],
"inputs": 1, "inputs": 1,
"outputs": 2, "outputs": 2,
"outputLabels": [t("Ja"), t("Nein")], "outputLabels": [t("Ja"), t("Nein")],
"inputPorts": {0: {"accepts": ["Transit"]}}, "inputPorts": {0: {"accepts": list(_FLOW_INPUT_SCHEMAS)}},
"outputPorts": {0: {"schema": "Transit"}, 1: {"schema": "Transit"}}, "outputPorts": {0: {"schema": "Transit"}, 1: {"schema": "Transit"}},
"executor": "flow", "executor": "flow",
"meta": {"icon": "mdi-source-branch", "color": "#FF9800", "usesAi": False}, "meta": {"icon": "mdi-source-branch", "color": "#FF9800", "usesAi": False},
@ -30,26 +51,29 @@ FLOW_NODES = [
"id": "flow.switch", "id": "flow.switch",
"category": "flow", "category": "flow",
"label": t("Switch"), "label": t("Switch"),
"description": t("Mehrere Zweige nach Wert"), "description": t(
"Mehrere Zweige nach einem Wert aus einem vorherigen Schritt (Data Picker). "
"Definiere Fälle mit Vergleichsoperator; der Eingang wird an den ersten passenden Zweig durchgereicht."
),
"parameters": [ "parameters": [
{ {
"name": "value", "name": "value",
"type": "string", "type": "Any",
"required": True, "required": True,
"frontendType": "text", "frontendType": "dataRef",
"description": t("Zu vergleichender Wert"), "description": t("Wert zum Vergleichen (Feld aus einem vorherigen Schritt)"),
}, },
{ {
"name": "cases", "name": "cases",
"type": "array", "type": "array",
"required": False, "required": False,
"frontendType": "caseList", "frontendType": "caseList",
"description": t("Fälle"), "description": t("Fälle: Operator und Vergleichswert"),
}, },
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}}, "inputPorts": {0: {"accepts": list(_FLOW_INPUT_SCHEMAS)}},
"outputPorts": {0: {"schema": "Transit"}}, "outputPorts": {0: {"schema": "Transit"}},
"executor": "flow", "executor": "flow",
"meta": {"icon": "mdi-swap-horizontal", "color": "#FF9800", "usesAi": False}, "meta": {"icon": "mdi-swap-horizontal", "color": "#FF9800", "usesAi": False},
@ -57,39 +81,43 @@ FLOW_NODES = [
{ {
"id": "flow.loop", "id": "flow.loop",
"category": "flow", "category": "flow",
"label": t("Schleife / Für Jedes"), "label": t("Schleife / Für jedes"),
"description": t("Über Array-Elemente oder UDM-Strukturebenen iterieren"), "description": t(
"Iteriert über ein Array aus einem vorherigen Schritt (z. B. documente, Zeilen, Listeneinträge). "
"Optional: UDM-Ebene für strukturierte Dokumente."
),
"parameters": [ "parameters": [
{ {
"name": "items", "name": "items",
"type": "string", "type": "Any",
"required": True, "required": True,
"frontendType": "text", "frontendType": "dataRef",
"description": t("Pfad zum Array"), "description": t("Liste oder Sammlung zum Durchlaufen (im Data Picker wählen)"),
}, },
{ {
"name": "level", "name": "level",
"type": "string", "type": "str",
"required": False, "required": False,
"frontendType": "select", "frontendType": "select",
"frontendOptions": {"options": ["auto", "documents", "structuralNodes", "contentBlocks"]}, "frontendOptions": {"options": ["auto", "documents", "structuralNodes", "contentBlocks"]},
"description": t("UDM-Iterationsebene"), "description": t("Nur bei UDM-Daten: welche Strukturebene als Elemente verwendet wird"),
"default": "auto", "default": "auto",
}, },
{ {
"name": "concurrency", "name": "concurrency",
"type": "number", "type": "int",
"required": False, "required": False,
"frontendType": "number", "frontendType": "number",
"frontendOptions": {"min": 1, "max": 20}, "frontendOptions": {"min": 1, "max": 20},
"description": t("Parallele Iterationen (1 = sequentiell)"), "description": t("Parallele Durchläufe (1 = nacheinander)"),
"default": 1, "default": 1,
}, },
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": [ "inputPorts": {0: {"accepts": [
"Transit", "UdmDocument", "EmailList", "DocumentList", "FileList", "TaskList", "ActionResult", "Transit", "UdmDocument", "EmailList", "DocumentList", "FileList", "TaskList",
"ActionResult", "AiResult", "QueryResult", "FormPayload",
]}}, ]}},
"outputPorts": {0: {"schema": "LoopItem"}}, "outputPorts": {0: {"schema": "LoopItem"}},
"executor": "flow", "executor": "flow",
@ -99,30 +127,36 @@ FLOW_NODES = [
"id": "flow.merge", "id": "flow.merge",
"category": "flow", "category": "flow",
"label": t("Zusammenführen"), "label": t("Zusammenführen"),
"description": t("Mehrere Zweige zusammenführen (2-5 Eingänge)"), "description": t(
"Führt 25 Zweige zusammen, wenn alle verbunden sind. "
"Modus legt fest, wie die Eingabeobjekte im Ergebnis kombiniert werden."
),
"parameters": [ "parameters": [
{ {
"name": "mode", "name": "mode",
"type": "string", "type": "str",
"required": False, "required": False,
"frontendType": "select", "frontendType": "select",
"frontendOptions": {"options": ["first", "all", "append"]}, "frontendOptions": {"options": ["first", "all", "append"]},
"description": t("Zusammenführungsmodus"), "description": t("first: erster Zweig; all: Dict-Felder zusammenführen; append: Listen anhängen"),
"default": "first", "default": "first",
}, },
{ {
"name": "inputCount", "name": "inputCount",
"type": "number", "type": "int",
"required": False, "required": False,
"frontendType": "number", "frontendType": "number",
"frontendOptions": {"min": 2, "max": 5}, "frontendOptions": {"min": 2, "max": 5},
"description": t("Anzahl Eingänge"), "description": t("Anzahl Eingänge dieses Nodes (25)"),
"default": 2, "default": 2,
}, },
], ],
"inputs": 2, "inputs": 2,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["Transit"]}, 1: {"accepts": ["Transit"]}}, "inputPorts": {
0: {"accepts": list(_FLOW_INPUT_SCHEMAS)},
1: {"accepts": list(_FLOW_INPUT_SCHEMAS)},
},
"outputPorts": {0: {"schema": "MergeResult"}}, "outputPorts": {0: {"schema": "MergeResult"}},
"executor": "flow", "executor": "flow",
"meta": {"icon": "mdi-call-merge", "color": "#FF9800", "usesAi": False}, "meta": {"icon": "mdi-call-merge", "color": "#FF9800", "usesAi": False},

View file

@ -3,6 +3,18 @@
from modules.shared.i18nRegistry import t from modules.shared.i18nRegistry import t
# Canonical form field types — single source of truth.
# portType maps to the PORT_TYPE_CATALOG primitive used by DataPicker / validateGraph.
FORM_FIELD_TYPES = [
{"id": "text", "label": "Text (einzeilig)", "portType": "str"},
{"id": "textarea", "label": "Text (mehrzeilig)", "portType": "str"},
{"id": "number", "label": "Zahl", "portType": "int"},
{"id": "boolean", "label": "Ja/Nein", "portType": "bool"},
{"id": "date", "label": "Datum", "portType": "str"},
{"id": "email", "label": "E-Mail", "portType": "str"},
{"id": "select", "label": "Auswahl", "portType": "str"},
]
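`FORM_FIELD_TYPES` is consumed in two places: the API payload below exposes it to the frontend, and `deriveFormPayloadSchemaFromParam` (in the portTypes diff further down) builds an id-to-portType lookup from it. The lookup itself is just:

_FORM_TYPE_TO_PORT = {f["id"]: f["portType"] for f in FORM_FIELD_TYPES}
assert _FORM_TYPE_TO_PORT["number"] == "int"    # form "number" fields become int ports
assert _FORM_TYPE_TO_PORT["date"] == "str"      # dates travel as ISO strings in the catalog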
INPUT_NODES = [ INPUT_NODES = [
{ {
"id": "input.form", "id": "input.form",
@ -32,11 +44,11 @@ INPUT_NODES = [
"label": t("Genehmigung"), "label": t("Genehmigung"),
"description": t("Benutzer genehmigt oder lehnt ab"), "description": t("Benutzer genehmigt oder lehnt ab"),
"parameters": [ "parameters": [
{"name": "title", "type": "string", "required": True, "frontendType": "text", {"name": "title", "type": "str", "required": True, "frontendType": "text",
"description": t("Genehmigungstitel")}, "description": t("Genehmigungstitel")},
{"name": "description", "type": "string", "required": False, "frontendType": "textarea", {"name": "description", "type": "str", "required": False, "frontendType": "textarea",
"description": t("Was genehmigt werden soll")}, "description": t("Was genehmigt werden soll")},
{"name": "approvalType", "type": "string", "required": False, "frontendType": "select", {"name": "approvalType", "type": "str", "required": False, "frontendType": "select",
"frontendOptions": {"options": ["generic", "document"]}, "frontendOptions": {"options": ["generic", "document"]},
"description": t("Typ: document oder generic"), "default": "generic"}, "description": t("Typ: document oder generic"), "default": "generic"},
], ],
@ -53,14 +65,14 @@ INPUT_NODES = [
"label": t("Upload"), "label": t("Upload"),
"description": t("Benutzer lädt Datei(en) hoch"), "description": t("Benutzer lädt Datei(en) hoch"),
"parameters": [ "parameters": [
{"name": "accept", "type": "string", "required": False, "frontendType": "text", {"name": "accept", "type": "str", "required": False, "frontendType": "text",
"description": t("Accept-String"), "default": ""}, "description": t("Accept-String"), "default": ""},
{"name": "allowedTypes", "type": "json", "required": False, "frontendType": "multiselect", {"name": "allowedTypes", "type": "json", "required": False, "frontendType": "multiselect",
"frontendOptions": {"options": ["pdf", "docx", "xlsx", "pptx", "txt", "csv", "jpg", "png", "gif"]}, "frontendOptions": {"options": ["pdf", "docx", "xlsx", "pptx", "txt", "csv", "jpg", "png", "gif"]},
"description": t("Ausgewählte Dateitypen"), "default": []}, "description": t("Ausgewählte Dateitypen"), "default": []},
{"name": "maxSize", "type": "number", "required": False, "frontendType": "number", {"name": "maxSize", "type": "int", "required": False, "frontendType": "number",
"description": t("Max. Dateigröße in MB"), "default": 10}, "description": t("Max. Dateigröße in MB"), "default": 10},
{"name": "multiple", "type": "boolean", "required": False, "frontendType": "checkbox", {"name": "multiple", "type": "bool", "required": False, "frontendType": "checkbox",
"description": t("Mehrere Dateien erlauben"), "default": False}, "description": t("Mehrere Dateien erlauben"), "default": False},
], ],
"inputs": 1, "inputs": 1,
@ -76,9 +88,9 @@ INPUT_NODES = [
"label": t("Kommentar"), "label": t("Kommentar"),
"description": t("Benutzer fügt einen Kommentar hinzu"), "description": t("Benutzer fügt einen Kommentar hinzu"),
"parameters": [ "parameters": [
{"name": "placeholder", "type": "string", "required": False, "frontendType": "text", {"name": "placeholder", "type": "str", "required": False, "frontendType": "text",
"description": t("Platzhalter"), "default": ""}, "description": t("Platzhalter"), "default": ""},
{"name": "required", "type": "boolean", "required": False, "frontendType": "checkbox", {"name": "required", "type": "bool", "required": False, "frontendType": "checkbox",
"description": t("Kommentar erforderlich"), "default": True}, "description": t("Kommentar erforderlich"), "default": True},
], ],
"inputs": 1, "inputs": 1,
@ -94,9 +106,9 @@ INPUT_NODES = [
"label": t("Prüfung"), "label": t("Prüfung"),
"description": t("Benutzer prüft Inhalt"), "description": t("Benutzer prüft Inhalt"),
"parameters": [ "parameters": [
{"name": "contentRef", "type": "string", "required": True, "frontendType": "text", {"name": "contentRef", "type": "str", "required": True, "frontendType": "text",
"description": t("Referenz auf Inhalt")}, "description": t("Referenz auf Inhalt")},
{"name": "reviewType", "type": "string", "required": False, "frontendType": "select", {"name": "reviewType", "type": "str", "required": False, "frontendType": "select",
"frontendOptions": {"options": ["generic", "document"]}, "frontendOptions": {"options": ["generic", "document"]},
"description": t("Art der Prüfung"), "default": "generic"}, "description": t("Art der Prüfung"), "default": "generic"},
], ],
@ -115,7 +127,7 @@ INPUT_NODES = [
"parameters": [ "parameters": [
{"name": "options", "type": "json", "required": True, "frontendType": "keyValueRows", {"name": "options", "type": "json", "required": True, "frontendType": "keyValueRows",
"description": t("Optionen"), "default": []}, "description": t("Optionen"), "default": []},
{"name": "multiple", "type": "boolean", "required": False, "frontendType": "checkbox", {"name": "multiple", "type": "bool", "required": False, "frontendType": "checkbox",
"description": t("Mehrfachauswahl erlauben"), "default": False}, "description": t("Mehrfachauswahl erlauben"), "default": False},
], ],
"inputs": 1, "inputs": 1,
@ -131,11 +143,11 @@ INPUT_NODES = [
"label": t("Bestätigung"), "label": t("Bestätigung"),
"description": t("Benutzer bestätigt Ja/Nein"), "description": t("Benutzer bestätigt Ja/Nein"),
"parameters": [ "parameters": [
{"name": "question", "type": "string", "required": True, "frontendType": "text", {"name": "question", "type": "str", "required": True, "frontendType": "text",
"description": t("Zu bestätigende Frage")}, "description": t("Zu bestätigende Frage")},
{"name": "confirmLabel", "type": "string", "required": False, "frontendType": "text", {"name": "confirmLabel", "type": "str", "required": False, "frontendType": "text",
"description": t("Label für Bestätigen-Button"), "default": "Confirm"}, "description": t("Label für Bestätigen-Button"), "default": "Confirm"},
{"name": "rejectLabel", "type": "string", "required": False, "frontendType": "text", {"name": "rejectLabel", "type": "str", "required": False, "frontendType": "text",
"description": t("Label für Ablehnen-Button"), "default": "Reject"}, "description": t("Label für Ablehnen-Button"), "default": "Reject"},
], ],
"inputs": 1, "inputs": 1,

View file

@ -25,7 +25,7 @@ REDMINE_NODES = [
"description": t("Einzelnes Redmine-Ticket aus dem Mirror laden."), "description": t("Einzelnes Redmine-Ticket aus dem Mirror laden."),
"parameters": [ "parameters": [
dict(_REDMINE_INSTANCE_PARAM), dict(_REDMINE_INSTANCE_PARAM),
{"name": "ticketId", "type": "number", "required": True, "frontendType": "number", {"name": "ticketId", "type": "int", "required": True, "frontendType": "number",
"description": t("Redmine-Ticket-ID")}, "description": t("Redmine-Ticket-ID")},
], ],
"inputs": 1, "inputs": 1,
@ -43,17 +43,17 @@ REDMINE_NODES = [
"description": t("Tickets aus dem lokalen Mirror mit Filtern (Tracker, Status, Zeitraum, Zuweisung)."), "description": t("Tickets aus dem lokalen Mirror mit Filtern (Tracker, Status, Zeitraum, Zuweisung)."),
"parameters": [ "parameters": [
dict(_REDMINE_INSTANCE_PARAM), dict(_REDMINE_INSTANCE_PARAM),
{"name": "trackerIds", "type": "string", "required": False, "frontendType": "text", {"name": "trackerIds", "type": "str", "required": False, "frontendType": "text",
"description": t("Tracker-IDs (Komma-separiert)"), "default": ""}, "description": t("Tracker-IDs (Komma-separiert)"), "default": ""},
{"name": "status", "type": "string", "required": False, "frontendType": "text", {"name": "status", "type": "str", "required": False, "frontendType": "text",
"description": t("Status-Filter: open | closed | *"), "default": "*"}, "description": t("Status-Filter: open | closed | *"), "default": "*"},
{"name": "dateFrom", "type": "string", "required": False, "frontendType": "date", {"name": "dateFrom", "type": "str", "required": False, "frontendType": "date",
"description": t("Zeitraum ab (ISO-Datum)"), "default": ""}, "description": t("Zeitraum ab (ISO-Datum)"), "default": ""},
{"name": "dateTo", "type": "string", "required": False, "frontendType": "date", {"name": "dateTo", "type": "str", "required": False, "frontendType": "date",
"description": t("Zeitraum bis (ISO-Datum)"), "default": ""}, "description": t("Zeitraum bis (ISO-Datum)"), "default": ""},
{"name": "assignedToId", "type": "number", "required": False, "frontendType": "number", {"name": "assignedToId", "type": "int", "required": False, "frontendType": "number",
"description": t("Nur Tickets dieses Benutzers (ID)")}, "description": t("Nur Tickets dieses Benutzers (ID)")},
{"name": "limit", "type": "number", "required": False, "frontendType": "number", {"name": "limit", "type": "int", "required": False, "frontendType": "number",
"description": t("Max. Anzahl Tickets (1-500)"), "default": 100}, "description": t("Max. Anzahl Tickets (1-500)"), "default": 100},
], ],
"inputs": 1, "inputs": 1,
@ -71,21 +71,21 @@ REDMINE_NODES = [
"description": t("Neues Ticket in Redmine anlegen. Mirror wird sofort aktualisiert."), "description": t("Neues Ticket in Redmine anlegen. Mirror wird sofort aktualisiert."),
"parameters": [ "parameters": [
dict(_REDMINE_INSTANCE_PARAM), dict(_REDMINE_INSTANCE_PARAM),
{"name": "subject", "type": "string", "required": True, "frontendType": "text", {"name": "subject", "type": "str", "required": True, "frontendType": "text",
"description": t("Ticket-Titel")}, "description": t("Ticket-Titel")},
{"name": "trackerId", "type": "number", "required": True, "frontendType": "number", {"name": "trackerId", "type": "int", "required": True, "frontendType": "number",
"description": t("Tracker-ID (Userstory, Feature, Task, ...)")}, "description": t("Tracker-ID (Userstory, Feature, Task, ...)")},
{"name": "description", "type": "string", "required": False, "frontendType": "textarea", {"name": "description", "type": "str", "required": False, "frontendType": "textarea",
"description": t("Ticket-Beschreibung"), "default": ""}, "description": t("Ticket-Beschreibung"), "default": ""},
{"name": "statusId", "type": "number", "required": False, "frontendType": "number", {"name": "statusId", "type": "int", "required": False, "frontendType": "number",
"description": t("Status-ID (optional)")}, "description": t("Status-ID (optional)")},
{"name": "priorityId", "type": "number", "required": False, "frontendType": "number", {"name": "priorityId", "type": "int", "required": False, "frontendType": "number",
"description": t("Prioritaet-ID (optional)")}, "description": t("Prioritaet-ID (optional)")},
{"name": "assignedToId", "type": "number", "required": False, "frontendType": "number", {"name": "assignedToId", "type": "int", "required": False, "frontendType": "number",
"description": t("Zugewiesene Benutzer-ID (optional)")}, "description": t("Zugewiesene Benutzer-ID (optional)")},
{"name": "parentIssueId", "type": "number", "required": False, "frontendType": "number", {"name": "parentIssueId", "type": "int", "required": False, "frontendType": "number",
"description": t("Uebergeordnetes Ticket (optional)")}, "description": t("Uebergeordnetes Ticket (optional)")},
{"name": "customFields", "type": "string", "required": False, "frontendType": "textarea", {"name": "customFields", "type": "str", "required": False, "frontendType": "textarea",
"description": t("Custom Fields als JSON {id: value}"), "default": ""}, "description": t("Custom Fields als JSON {id: value}"), "default": ""},
], ],
"inputs": 1, "inputs": 1,
@ -103,25 +103,25 @@ REDMINE_NODES = [
"description": t("Felder eines Redmine-Tickets aktualisieren. Nur gesetzte Felder werden uebertragen."), "description": t("Felder eines Redmine-Tickets aktualisieren. Nur gesetzte Felder werden uebertragen."),
"parameters": [ "parameters": [
dict(_REDMINE_INSTANCE_PARAM), dict(_REDMINE_INSTANCE_PARAM),
{"name": "ticketId", "type": "number", "required": True, "frontendType": "number", {"name": "ticketId", "type": "int", "required": True, "frontendType": "number",
"description": t("Ticket-ID")}, "description": t("Ticket-ID")},
{"name": "subject", "type": "string", "required": False, "frontendType": "text", {"name": "subject", "type": "str", "required": False, "frontendType": "text",
"description": t("Neuer Titel")}, "description": t("Neuer Titel")},
{"name": "description", "type": "string", "required": False, "frontendType": "textarea", {"name": "description", "type": "str", "required": False, "frontendType": "textarea",
"description": t("Neue Beschreibung")}, "description": t("Neue Beschreibung")},
{"name": "trackerId", "type": "number", "required": False, "frontendType": "number", {"name": "trackerId", "type": "int", "required": False, "frontendType": "number",
"description": t("Neuer Tracker")}, "description": t("Neuer Tracker")},
{"name": "statusId", "type": "number", "required": False, "frontendType": "number", {"name": "statusId", "type": "int", "required": False, "frontendType": "number",
"description": t("Neuer Status")}, "description": t("Neuer Status")},
{"name": "priorityId", "type": "number", "required": False, "frontendType": "number", {"name": "priorityId", "type": "int", "required": False, "frontendType": "number",
"description": t("Neue Prioritaet")}, "description": t("Neue Prioritaet")},
{"name": "assignedToId", "type": "number", "required": False, "frontendType": "number", {"name": "assignedToId", "type": "int", "required": False, "frontendType": "number",
"description": t("Neue Zuweisung")}, "description": t("Neue Zuweisung")},
{"name": "parentIssueId", "type": "number", "required": False, "frontendType": "number", {"name": "parentIssueId", "type": "int", "required": False, "frontendType": "number",
"description": t("Neues Parent-Ticket")}, "description": t("Neues Parent-Ticket")},
{"name": "notes", "type": "string", "required": False, "frontendType": "textarea", {"name": "notes", "type": "str", "required": False, "frontendType": "textarea",
"description": t("Kommentar (Journal-Eintrag)"), "default": ""}, "description": t("Kommentar (Journal-Eintrag)"), "default": ""},
{"name": "customFields", "type": "string", "required": False, "frontendType": "textarea", {"name": "customFields", "type": "str", "required": False, "frontendType": "textarea",
"description": t("Custom Fields als JSON {id: value}"), "default": ""}, "description": t("Custom Fields als JSON {id: value}"), "default": ""},
], ],
"inputs": 1, "inputs": 1,
@ -139,13 +139,13 @@ REDMINE_NODES = [
"description": t("Aggregierte Kennzahlen (KPIs, Durchsatz, Status-Verteilung, Backlog) aus dem Mirror."), "description": t("Aggregierte Kennzahlen (KPIs, Durchsatz, Status-Verteilung, Backlog) aus dem Mirror."),
"parameters": [ "parameters": [
dict(_REDMINE_INSTANCE_PARAM), dict(_REDMINE_INSTANCE_PARAM),
{"name": "dateFrom", "type": "string", "required": False, "frontendType": "date", {"name": "dateFrom", "type": "str", "required": False, "frontendType": "date",
"description": t("Zeitraum ab")}, "description": t("Zeitraum ab")},
{"name": "dateTo", "type": "string", "required": False, "frontendType": "date", {"name": "dateTo", "type": "str", "required": False, "frontendType": "date",
"description": t("Zeitraum bis")}, "description": t("Zeitraum bis")},
{"name": "bucket", "type": "string", "required": False, "frontendType": "text", {"name": "bucket", "type": "str", "required": False, "frontendType": "text",
"description": t("Bucket: day | week | month"), "default": "week"}, "description": t("Bucket: day | week | month"), "default": "week"},
{"name": "trackerIds", "type": "string", "required": False, "frontendType": "text", {"name": "trackerIds", "type": "str", "required": False, "frontendType": "text",
"description": t("Tracker-IDs (Komma-separiert)"), "default": ""}, "description": t("Tracker-IDs (Komma-separiert)"), "default": ""},
], ],
"inputs": 1, "inputs": 1,
@ -163,7 +163,7 @@ REDMINE_NODES = [
"description": t("Tickets und Beziehungen aus Redmine in den lokalen Mirror uebernehmen."), "description": t("Tickets und Beziehungen aus Redmine in den lokalen Mirror uebernehmen."),
"parameters": [ "parameters": [
dict(_REDMINE_INSTANCE_PARAM), dict(_REDMINE_INSTANCE_PARAM),
{"name": "force", "type": "boolean", "required": False, "frontendType": "checkbox", {"name": "force", "type": "bool", "required": False, "frontendType": "checkbox",
"description": t("Vollsync erzwingen (ignoriert lastSyncAt)"), "default": False}, "description": t("Vollsync erzwingen (ignoriert lastSyncAt)"), "default": False},
], ],
"inputs": 1, "inputs": 1,

View file

@ -10,14 +10,14 @@ SHAREPOINT_NODES = [
"label": t("Datei finden"), "label": t("Datei finden"),
"description": t("Datei nach Pfad oder Suche finden"), "description": t("Datei nach Pfad oder Suche finden"),
"parameters": [ "parameters": [
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", {"name": "connectionReference", "type": "str", "required": True, "frontendType": "userConnection",
"frontendOptions": {"authority": "msft"}, "frontendOptions": {"authority": "msft"},
"description": t("SharePoint-Verbindung")}, "description": t("SharePoint-Verbindung")},
{"name": "searchQuery", "type": "string", "required": True, "frontendType": "text", {"name": "searchQuery", "type": "str", "required": True, "frontendType": "text",
"description": t("Suchanfrage oder Pfad")}, "description": t("Suchanfrage oder Pfad")},
{"name": "site", "type": "string", "required": False, "frontendType": "text", {"name": "site", "type": "str", "required": False, "frontendType": "text",
"description": t("Optionaler Site-Hinweis"), "default": ""}, "description": t("Optionaler Site-Hinweis"), "default": ""},
{"name": "maxResults", "type": "number", "required": False, "frontendType": "number", {"name": "maxResults", "type": "int", "required": False, "frontendType": "number",
"description": t("Max Ergebnisse"), "default": 1000}, "description": t("Max Ergebnisse"), "default": 1000},
], ],
"inputs": 1, "inputs": 1,
@ -34,10 +34,10 @@ SHAREPOINT_NODES = [
"label": t("Datei lesen"), "label": t("Datei lesen"),
"description": t("Inhalt aus Datei extrahieren"), "description": t("Inhalt aus Datei extrahieren"),
"parameters": [ "parameters": [
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", {"name": "connectionReference", "type": "str", "required": True, "frontendType": "userConnection",
"frontendOptions": {"authority": "msft"}, "frontendOptions": {"authority": "msft"},
"description": t("SharePoint-Verbindung")}, "description": t("SharePoint-Verbindung")},
{"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFile", {"name": "pathQuery", "type": "str", "required": True, "frontendType": "sharepointFile",
"frontendOptions": {"dependsOn": "connectionReference"}, "frontendOptions": {"dependsOn": "connectionReference"},
"description": t("Dateipfad")}, "description": t("Dateipfad")},
], ],
@ -55,13 +55,13 @@ SHAREPOINT_NODES = [
"label": t("Datei hochladen"), "label": t("Datei hochladen"),
"description": t("Datei zu SharePoint hochladen"), "description": t("Datei zu SharePoint hochladen"),
"parameters": [ "parameters": [
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", {"name": "connectionReference", "type": "str", "required": True, "frontendType": "userConnection",
"frontendOptions": {"authority": "msft"}, "frontendOptions": {"authority": "msft"},
"description": t("SharePoint-Verbindung")}, "description": t("SharePoint-Verbindung")},
{"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFolder", {"name": "pathQuery", "type": "str", "required": True, "frontendType": "sharepointFolder",
"frontendOptions": {"dependsOn": "connectionReference"}, "frontendOptions": {"dependsOn": "connectionReference"},
"description": t("Zielordner-Pfad")}, "description": t("Zielordner-Pfad")},
{"name": "content", "type": "string", "required": True, "frontendType": "hidden", {"name": "content", "type": "str", "required": True, "frontendType": "hidden",
"description": t("Datei-Inhalt aus Upstream-Node (via Wire oder DataRef)"), "default": ""}, "description": t("Datei-Inhalt aus Upstream-Node (via Wire oder DataRef)"), "default": ""},
], ],
"inputs": 1, "inputs": 1,
@ -78,10 +78,10 @@ SHAREPOINT_NODES = [
"label": t("Dateien auflisten"), "label": t("Dateien auflisten"),
"description": t("Dateien in Ordner auflisten"), "description": t("Dateien in Ordner auflisten"),
"parameters": [ "parameters": [
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", {"name": "connectionReference", "type": "str", "required": True, "frontendType": "userConnection",
"frontendOptions": {"authority": "msft"}, "frontendOptions": {"authority": "msft"},
"description": t("SharePoint-Verbindung")}, "description": t("SharePoint-Verbindung")},
{"name": "pathQuery", "type": "string", "required": False, "frontendType": "sharepointFolder", {"name": "pathQuery", "type": "str", "required": False, "frontendType": "sharepointFolder",
"frontendOptions": {"dependsOn": "connectionReference"}, "frontendOptions": {"dependsOn": "connectionReference"},
"description": t("Ordnerpfad"), "default": "/"}, "description": t("Ordnerpfad"), "default": "/"},
], ],
@ -99,10 +99,10 @@ SHAREPOINT_NODES = [
"label": t("Datei herunterladen"), "label": t("Datei herunterladen"),
"description": t("Datei vom Pfad herunterladen"), "description": t("Datei vom Pfad herunterladen"),
"parameters": [ "parameters": [
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", {"name": "connectionReference", "type": "str", "required": True, "frontendType": "userConnection",
"frontendOptions": {"authority": "msft"}, "frontendOptions": {"authority": "msft"},
"description": t("SharePoint-Verbindung")}, "description": t("SharePoint-Verbindung")},
{"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFile", {"name": "pathQuery", "type": "str", "required": True, "frontendType": "sharepointFile",
"frontendOptions": {"dependsOn": "connectionReference"}, "frontendOptions": {"dependsOn": "connectionReference"},
"description": t("Vollständiger Dateipfad")}, "description": t("Vollständiger Dateipfad")},
], ],
@ -120,13 +120,13 @@ SHAREPOINT_NODES = [
"label": t("Datei kopieren"), "label": t("Datei kopieren"),
"description": t("Datei an Ziel kopieren"), "description": t("Datei an Ziel kopieren"),
"parameters": [ "parameters": [
{"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", {"name": "connectionReference", "type": "str", "required": True, "frontendType": "userConnection",
"frontendOptions": {"authority": "msft"}, "frontendOptions": {"authority": "msft"},
"description": t("SharePoint-Verbindung")}, "description": t("SharePoint-Verbindung")},
{"name": "sourcePath", "type": "string", "required": True, "frontendType": "sharepointFile", {"name": "sourcePath", "type": "str", "required": True, "frontendType": "sharepointFile",
"frontendOptions": {"dependsOn": "connectionReference"}, "frontendOptions": {"dependsOn": "connectionReference"},
"description": t("Quelldatei-Pfad")}, "description": t("Quelldatei-Pfad")},
{"name": "destPath", "type": "string", "required": True, "frontendType": "sharepointFolder", {"name": "destPath", "type": "str", "required": True, "frontendType": "sharepointFolder",
"frontendOptions": {"dependsOn": "connectionReference"}, "frontendOptions": {"dependsOn": "connectionReference"},
"description": t("Zielordner")}, "description": t("Zielordner")},
], ],

View file

@ -46,7 +46,7 @@ TRIGGER_NODES = [
"parameters": [ "parameters": [
{ {
"name": "cron", "name": "cron",
"type": "string", "type": "str",
"required": False, "required": False,
"frontendType": "cron", "frontendType": "cron",
"description": t("Cron-Ausdruck"), "description": t("Cron-Ausdruck"),

View file

@ -25,11 +25,11 @@ TRUSTEE_NODES = [
"description": t("Buchhaltungsdaten aus externem System importieren/aktualisieren."), "description": t("Buchhaltungsdaten aus externem System importieren/aktualisieren."),
"parameters": [ "parameters": [
dict(_TRUSTEE_INSTANCE_PARAM), dict(_TRUSTEE_INSTANCE_PARAM),
{"name": "forceRefresh", "type": "boolean", "required": False, "frontendType": "checkbox", {"name": "forceRefresh", "type": "bool", "required": False, "frontendType": "checkbox",
"description": t("Import erzwingen"), "default": False}, "description": t("Import erzwingen"), "default": False},
{"name": "dateFrom", "type": "string", "required": False, "frontendType": "date", {"name": "dateFrom", "type": "str", "required": False, "frontendType": "date",
"description": t("Startdatum"), "default": ""}, "description": t("Startdatum"), "default": ""},
{"name": "dateTo", "type": "string", "required": False, "frontendType": "date", {"name": "dateTo", "type": "str", "required": False, "frontendType": "date",
"description": t("Enddatum"), "default": ""}, "description": t("Enddatum"), "default": ""},
], ],
"inputs": 1, "inputs": 1,
@ -46,14 +46,14 @@ TRUSTEE_NODES = [
"label": t("Dokumente extrahieren"), "label": t("Dokumente extrahieren"),
"description": t("Dokumenttyp und Daten aus PDF/JPG per AI extrahieren."), "description": t("Dokumenttyp und Daten aus PDF/JPG per AI extrahieren."),
"parameters": [ "parameters": [
{"name": "connectionReference", "type": "string", "required": False, "frontendType": "userConnection", {"name": "connectionReference", "type": "str", "required": False, "frontendType": "userConnection",
"frontendOptions": {"authority": "msft"}, "frontendOptions": {"authority": "msft"},
"description": t("SharePoint-Verbindung"), "default": ""}, "description": t("SharePoint-Verbindung"), "default": ""},
{"name": "sharepointFolder", "type": "string", "required": False, "frontendType": "sharepointFolder", {"name": "sharepointFolder", "type": "str", "required": False, "frontendType": "sharepointFolder",
"frontendOptions": {"dependsOn": "connectionReference"}, "frontendOptions": {"dependsOn": "connectionReference"},
"description": t("SharePoint-Ordnerpfad"), "default": ""}, "description": t("SharePoint-Ordnerpfad"), "default": ""},
dict(_TRUSTEE_INSTANCE_PARAM), dict(_TRUSTEE_INSTANCE_PARAM),
{"name": "prompt", "type": "string", "required": False, "frontendType": "textarea", {"name": "prompt", "type": "str", "required": False, "frontendType": "textarea",
"description": t("AI-Prompt für Extraktion"), "default": ""}, "description": t("AI-Prompt für Extraktion"), "default": ""},
], ],
"inputs": 1, "inputs": 1,
@ -77,7 +77,7 @@ TRUSTEE_NODES = [
# is List[ActionDocument] (see datamodelChat.ActionResult). The # is List[ActionDocument] (see datamodelChat.ActionResult). The
# DataPicker uses this string to filter compatible upstream paths. # DataPicker uses this string to filter compatible upstream paths.
{"name": "documentList", "type": "List[ActionDocument]", "required": True, "frontendType": "dataRef", {"name": "documentList", "type": "List[ActionDocument]", "required": True, "frontendType": "dataRef",
"description": t("Dokumentenliste — gebunden via DataRef.")}, "description": t("Dokumente aus vorherigen Schritten")},
dict(_TRUSTEE_INSTANCE_PARAM), dict(_TRUSTEE_INSTANCE_PARAM),
], ],
"inputs": 1, "inputs": 1,
@ -95,7 +95,7 @@ TRUSTEE_NODES = [
"description": t("Trustee-Positionen in Buchhaltungssystem übertragen."), "description": t("Trustee-Positionen in Buchhaltungssystem übertragen."),
"parameters": [ "parameters": [
{"name": "documentList", "type": "List[ActionDocument]", "required": True, "frontendType": "dataRef", {"name": "documentList", "type": "List[ActionDocument]", "required": True, "frontendType": "dataRef",
"description": t("Verarbeitete Dokumentenliste — gebunden via DataRef.")}, "description": t("Dokumente aus vorherigen Schritten")},
dict(_TRUSTEE_INSTANCE_PARAM), dict(_TRUSTEE_INSTANCE_PARAM),
], ],
"inputs": 1, "inputs": 1,
@ -113,25 +113,25 @@ TRUSTEE_NODES = [
"description": t("Daten aus der Trustee-DB lesen (Lookup, Aggregation, Roh-Export). Pendant zu refreshAccountingData ohne externen Sync."), "description": t("Daten aus der Trustee-DB lesen (Lookup, Aggregation, Roh-Export). Pendant zu refreshAccountingData ohne externen Sync."),
"parameters": [ "parameters": [
dict(_TRUSTEE_INSTANCE_PARAM), dict(_TRUSTEE_INSTANCE_PARAM),
{"name": "mode", "type": "string", "required": True, "frontendType": "select", {"name": "mode", "type": "str", "required": True, "frontendType": "select",
"frontendOptions": {"options": ["lookup", "raw", "aggregate"]}, "frontendOptions": {"options": ["lookup", "raw", "aggregate"]},
"description": t("Abfragemodus"), "default": "lookup"}, "description": t("Abfragemodus"), "default": "lookup"},
{"name": "entity", "type": "string", "required": True, "frontendType": "select", {"name": "entity", "type": "str", "required": True, "frontendType": "select",
"frontendOptions": {"options": ["tenantWithRent", "contact", "journalLines", "accounts", "balances"]}, "frontendOptions": {"options": ["tenantWithRent", "contact", "journalLines", "accounts", "balances"]},
"description": t("Entität, die gelesen werden soll"), "default": "tenantWithRent"}, "description": t("Entität, die gelesen werden soll"), "default": "tenantWithRent"},
{"name": "tenantNameRef", "type": "string", "required": False, "frontendType": "text", {"name": "tenantNameRef", "type": "str", "required": False, "frontendType": "text",
"frontendOptions": {"dependsOn": "entity", "showWhen": ["tenantWithRent", "contact"]}, "frontendOptions": {"dependsOn": "entity", "showWhen": ["tenantWithRent", "contact"]},
"description": t("Mietername (oder {{wire.feld}} aus Upstream)"), "default": ""}, "description": t("Mietername (oder {{wire.feld}} aus Upstream)"), "default": ""},
{"name": "tenantAddressRef", "type": "string", "required": False, "frontendType": "text", {"name": "tenantAddressRef", "type": "str", "required": False, "frontendType": "text",
"frontendOptions": {"dependsOn": "entity", "showWhen": ["tenantWithRent", "contact"]}, "frontendOptions": {"dependsOn": "entity", "showWhen": ["tenantWithRent", "contact"]},
"description": t("Mieteradresse (Toleranz für Tippfehler)"), "default": ""}, "description": t("Mieteradresse (Toleranz für Tippfehler)"), "default": ""},
{"name": "period", "type": "string", "required": False, "frontendType": "text", {"name": "period", "type": "str", "required": False, "frontendType": "text",
"frontendOptions": {"dependsOn": "entity", "showWhen": ["tenantWithRent", "journalLines", "balances"]}, "frontendOptions": {"dependsOn": "entity", "showWhen": ["tenantWithRent", "journalLines", "balances"]},
"description": t("Zeitraum (YYYY oder YYYY-MM-DD/YYYY-MM-DD)"), "default": ""}, "description": t("Zeitraum (YYYY oder YYYY-MM-DD/YYYY-MM-DD)"), "default": ""},
{"name": "rentAccountPattern", "type": "string", "required": False, "frontendType": "text", {"name": "rentAccountPattern", "type": "str", "required": False, "frontendType": "text",
"frontendOptions": {"dependsOn": "entity", "showWhen": ["tenantWithRent"]}, "frontendOptions": {"dependsOn": "entity", "showWhen": ["tenantWithRent"]},
"description": t("Konto-Filter für Mietzins (z.B. '6000-6099' oder '6*')"), "default": ""}, "description": t("Konto-Filter für Mietzins (z.B. '6000-6099' oder '6*')"), "default": ""},
{"name": "filterJson", "type": "string", "required": False, "frontendType": "textarea", {"name": "filterJson", "type": "str", "required": False, "frontendType": "textarea",
"frontendOptions": {"dependsOn": "mode", "showWhen": ["raw", "aggregate"]}, "frontendOptions": {"dependsOn": "mode", "showWhen": ["raw", "aggregate"]},
"description": t("Optionaler JSON-Filter für mode=raw/aggregate"), "default": ""}, "description": t("Optionaler JSON-Filter für mode=raw/aggregate"), "default": ""},
], ],

View file

@ -9,6 +9,7 @@ import logging
from typing import Dict, List, Any, Optional from typing import Dict, List, Any, Optional
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
from modules.features.graphicalEditor.nodeDefinitions.input import FORM_FIELD_TYPES
from modules.features.graphicalEditor.nodeAdapter import bindsActionFromLegacy from modules.features.graphicalEditor.nodeAdapter import bindsActionFromLegacy
from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, SYSTEM_VARIABLES from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, SYSTEM_VARIABLES
from modules.shared.i18nRegistry import normalizePrimaryLanguageTag, resolveText from modules.shared.i18nRegistry import normalizePrimaryLanguageTag, resolveText
@ -119,6 +120,7 @@ def getNodeTypesForApi(
"categories": categories, "categories": categories,
"portTypeCatalog": catalogSerialized, "portTypeCatalog": catalogSerialized,
"systemVariables": SYSTEM_VARIABLES, "systemVariables": SYSTEM_VARIABLES,
"formFieldTypes": FORM_FIELD_TYPES,
} }

View file

@ -34,6 +34,8 @@ class PortField(BaseModel):
# FeatureInstanceRef.featureCode). Pickers/validators use it to filter compatible # FeatureInstanceRef.featureCode). Pickers/validators use it to filter compatible
# producers by sub-type. Type must be "str" when discriminator is True. # producers by sub-type. Type must be "str" when discriminator is True.
discriminator: bool = False discriminator: bool = False
# Surfaces this field at the top of the DataPicker list as the most common pick.
recommended: bool = False
class PortSchema(BaseModel): class PortSchema(BaseModel):
@ -153,7 +155,7 @@ PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
]), ]),
"DocumentList": PortSchema(name="DocumentList", fields=[ "DocumentList": PortSchema(name="DocumentList", fields=[
PortField(name="documents", type="List[Document]", PortField(name="documents", type="List[Document]",
description="Dokumentenliste"), description="Dokumente aus vorherigen Schritten", recommended=True),
PortField(name="connection", type="ConnectionRef", required=False, PortField(name="connection", type="ConnectionRef", required=False,
description="Verbindung, mit der die Liste erzeugt wurde"), description="Verbindung, mit der die Liste erzeugt wurde"),
PortField(name="source", type="SharePointFolderRef", required=False, PortField(name="source", type="SharePointFolderRef", required=False,
@ -219,9 +221,9 @@ PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
PortField(name="prompt", type="str", PortField(name="prompt", type="str",
description="Prompt"), description="Prompt"),
PortField(name="response", type="str", PortField(name="response", type="str",
description="Antworttext"), description="Antworttext", recommended=True),
PortField(name="responseData", type="Dict", required=False, PortField(name="responseData", type="Dict", required=False,
description="Strukturierte Antwort"), description="Strukturierte Antwort (nur bei JSON-Ausgabe)"),
PortField(name="context", type="str", PortField(name="context", type="str",
description="Kontext"), description="Kontext"),
PortField(name="documents", type="List[Document]", PortField(name="documents", type="List[Document]",
@ -642,6 +644,69 @@ def resolveSystemVariable(variable: str, context: Dict[str, Any]) -> Any:
# Output normalizers # Output normalizers
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
def _file_record_to_document(f: Any) -> Optional[Dict[str, Any]]:
"""Map API / task-upload file dicts onto PortSchema ``Document`` fields."""
if f is None:
return None
if isinstance(f, str) and f.strip():
return {"id": f.strip()}
if not isinstance(f, dict):
return None
inner = f.get("file") if isinstance(f.get("file"), dict) else None
src = inner or f
out: Dict[str, Any] = {}
fid = src.get("id") or f.get("id")
if fid is not None and str(fid).strip():
out["id"] = str(fid).strip()
name = (
src.get("name")
or src.get("fileName")
or f.get("fileName")
or f.get("name")
)
if name is not None and str(name).strip():
out["name"] = str(name).strip()
mime = src.get("mimeType") or src.get("mime") or f.get("mimeType")
if mime is not None and str(mime).strip():
out["mimeType"] = str(mime).strip()
for k in ("sizeBytes", "downloadUrl", "filePath"):
v = src.get(k) if k in src else f.get(k)
if v is not None and v != "":
out[k] = v
return out if out else None
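Concretely, the helper accepts the three shapes an upload produces: a bare id string, a flat file dict, or a wrapper with a nested `file` dict (values below are illustrative):

_file_record_to_document("abc123")
# -> {"id": "abc123"}
_file_record_to_document({"file": {"id": "f1", "name": "offer.pdf", "mimeType": "application/pdf"}})
# -> {"id": "f1", "name": "offer.pdf", "mimeType": "application/pdf"}
_file_record_to_document({"fileName": "scan.jpg"})
# -> {"name": "scan.jpg"}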
def _coerce_document_list_upload_fields(result: Dict[str, Any]) -> None:
"""
Human task ``input.upload`` completes with ``file`` / ``files`` / ``fileIds``.
DocumentList expects ``documents``. Without this, resume adds ``documents: []`` and drops the real files.
"""
docs = result.get("documents")
if isinstance(docs, list) and len(docs) > 0:
return
collected: List[Dict[str, Any]] = []
files = result.get("files")
if isinstance(files, list):
for item in files:
d = _file_record_to_document(item)
if d:
collected.append(d)
if not collected:
single = result.get("file")
d = _file_record_to_document(single)
if d:
collected.append(d)
if not collected and isinstance(result.get("fileIds"), list):
for fid in result["fileIds"]:
if fid is not None and str(fid).strip():
collected.append({"id": str(fid).strip()})
if not collected:
return
result["documents"] = collected
if not result.get("count"):
result["count"] = len(collected)
def normalizeToSchema(raw: Any, schemaName: str) -> Dict[str, Any]: def normalizeToSchema(raw: Any, schemaName: str) -> Dict[str, Any]:
""" """
Normalize raw executor output to match the declared port schema. Normalize raw executor output to match the declared port schema.
@ -658,8 +723,17 @@ def normalizeToSchema(raw: Any, schemaName: str) -> Dict[str, Any]:
if not schema or schemaName == "Transit": if not schema or schemaName == "Transit":
return result return result
    if schemaName == "DocumentList":
        _coerce_document_list_upload_fields(result)
# Only default **required** fields. Optional fields stay absent so DataRefs / context
# resolution never pick a synthetic `{}` or `[]` (e.g. AiResult.responseData when the
# model returned plain text only).
for field in schema.fields: for field in schema.fields:
if field.name not in result: if field.name not in result and field.required:
result[field.name] = _defaultForType(field.type) result[field.name] = _defaultForType(field.type)
return result return result
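The visible effect on `AiResult`: required fields are still defaulted, but optional ones stay absent, so a DataRef on `responseData` now resolves to nothing instead of an empty dict (illustrative, given the catalog entries shown above):

out = normalizeToSchema({"prompt": "p", "response": "hello"}, "AiResult")
assert out["response"] == "hello"
assert "responseData" not in out   # optional: no longer materialized as {}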
@ -740,6 +814,9 @@ def _resolveTransitChain(
def deriveFormPayloadSchemaFromParam(node: Dict[str, Any], param_key: str) -> Optional[PortSchema]: def deriveFormPayloadSchemaFromParam(node: Dict[str, Any], param_key: str) -> Optional[PortSchema]:
"""Derive output schema from a field-builder JSON list (``fields``, ``formFields``, …).""" """Derive output schema from a field-builder JSON list (``fields``, ``formFields``, …)."""
from modules.features.graphicalEditor.nodeDefinitions.input import FORM_FIELD_TYPES
_FORM_TYPE_TO_PORT: Dict[str, str] = {f["id"]: f["portType"] for f in FORM_FIELD_TYPES}
fields_param = (node.get("parameters") or {}).get(param_key) fields_param = (node.get("parameters") or {}).get(param_key)
if not fields_param or not isinstance(fields_param, list): if not fields_param or not isinstance(fields_param, list):
return None return None
@ -749,9 +826,11 @@ def deriveFormPayloadSchemaFromParam(node: Dict[str, Any], param_key: str) -> Op
_desc = resolveText(lab) if lab is not None else fname _desc = resolveText(lab) if lab is not None else fname
if not str(_desc).strip(): if not str(_desc).strip():
_desc = fname _desc = fname
raw_type = str(ftype) if ftype is not None else "str"
port_type = _FORM_TYPE_TO_PORT.get(raw_type, raw_type)
portFields.append(PortField( portFields.append(PortField(
name=fname, name=fname,
type=str(ftype) if ftype is not None else "str", type=port_type,
description=_desc, description=_desc,
required=required, required=required,
)) ))

View file

@ -4028,58 +4028,92 @@ class AppObjects:
raise raise
# ------------------------------------------------------------------------- # -------------------------------------------------------------------------
# Table Grouping (user-defined groups for FormGeneratorTable instances) # Table List Views (saved display presets: filters, sort, groupByLevels)
# ------------------------------------------------------------------------- # -------------------------------------------------------------------------
def getTableGrouping(self, contextKey: str): def getTableListViews(self, contextKey: str) -> list:
""" """Return all saved views for the current user and contextKey."""
Load the group tree for the current user and the given contextKey. from modules.datamodels.datamodelPagination import TableListView
Returns a TableGrouping instance or None if no grouping has been saved yet.
contextKey identifies the table instance, e.g. "connections", "prompts",
"admin/users", "trustee/{instanceId}/documents".
"""
from modules.datamodels.datamodelPagination import TableGrouping
try: try:
records = self.db.getRecordset( rows = self.db.getRecordset(
TableGrouping, TableListView,
recordFilter={"userId": str(self.userId), "contextKey": contextKey}, recordFilter={"userId": str(self.userId), "contextKey": contextKey},
) )
if not records: result = []
return None for row in (rows or []):
row = records[0] try:
return TableGrouping.model_validate(row) if isinstance(row, dict) else row result.append(TableListView.model_validate(row) if isinstance(row, dict) else row)
except Exception:
pass
return result
except Exception as e: except Exception as e:
logger.error(f"getTableGrouping failed for user={self.userId} key={contextKey}: {e}") logger.error(f"getTableListViews failed for user={self.userId} context={contextKey}: {e}")
return []
def getTableListView(self, contextKey: str, viewKey: str):
"""Return one view by viewKey or None if not found."""
from modules.datamodels.datamodelPagination import TableListView
try:
rows = self.db.getRecordset(
TableListView,
recordFilter={"userId": str(self.userId), "contextKey": contextKey, "viewKey": viewKey},
)
if not rows:
return None
row = rows[0]
return TableListView.model_validate(row) if isinstance(row, dict) else row
except Exception as e:
logger.error(f"getTableListView failed for user={self.userId} key={viewKey}: {e}")
return None return None
def upsertTableGrouping(self, contextKey: str, rootGroups: list): def createTableListView(self, contextKey: str, viewKey: str, displayName: str, config: dict):
""" """Create a new view. Raises ValueError if viewKey already exists for this context."""
Create or replace the group tree for the current user and contextKey. from modules.datamodels.datamodelPagination import TableListView
from modules.shared.timeUtils import getUtcTimestamp
if self.getTableListView(contextKey=contextKey, viewKey=viewKey) is not None:
raise ValueError(f"View '{viewKey}' already exists for context '{contextKey}'")
data = {
"id": str(uuid.uuid4()),
"userId": str(self.userId),
"contextKey": contextKey,
"viewKey": viewKey,
"displayName": displayName,
"config": config,
"updatedAt": getUtcTimestamp(),
}
try:
self.db.recordCreate(TableListView, data)
return TableListView.model_validate(data)
except Exception as e:
logger.error(f"createTableListView failed: {e}")
raise
rootGroups is a list of TableGroupNode-compatible dicts (the full tree). def updateTableListView(self, viewId: str, updates: dict):
Returns the saved TableGrouping instance. """Update an existing view by its primary key id."""
""" from modules.datamodels.datamodelPagination import TableListView
from modules.datamodels.datamodelPagination import TableGrouping
from modules.shared.timeUtils import getUtcTimestamp from modules.shared.timeUtils import getUtcTimestamp
try: try:
existing = self.getTableGrouping(contextKey) updates = {**updates, "updatedAt": getUtcTimestamp()}
data = { self.db.recordModify(TableListView, viewId, updates)
"id": existing.id if existing else str(uuid.uuid4()), rows = self.db.getRecordset(TableListView, recordFilter={"id": viewId})
"userId": str(self.userId), if rows:
"contextKey": contextKey, row = rows[0]
"rootGroups": rootGroups, return TableListView.model_validate(row) if isinstance(row, dict) else row
"updatedAt": getUtcTimestamp(), return None
}
if existing:
self.db.recordModify(TableGrouping, existing.id, data)
else:
self.db.recordCreate(TableGrouping, data)
return TableGrouping.model_validate(data)
except Exception as e: except Exception as e:
logger.error(f"upsertTableGrouping failed for user={self.userId} key={contextKey}: {e}") logger.error(f"updateTableListView failed for id={viewId}: {e}")
raise raise
def deleteTableListView(self, viewId: str) -> bool:
"""Delete a view by primary key id. Returns True on success."""
from modules.datamodels.datamodelPagination import TableListView
try:
self.db.recordDelete(TableListView, viewId)
return True
except Exception as e:
logger.error(f"deleteTableListView failed for id={viewId}: {e}")
return False
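Taken together these replace the single grouping record with named per-user presets. A usage sketch, assuming `app` is an `AppObjects` instance for the signed-in user; the `config` keys mirror the section comment (filters, sort, groupByLevels), but their exact shape is the frontend's contract and is not defined here:

view = app.createTableListView(
    contextKey="connections",
    viewKey="openOnly",
    displayName="Open only",
    config={"filters": [{"field": "status", "op": "eq", "value": "open"}],
            "sort": [{"field": "updatedAt", "dir": "desc"}],
            "groupByLevels": []},
)
app.updateTableListView(view.id, {"displayName": "Open items"})
assert any(v.viewKey == "openOnly" for v in app.getTableListViews("connections"))
app.deleteTableListView(view.id)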
# Public Methods # Public Methods

View file

@ -19,7 +19,7 @@ from modules.interfaces.interfaceRbac import getRecordsetWithRBAC, getRecordsetP
from modules.security.rbac import RbacClass
from modules.datamodels.datamodelRbac import AccessRuleContext
from modules.datamodels.datamodelUam import AccessLevel
from modules.datamodels.datamodelFiles import FilePreview, FileItem, FileData, FileFolder
from modules.datamodels.datamodelUtils import Prompt
from modules.datamodels.datamodelMessaging import (
    MessagingSubscription,
@ -115,6 +115,14 @@ class ComponentObjects:
        # Update database context
        self.db.updateContext(self.userId)
def _effective_user_id(self) -> Optional[str]:
"""User id for audit + FileData writes; singleton hub may unset userId but keep currentUser."""
if self.userId:
return self.userId
if self.currentUser is not None:
return getattr(self.currentUser, "id", None)
return None
    def __del__(self):
        """Cleanup method to close database connection."""
        if hasattr(self, 'db') and self.db is not None:
@ -1068,6 +1076,241 @@ class ComponentObjects:
logger.error(f"Error converting file record: {str(e)}") logger.error(f"Error converting file record: {str(e)}")
return None return None
# ── Folder methods ─────────────────────────────────────────────────────────
def getOwnFolderTree(self) -> List[Dict[str, Any]]:
"""Folders owned by the current user, filtered via RBAC."""
return getRecordsetWithRBAC(
self.db, FileFolder, self.currentUser,
recordFilter={"sysCreatedBy": self.userId},
mandateId=self.mandateId,
featureInstanceId=self.featureInstanceId,
)
def getSharedFolderTree(self) -> List[Dict[str, Any]]:
"""Folders visible via scope but NOT owned by the current user.
Adds contextOrphan=True when a folder's parentId is not in the result set."""
allFolders = getRecordsetWithRBAC(
self.db, FileFolder, self.currentUser,
mandateId=self.mandateId,
featureInstanceId=self.featureInstanceId,
)
shared = [f for f in allFolders if f.get("sysCreatedBy") != self.userId]
sharedIds = {f["id"] for f in shared}
for f in shared:
f["contextOrphan"] = bool(f.get("parentId") and f["parentId"] not in sharedIds)
return shared
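The contextOrphan flag lets the client render shared folders whose parents are not themselves visible in this context. A self-contained sketch of the marking rule on plain dicts (assumed row shape):

```python
# Sketch of the contextOrphan rule; folder rows reduced to id/parentId.
shared = [
    {"id": "b", "parentId": "a"},   # parent "a" is NOT in the shared set
    {"id": "c", "parentId": "b"},   # parent "b" IS in the shared set
]
sharedIds = {f["id"] for f in shared}
for f in shared:
    f["contextOrphan"] = bool(f.get("parentId") and f["parentId"] not in sharedIds)
# -> "b" is contextOrphan (rendered at top level), "c" keeps its parent link.
```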
def getFolder(self, folderId: str) -> Optional[Dict[str, Any]]:
"""Return a single folder dict or None."""
results = getRecordsetWithRBAC(
self.db, FileFolder, self.currentUser,
recordFilter={"id": folderId},
mandateId=self.mandateId,
featureInstanceId=self.featureInstanceId,
)
return results[0] if results else None
def _isFolderOwner(self, folder) -> bool:
createdBy = (
getattr(folder, "sysCreatedBy", None)
or (folder.get("sysCreatedBy") if isinstance(folder, dict) else None)
)
return createdBy == self.userId
def _requireFolderWriteAccess(self, folder, folderId: str, operation: str = "update"):
"""Raise PermissionError if the user cannot mutate this folder.
Owners always can. Non-owners need RBAC ALL level."""
if self._isFolderOwner(folder):
return
from modules.interfaces.interfaceRbac import buildDataObjectKey
objectKey = buildDataObjectKey("FileFolder")
permissions = self.rbac.getUserPermissions(
self.currentUser, AccessRuleContext.DATA, objectKey,
mandateId=self.mandateId, featureInstanceId=self.featureInstanceId,
)
level = getattr(permissions, operation, None)
if level != AccessLevel.ALL:
raise PermissionError(
f"No permission to {operation} folder {folderId} (not owner, access level: {level})"
)
def createFolder(self, name: str, parentId: Optional[str] = None) -> Dict[str, Any]:
if not self.checkRbacPermission(FileFolder, "create"):
raise PermissionError("No permission to create folders")
folder = FileFolder(
name=name,
parentId=parentId,
mandateId=self.mandateId or "",
featureInstanceId=self.featureInstanceId or "",
scope="personal",
neutralize=False,
)
self.db.recordCreate(FileFolder, folder)
return folder.model_dump()
def renameFolder(self, folderId: str, newName: str) -> Dict[str, Any]:
folder = self.getFolder(folderId)
if not folder:
raise FileNotFoundError(f"Folder {folderId} not found")
self._requireFolderWriteAccess(folder, folderId, "update")
self.db.recordModify(FileFolder, folderId, {"name": newName})
folder["name"] = newName
return folder
def moveFolder(self, folderId: str, newParentId: Optional[str] = None) -> Dict[str, Any]:
folder = self.getFolder(folderId)
if not folder:
raise FileNotFoundError(f"Folder {folderId} not found")
self._requireFolderWriteAccess(folder, folderId, "update")
if newParentId:
parent = self.getFolder(newParentId)
if not parent:
raise FileNotFoundError(f"Target parent folder {newParentId} not found")
self._requireFolderWriteAccess(parent, newParentId, "update")
# Circular-reference guard: newParentId must not be a descendant of folderId
if self._isDescendant(newParentId, folderId):
raise ValueError(f"Cannot move folder into its own subtree (circular reference)")
self.db.recordModify(FileFolder, folderId, {"parentId": newParentId})
folder["parentId"] = newParentId
return folder
def _isDescendant(self, candidateId: str, ancestorId: str) -> bool:
"""Return True if candidateId is a descendant of (or equal to) ancestorId."""
visited = set()
current = candidateId
while current:
if current == ancestorId:
return True
if current in visited:
break
visited.add(current)
f = self.getFolder(current)
current = f.get("parentId") if f else None
return False
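moveFolder relies on this parent walk to reject cycles before anything is written. The same logic in a self-contained form, with an in-memory dict standing in for getFolder (the ids are placeholders):

```python
# Self-contained stand-in for the parent walk; folders maps id -> parentId.
folders = {"root": None, "a": "root", "b": "a"}

def is_descendant(candidate_id, ancestor_id):
    visited, current = set(), candidate_id
    while current:
        if current == ancestor_id:
            return True
        if current in visited:      # defensive stop on pre-existing cycles
            break
        visited.add(current)
        current = folders.get(current)
    return False

assert is_descendant("b", "a")       # "b" lies under "a": moving "a" into "b" would cycle
assert not is_descendant("a", "b")   # moving "b" directly under "a" stays acyclic
```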
def deleteFolderCascade(self, folderId: str) -> Dict[str, Any]:
"""Delete a folder and all owned sub-folders + their files."""
folder = self.getFolder(folderId)
if not folder:
raise FileNotFoundError(f"Folder {folderId} not found")
self._requireFolderWriteAccess(folder, folderId, "delete")
folderIds = self._collectChildFolderIds(folderId)
# Verify all child folders are owned
for fid in folderIds:
if fid == folderId:
continue
child = self.getFolder(fid)
if child and not self._isFolderOwner(child):
raise PermissionError(f"Cannot delete folder tree: sub-folder {fid} is not owned by you")
# Collect files in those folders
fileRows = []
for fid in folderIds:
items = self.db.getRecordset(FileItem, recordFilter={"folderId": fid})
fileRows.extend(items)
for item in fileRows:
itemOwner = item.get("sysCreatedBy") if isinstance(item, dict) else getattr(item, "sysCreatedBy", None)
if itemOwner != self.userId:
itemId = item.get("id") if isinstance(item, dict) else getattr(item, "id", None)
raise PermissionError(f"Cannot delete folder tree: file {itemId} is not owned by you")
fileIds = [
(item.get("id") if isinstance(item, dict) else getattr(item, "id", None))
for item in fileRows
]
# Single transaction: delete FileData, FileItem, then FileFolder (children first)
self.db._ensure_connection()
try:
with self.db.connection.cursor() as cursor:
if fileIds:
cursor.execute('DELETE FROM "FileData" WHERE "id" = ANY(%s)', (fileIds,))
cursor.execute('DELETE FROM "FileItem" WHERE "id" = ANY(%s)', (fileIds,))
orderedIds = list(folderIds)
orderedIds.remove(folderId)
orderedIds.append(folderId)
if orderedIds:
cursor.execute('DELETE FROM "FileFolder" WHERE "id" = ANY(%s)', (orderedIds,))
self.db.connection.commit()
except Exception:
self.db.connection.rollback()
raise
return {"deletedFolders": len(folderIds), "deletedFiles": len(fileIds)}
def _collectChildFolderIds(self, folderId: str) -> List[str]:
"""BFS to collect folderId + all descendant folder IDs owned by user."""
result = [folderId]
queue = [folderId]
while queue:
parentId = queue.pop(0)
children = self.db.getRecordset(FileFolder, recordFilter={"parentId": parentId})
for child in children:
cid = child.get("id") if isinstance(child, dict) else getattr(child, "id", None)
if cid and cid not in result:
result.append(cid)
queue.append(cid)
return result
def patchFolderScope(self, folderId: str, scope: str, cascadeToFiles: bool = False) -> Dict[str, Any]:
validScopes = {"personal", "featureInstance", "mandate", "global"}
if scope not in validScopes:
raise ValueError(f"Invalid scope: {scope}. Must be one of {validScopes}")
folder = self.getFolder(folderId)
if not folder:
raise FileNotFoundError(f"Folder {folderId} not found")
self._requireFolderWriteAccess(folder, folderId, "update")
if scope == "global":
from modules.interfaces.interfaceRbac import buildDataObjectKey
objectKey = buildDataObjectKey("FileFolder")
permissions = self.rbac.getUserPermissions(
self.currentUser, AccessRuleContext.DATA, objectKey,
mandateId=self.mandateId, featureInstanceId=self.featureInstanceId,
)
if getattr(permissions, "update", None) != AccessLevel.ALL:
raise PermissionError("Setting global scope requires ALL permission")
self.db.recordModify(FileFolder, folderId, {"scope": scope})
filesUpdated = 0
if cascadeToFiles:
items = self.db.getRecordset(FileItem, recordFilter={"folderId": folderId})
for item in items:
owner = item.get("sysCreatedBy") if isinstance(item, dict) else getattr(item, "sysCreatedBy", None)
if owner == self.userId:
iid = item.get("id") if isinstance(item, dict) else getattr(item, "id", None)
self.db.recordModify(FileItem, iid, {"scope": scope})
filesUpdated += 1
return {"folderId": folderId, "scope": scope, "filesUpdated": filesUpdated}
def patchFolderNeutralize(self, folderId: str, neutralize: bool) -> Dict[str, Any]:
folder = self.getFolder(folderId)
if not folder:
raise FileNotFoundError(f"Folder {folderId} not found")
self._requireFolderWriteAccess(folder, folderId, "update")
self.db.recordModify(FileFolder, folderId, {"neutralize": neutralize})
items = self.db.getRecordset(FileItem, recordFilter={"folderId": folderId})
filesUpdated = 0
for item in items:
owner = item.get("sysCreatedBy") if isinstance(item, dict) else getattr(item, "sysCreatedBy", None)
if owner == self.userId:
iid = item.get("id") if isinstance(item, dict) else getattr(item, "id", None)
self.db.recordModify(FileItem, iid, {"neutralize": neutralize})
filesUpdated += 1
return {"folderId": folderId, "neutralize": neutralize, "filesUpdated": filesUpdated}
    def _isfileNameUnique(self, fileName: str, excludeFileId: Optional[str] = None) -> bool:
        """Checks if a fileName is unique for the current user."""
        # Get all files filtered by RBAC (will be filtered by user's access level)
@ -1144,9 +1387,30 @@ class ComponentObjects:
            fileSize=fileSize,
            fileHash=fileHash,
        )
# Ensure audit user is always stored: workflow/singleton contexts sometimes leave
# the connector without _current_user_id, so _saveRecord skips sysCreatedBy →
# getFile/createFileData RBAC then breaks (None != self.userId).
uid = self._effective_user_id()
if uid:
fileItem = fileItem.model_copy(update={"sysCreatedBy": str(uid)})
        # Store in database
        self.db.recordCreate(FileItem, fileItem)
verify = self.db.getRecordset(FileItem, recordFilter={"id": fileItem.id})
verify_creator = (verify[0].get("sysCreatedBy") if verify else None)
logger.info(
"createFile: id=%s name=%s scope=%s model_sysCreatedBy=%r db_sysCreatedBy=%r mandateId=%r featureInstanceId=%r "
"verify_rows=%s db=%s",
fileItem.id,
uniqueName,
fileItem.scope,
getattr(fileItem, "sysCreatedBy", None),
verify_creator,
mandateId or None,
featureInstanceId if featureInstanceId else None,
len(verify) if verify else 0,
getattr(self.db, "dbDatabase", "?"),
)
        return fileItem
@ -1268,44 +1532,8 @@ class ComponentObjects:
            raise FileDeletionError(f"Error deleting files in batch: {str(e)}")
    def _ensureFeatureInstanceGroup(self, featureInstanceId: str, contextKey: str = "files/list") -> Optional[str]:
        """Return the groupId of the default group for a feature instance.
        Creates the group if it doesn't exist yet."""
        try:
            import modules.interfaces.interfaceDbApp as _appIface
            appInterface = _appIface.getInterface(self._currentUser)
            existing = appInterface.getTableGrouping(contextKey)
            nodes = [n.model_dump() if hasattr(n, 'model_dump') else (n if isinstance(n, dict) else vars(n)) for n in (existing.rootGroups if existing else [])]
            # Look for group with name matching featureInstanceId
            def _find(nds):
                for nd in nds:
                    nid = nd.get("id") if isinstance(nd, dict) else getattr(nd, "id", None)
                    nmeta = nd.get("meta", {}) if isinstance(nd, dict) else getattr(nd, "meta", {})
                    if (nmeta or {}).get("featureInstanceId") == featureInstanceId:
                        return nid
                    subs = nd.get("subGroups", []) if isinstance(nd, dict) else getattr(nd, "subGroups", [])
                    result = _find(subs)
                    if result:
                        return result
                return None
            found = _find(nodes)
            if found:
                return found
            # Create new group
            import uuid
            newId = str(uuid.uuid4())
            newGroup = {
                "id": newId,
                "name": featureInstanceId,
                "itemIds": [],
                "subGroups": [],
                "meta": {"featureInstanceId": featureInstanceId},
            }
            nodes.append(newGroup)
            appInterface.upsertTableGrouping(contextKey, nodes)
            return newId
        except Exception as e:
            logger.error(f"_ensureFeatureInstanceGroup failed: {e}")
            return None

    def _ensureFeatureInstanceGroup(self, featureInstanceId: str, contextKey: str = "files/list") -> Optional[str]:
        """Stub — file group tree removed. Returns None."""
        return None
    def copyFile(self, sourceFileId: str, newFileName: Optional[str] = None) -> FileItem:
        """Create a full duplicate of a file (FileItem + FileData)."""
@ -1345,13 +1573,133 @@ class ComponentObjects:
    # FileData methods - data operations
def _getFileItemForDataWrite(self, fileId: str) -> Optional[FileItem]:
"""Resolve FileItem for storing FileData: RBAC-aware getFile, then same-user row fallback.
createFile() can insert a row that getFile() still hides (e.g. scope NULL vs GROUP rules,
or connector / context edge cases). The creator must still be allowed to attach blob data.
"""
logger.info(
"[FileData] resolve start fileId=%s iface_userId=%r effective_uid=%r mandateId=%r featureInstanceId=%r db=%s",
fileId,
self.userId,
self._effective_user_id(),
self.mandateId,
self.featureInstanceId,
getattr(self.db, "dbDatabase", "?"),
)
file = self.getFile(fileId)
if file:
logger.info("[FileData] getFile OK fileId=%s", fileId)
return file
uid = self._effective_user_id()
if not uid:
logger.error(
"[FileData] FAIL no user id fileId=%s userId=%r hasCurrentUser=%s",
fileId,
self.userId,
self.currentUser is not None,
)
return None
uid_s = str(uid)
rows = self.db.getRecordset(FileItem, recordFilter={"id": fileId})
if not rows:
logger.error(
"[FileData] FAIL no FileItem row fileId=%s (createFile committed to same db? db=%s)",
fileId,
getattr(self.db, "dbDatabase", "?"),
)
return None
row = dict(rows[0])
creator = row.get("sysCreatedBy")
creator_s = str(creator) if creator is not None else None
if creator_s != uid_s:
if not creator_s:
try:
self.db.recordModify(FileItem, fileId, {"sysCreatedBy": uid_s})
row["sysCreatedBy"] = uid_s
logger.warning(
"[FileData] patched NULL sysCreatedBy fileId=%s -> %s",
fileId,
uid_s,
)
except Exception as e:
logger.error(
"[FileData] FAIL patch sysCreatedBy fileId=%s: %s",
fileId,
e,
exc_info=True,
)
return None
else:
# _saveRecord used to overwrite explicit creators with contextvar "system"
if creator_s == "system":
try:
self.db.recordModify(FileItem, fileId, {"sysCreatedBy": uid_s})
row["sysCreatedBy"] = uid_s
logger.warning(
"[FileData] patched sysCreatedBy system→user fileId=%s -> %s",
fileId,
uid_s,
)
except Exception as e:
logger.error(
"[FileData] FAIL patch system sysCreatedBy fileId=%s: %s",
fileId,
e,
exc_info=True,
)
return None
else:
logger.error(
"[FileData] FAIL creator mismatch fileId=%s row.sysCreatedBy=%r (%s) effective_uid=%r (%s) scope=%r",
fileId,
creator,
type(creator).__name__,
uid,
type(uid).__name__,
row.get("scope"),
)
return None
logger.info(
"[FileData] RBAC miss, owner fallback OK fileId=%s scope=%r sysCreatedBy=%r",
fileId,
row.get("scope"),
row.get("sysCreatedBy"),
)
try:
if row.get("sysCreatedAt") is None or row.get("sysCreatedAt") in (0, 0.0):
row["sysCreatedAt"] = getUtcTimestamp()
if row.get("scope") is None:
row["scope"] = "personal"
if row.get("neutralize") is None:
row["neutralize"] = False
return FileItem(**row)
except Exception as e:
logger.error(
"[FileData] FAIL FileItem(**row) fileId=%s keys=%s err=%s",
fileId,
list(row.keys()),
e,
exc_info=True,
)
return None
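Compressed, the resolution ladder looks like this (summary comments only, not the implementation):

```python
# Outcome ladder for _getFileItemForDataWrite, step by step:
# 1. RBAC-visible via getFile()            -> FileItem
# 2. no effective user id                  -> None
# 3. no raw FileItem row with this id      -> None
# 4. row creator == effective user         -> FileItem (owner fallback)
# 5. row creator NULL or "system"          -> patch sysCreatedBy, then FileItem
# 6. row creator is some other user        -> None (never hijack foreign files)
```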
    def createFileData(self, fileId: str, data: bytes) -> bool:
        """Stores the binary data of a file in the database."""
        try:
logger.info(
"[FileData] createFileData enter fileId=%s bytes=%s",
fileId,
len(data) if data is not None else 0,
)
            # Check file access
            file = self._getFileItemForDataWrite(fileId)
            if not file:
                logger.error(
                    "[FileData] FAIL _getFileItemForDataWrite returned None fileId=%s",
                    fileId,
                )
                return False
            # Determine if this is a text-based format
@ -1396,12 +1744,10 @@ class ComponentObjects:
            self.db.recordCreate(FileData, fileDataObj)
            logger.info("[FileData] recordCreate OK fileId=%s base64Encoded=%s", fileId, base64Encoded)
            return True
        except Exception as e:
            logger.error("Error storing data for file %s: %s", fileId, e, exc_info=True)
            return False
    def getFileData(self, fileId: str) -> Optional[bytes]:

View file

@ -204,6 +204,7 @@ TABLE_NAMESPACE = {
    # Files - user-owned
    "FileItem": "files",
    "FileData": "files",
    "FileFolder": "files",
    # Automation - user-owned
    "AutomationDefinition": "automation",
    "AutomationTemplate": "automation",
@ -746,6 +747,7 @@ def buildFilesScopeWhereClause(
    Only own files: sysCreatedBy = currentUser
    WITH instance context (instance pages):
    - scope = 'personal' AND sysCreatedBy = me (creator's personal files; e.g. workflow outputs)
    - sysCreatedBy = me AND featureInstanceId = X (own personal files of this instance)
    - scope = 'featureInstance' AND featureInstanceId = X
    - scope = 'mandate' AND mandateId = M (M = mandate of the instance)
@ -779,6 +781,15 @@ def buildFilesScopeWhereClause(
    scopeParts: List[str] = []
    scopeValues: List = []
# Personal files created by this user must remain visible even when the request
# carries mandate/instance context (GROUP reads use this clause). Otherwise
# createFile → createFileData → getFile fails and workflow outputs vanish from /files.
# Also treat scope IS NULL as legacy/personal for the owner (column default not applied).
scopeParts.append(
'(("scope" = \'personal\' OR "scope" IS NULL) AND "sysCreatedBy" = %s)'
)
scopeValues.append(currentUser.id)
    if featureInstanceId:
        # 1) Own personal files of this specific instance
        scopeParts.append('("sysCreatedBy" = %s AND "featureInstanceId" = %s)')

View file

@ -0,0 +1,11 @@
# Archived one-off migrations
`migrate_folders_to_groups.py` copies `FileFolder` + `FileItem.folderId` into `TableGrouping` (`files/list`). It was used during an experimental UI path; **product choice** is to keep physical folders (`FileFolder`, `folderId`) and recover `FormGeneratorTree` (see `wiki/c-work/1-plan/2026-05-formgenerator-tree-and-folder-recovery.md`).
Run only if you need a historical data rescue:
```bash
cd gateway
python -m modules.migrations._archive.migrate_folders_to_groups --verbose
python -m modules.migrations._archive.migrate_folders_to_groups --execute --verbose
```

View file

@ -0,0 +1 @@
# Subpackage for archived one-off migration scripts (not part of normal app startup).

View file

@ -1,11 +1,16 @@
""" """
One-time migration: Convert FileFolder tree + FileItem.folderId table_groupings. One-time migration: Convert FileFolder tree + FileItem.folderId to table_groupings.
Archived per wiki plan 2026-05-formgenerator-tree-and-folder-recovery (Stage 1.A).
Product direction: keep FileFolder + folderId; do not run DROP migrations.
This script remains for audit / one-off data rescue only.
Run this BEFORE dropping the physical FileFolder table and FileItem.folderId column
from the database (those would be separate Alembic/SQL steps -- not part of current product path).

Usage (from gateway working directory):
    python -m modules.migrations._archive.migrate_folders_to_groups [--dry-run] [--verbose]
    python -m modules.migrations._archive.migrate_folders_to_groups --execute --verbose

Steps:
1. For each distinct (userId, mandateId) combination that has FileFolder records:
@ -30,6 +35,14 @@ from typing import Optional
logger = logging.getLogger(__name__)
def _scalarRow(row):
if row is None:
return None
if isinstance(row, dict):
return next(iter(row.values()))
return row[0]
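_scalarRow exists because the EXISTS queries below may come back as a tuple or a dict depending on the cursor row factory. For illustration:

```python
# Both row shapes resolve to the same scalar:
assert _scalarRow((True,)) is True        # default tuple row
assert _scalarRow({"ok": True}) is True   # dict-style row (e.g. RealDictCursor)
assert _scalarRow(None) is None           # nothing fetched
```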
# ── Helpers ──────────────────────────────────────────────────────────────────

def _build_tree(folders: list, parent_id: Optional[str]) -> list:
@ -76,11 +89,19 @@ def _now_ts() -> str:
def run_migration(dry_run: bool = True, verbose: bool = False):
    """Main migration entry point."""
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
    logger.info(f"Starting folder to group migration (dry_run={dry_run})")
    from modules.connectors.connectorDbPostgre import getCachedConnector
    from modules.shared.configuration import APP_CONFIG

    connector = getCachedConnector(
dbHost=APP_CONFIG.get("DB_HOST", "_no_config_default_data"),
dbDatabase="poweron_management",
dbUser=APP_CONFIG.get("DB_USER"),
dbPassword=APP_CONFIG.get("DB_PASSWORD_SECRET"),
dbPort=int(APP_CONFIG.get("DB_PORT", 5432)),
userId=None,
)
    if not connector or not connector.connection:
        logger.error("Could not obtain a DB connection. Aborting.")
        return
@ -93,17 +114,17 @@ def run_migration(dry_run: bool = True, verbose: bool = False):
        SELECT EXISTS (
            SELECT 1 FROM information_schema.tables
            WHERE table_name = 'FileFolder'
        ) AS ok
    """)
    folder_table_exists = bool(_scalarRow(cur.fetchone()))
cur.execute(""" cur.execute("""
SELECT EXISTS ( SELECT EXISTS (
SELECT 1 FROM information_schema.columns SELECT 1 FROM information_schema.columns
WHERE table_name = 'FileItem' AND column_name = 'folderId' WHERE table_name = 'FileItem' AND column_name = 'folderId'
) ) AS ok
""") """)
folder_column_exists = cur.fetchone()[0] folder_column_exists = bool(_scalarRow(cur.fetchone()))
    if not folder_table_exists and not folder_column_exists:
        logger.info("FileFolder table and FileItem.folderId column not found — migration already applied or not needed.")
@ -126,7 +147,7 @@ def run_migration(dry_run: bool = True, verbose: bool = False):
        })

    logger.info(f"Loaded folders for {len(folders_by_user)} (user, mandate) combinations")

    # ── 3. Load file to folder assignments ────────────────────────────────────
    files_by_key: dict = {}
    if folder_column_exists:
        cur.execute(
@ -139,7 +160,7 @@ def run_migration(dry_run: bool = True, verbose: bool = False):
        total_files = sum(
            sum(len(v) for v in d.values()) for d in files_by_key.values()
        )
        logger.info(f"Found {total_files} file to folder assignments across {len(files_by_key)} (user, mandate) combos")
    # ── 4. Combine and upsert groupings ──────────────────────────────────────
    all_keys = set(folders_by_user.keys()) | set(files_by_key.keys())
@ -231,7 +252,7 @@ def run_migration(dry_run: bool = True, verbose: bool = False):
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Migrate FileFolder tree to table_groupings (archived script)")
    parser.add_argument("--dry-run", action="store_true", default=True, help="Preview only, no DB writes (default)")
    parser.add_argument("--execute", action="store_true", help="Actually write to DB (disables dry-run)")
    parser.add_argument("--verbose", action="store_true", help="Show per-user details")

View file

@ -9,9 +9,9 @@ Features:
- Admin endpoints: Manage settings, add credits, view all accounts
"""
from fastapi import APIRouter, HTTPException, Depends, Body, Path, Request, Response, Query, Header, status
from fastapi.responses import JSONResponse
from typing import List, Dict, Any, Optional
import logging
from datetime import date, datetime, timezone
from pydantic import BaseModel, Field
@ -24,7 +24,13 @@ from modules.interfaces.interfaceDbBilling import getInterface as getBillingInte
from modules.serviceCenter.services.serviceBilling.mainServiceBilling import getService as getBillingService
import json
import math
from modules.datamodels.datamodelPagination import (
PaginationParams,
PaginatedResponse,
PaginationMetadata,
normalize_pagination_dict,
AppliedViewMeta,
)
from modules.datamodels.datamodelBilling import (
    BillingAccount,
    BillingTransaction,
@ -478,50 +484,193 @@ def getBalanceForMandate(
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/transactions", response_model=List[TransactionResponse]) def _normalize_billing_tx_dict(t: Dict[str, Any]) -> Dict[str, Any]:
"""Make billing transaction rows JSON/grouping-safe (datetimes → str, enums → str)."""
from datetime import date as date_cls, datetime as dt_cls
r = dict(t)
for k, v in list(r.items()):
if isinstance(v, dt_cls):
r[k] = v.isoformat()
elif isinstance(v, date_cls):
r[k] = v.isoformat()
for ek in ("transactionType", "referenceType"):
if ek in r and r[ek] is not None and not isinstance(r[ek], str):
ev = r[ek]
r[ek] = getattr(ev, "value", None) or str(ev)
return r
def _load_billing_user_transactions_normalized(billingService) -> List[Dict[str, Any]]:
raw = billingService.getTransactionHistory(limit=5000)
return [_normalize_billing_tx_dict(t) for t in raw]
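What the normalizer does to a single row, with illustrative values (the enum and field names follow this module's imports; that the enum's value is the plain string is an assumption):

```python
# Illustrative round-trip through _normalize_billing_tx_dict.
from datetime import datetime, timezone

raw = {
    "id": "tx-1",
    "sysCreatedAt": datetime(2026, 5, 1, tzinfo=timezone.utc),
    "transactionType": TransactionTypeEnum.DEBIT,   # enum instance from the DB layer
}
safe = _normalize_billing_tx_dict(raw)
# safe["sysCreatedAt"]     == "2026-05-01T00:00:00+00:00"  (JSON/grouping-safe string)
# safe["transactionType"]  == "DEBIT"                       (enum collapsed to its value)
```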
def _view_user_transactions_filtered_list(
billing_interface,
load_mandate_ids: Optional[List[str]],
effective_scope: str,
personal_user_id: Optional[str],
pagination_params: PaginationParams,
ctx_user,
) -> List[Dict[str, Any]]:
"""Up to 5000 rows: SQL window + in-memory filters/sort (incl. enriched columns)."""
from modules.interfaces.interfaceDbManagement import ComponentObjects
bulk_params = pagination_params.model_copy(deep=True)
bulk_params.page = 1
bulk_params.pageSize = 5000
bulk_result = billing_interface.getTransactionsForMandatesPaginated(
mandateIds=load_mandate_ids,
pagination=bulk_params,
scope=effective_scope,
userId=personal_user_id,
)
all_items = [_normalize_billing_tx_dict(dict(x)) for x in bulk_result.items]
comp = ComponentObjects()
comp.setUserContext(ctx_user)
if pagination_params.filters:
all_items = comp._applyFilters(all_items, pagination_params.filters)
if pagination_params.sort:
all_items = comp._applySorting(all_items, pagination_params.sort)
return all_items
@router.get("/transactions")
@limiter.limit("30/minute") @limiter.limit("30/minute")
def getTransactions( def getTransactions(
request: Request, request: Request,
limit: int = Query(default=50, ge=1, le=500), limit: int = Query(default=50, ge=1, le=500),
offset: int = Query(default=0, ge=0), offset: int = Query(default=0, ge=0),
    pagination: Optional[str] = Query(
None,
description="JSON PaginationParams for table UI (filters, sort, viewKey, groupByLevels).",
),
mode: Optional[str] = Query(None, description="'filterValues' | 'ids' with pagination"),
column: Optional[str] = Query(None, description="Column for mode=filterValues"),
ctx: RequestContext = Depends(getRequestContext),
):
    """
    Get transaction history across all mandates the user belongs to.
Without ``pagination`` query: legacy behaviour returns a JSON array of
transactions (`limit`/`offset` window).
With ``pagination`` JSON: returns ``{ items, pagination, groupLayout?, appliedView? }``.
Table list views use contextKey ``billing/transactions``.
""" """
try: try:
billingService = getBillingService( billingService = getBillingService(
ctx.user, ctx.user,
ctx.mandateId, ctx.mandateId,
featureCode="billing" featureCode="billing",
) )
# Fetch enough transactions for pagination if pagination:
from modules.routes.routeHelpers import (
applyViewToParams,
buildGroupLayout,
effective_group_by_levels,
handleFilterValuesInMemory,
handleIdsInMemory,
resolveView,
)
from modules.interfaces.interfaceDbApp import getInterface as getAppInterface
from modules.interfaces.interfaceDbManagement import ComponentObjects
CONTEXT_KEY = "billing/transactions"
try:
paginationDict = json.loads(pagination)
if not paginationDict:
raise ValueError("empty pagination")
paginationDict = normalize_pagination_dict(paginationDict)
paginationParams = PaginationParams(**paginationDict)
except (json.JSONDecodeError, ValueError, TypeError) as e:
raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
appInterface = getAppInterface(ctx.user)
viewKey = paginationParams.viewKey
viewConfig, viewDisplayName = resolveView(appInterface, CONTEXT_KEY, viewKey)
viewMeta = AppliedViewMeta(viewKey=viewKey, displayName=viewDisplayName) if viewKey else None
paginationParams = applyViewToParams(paginationParams, viewConfig)
groupByLevels = effective_group_by_levels(paginationParams, viewConfig)
all_items = _load_billing_user_transactions_normalized(billingService)
if mode == "filterValues":
if not column:
raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues")
return handleFilterValuesInMemory(all_items, column, pagination)
if mode == "ids":
return handleIdsInMemory(all_items, pagination)
comp = ComponentObjects()
comp.setUserContext(ctx.user)
if paginationParams.filters:
all_items = comp._applyFilters(all_items, paginationParams.filters)
if paginationParams.sort:
all_items = comp._applySorting(all_items, paginationParams.sort)
totalItems = len(all_items)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
if not groupByLevels:
pstart = (paginationParams.page - 1) * paginationParams.pageSize
page_items = all_items[pstart : pstart + paginationParams.pageSize]
group_layout = None
else:
page_items, group_layout = buildGroupLayout(
all_items,
groupByLevels,
paginationParams.page,
paginationParams.pageSize,
)
resp: Dict[str, Any] = {
"items": page_items,
"pagination": PaginationMetadata(
currentPage=paginationParams.page,
pageSize=paginationParams.pageSize,
totalItems=totalItems,
totalPages=totalPages,
sort=paginationParams.sort,
filters=paginationParams.filters,
).model_dump(),
}
if group_layout:
resp["groupLayout"] = group_layout.model_dump()
if viewMeta:
resp["appliedView"] = viewMeta.model_dump()
return JSONResponse(content=resp)
        transactions = billingService.getTransactionHistory(limit=offset + limit)

        result: List[TransactionResponse] = []
        for t in transactions[offset : offset + limit]:
            result.append(
                TransactionResponse(
                    id=t.get("id"),
                    accountId=t.get("accountId"),
                    transactionType=TransactionTypeEnum(t.get("transactionType", "DEBIT")),
                    amount=t.get("amount", 0.0),
                    description=t.get("description", ""),
                    referenceType=ReferenceTypeEnum(t["referenceType"]) if t.get("referenceType") else None,
                    workflowId=t.get("workflowId"),
                    featureCode=t.get("featureCode"),
                    featureInstanceId=t.get("featureInstanceId"),
                    aicoreProvider=t.get("aicoreProvider"),
                    aicoreModel=t.get("aicoreModel"),
                    createdByUserId=t.get("createdByUserId"),
                    sysCreatedAt=t.get("sysCreatedAt"),
                    mandateId=t.get("mandateId"),
                    mandateName=t.get("mandateName"),
                )
            )
        return result
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting billing transactions: {e}")
        raise HTTPException(status_code=500, detail=str(e))
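A hypothetical client call against the new table-UI branch. The base URL, token, and router prefix are assumptions (they are defined outside this excerpt), and the pagination field shapes follow PaginationParams as used above:

```python
# Hypothetical request; URL, prefix, and token are placeholders.
import json, requests

pagination = {
    "page": 1,
    "pageSize": 25,
    "viewKey": "my-saved-view",                    # optional saved view (billing/transactions)
    "groupByLevels": [{"field": "mandateName"}],   # optional: enables groupLayout in the response
}
resp = requests.get(
    "https://gateway.example.com/billing/transactions",
    params={"pagination": json.dumps(pagination)},
    headers={"Authorization": "Bearer <token>"},
    timeout=10,
)
body = resp.json()
# -> {"items": [...], "pagination": {...}} plus "groupLayout"/"appliedView" when applicable;
#    without the pagination query the endpoint keeps its legacy JSON-array behaviour.
```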
@ -1757,7 +1906,7 @@ def getUserViewStatistics(
@router.get("/view/users/transactions", response_model=PaginatedResponse[UserTransactionResponse]) @router.get("/view/users/transactions", response_model=PaginatedResponse[UserTransactionResponse])
@limiter.limit("30/minute") @limiter.limit("120/minute")
def getUserViewTransactions( def getUserViewTransactions(
request: Request, request: Request,
pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"), pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
@ -1808,7 +1957,6 @@ def getUserViewTransactions(
if mode == "filterValues": if mode == "filterValues":
if not column: if not column:
raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues")
from fastapi.responses import JSONResponse
crossFilterParams = parseCrossFilterPagination(column, pagination) crossFilterParams = parseCrossFilterPagination(column, pagination)
values = billingInterface.getTransactionDistinctValues( values = billingInterface.getTransactionDistinctValues(
mandateIds=loadMandateIds, mandateIds=loadMandateIds,
@ -1820,7 +1968,6 @@ def getUserViewTransactions(
            return JSONResponse(content=values)

        if mode == "ids":
            paginationParams = None
            if pagination:
                import json as _json
@ -1835,6 +1982,66 @@ def getUserViewTransactions(
            ) if hasattr(billingInterface, 'getTransactionIds') else []
            return JSONResponse(content=ids)
if mode == "groupSummary":
if not pagination:
raise HTTPException(status_code=400, detail="pagination required for groupSummary")
import json as _json
from collections import defaultdict
from modules.interfaces.interfaceDbApp import getInterface as getAppInterface
from modules.routes.routeHelpers import (
applyViewToParams,
effective_group_by_levels,
resolveView,
)
pagination_dict = _json.loads(pagination)
pagination_dict = normalize_pagination_dict(pagination_dict)
summary_params = PaginationParams(**pagination_dict)
CONTEXT_KEY = "billing/view/users/transactions"
app_interface = getAppInterface(ctx.user)
summary_vk = summary_params.viewKey
summary_view_cfg, _ = resolveView(app_interface, CONTEXT_KEY, summary_vk)
summary_params = applyViewToParams(summary_params, summary_view_cfg)
levels = effective_group_by_levels(summary_params, summary_view_cfg)
if not levels or not levels[0].get("field"):
raise HTTPException(
status_code=400,
detail="groupByLevels[0].field required for groupSummary",
)
field = levels[0]["field"]
null_label = str(levels[0].get("nullLabel") or "")
all_rows = _view_user_transactions_filtered_list(
billingInterface,
loadMandateIds,
scope,
personalUserId,
summary_params,
ctx.user,
)
counts: Dict[str, int] = defaultdict(int)
labels: Dict[str, str] = {}
null_key = "\x00NULL"
for item in all_rows:
raw = item.get(field)
if raw is None or raw == "":
nk = null_key
labels[nk] = null_label
else:
nk = str(raw)
if nk not in labels:
labels[nk] = nk
counts[nk] += 1
groups_out: List[Dict[str, Any]] = []
for nk in sorted(counts.keys(), key=lambda x: (x == null_key, labels.get(x, x).lower())):
groups_out.append(
{
"value": None if nk == null_key else nk,
"label": labels.get(nk, nk),
"totalCount": counts[nk],
}
)
return JSONResponse(content={"groups": groups_out})
        paginationParams = None
        if pagination:
            import json as _json
@ -1847,15 +2054,21 @@ def getUserViewTransactions(
        if not paginationParams:
            paginationParams = PaginationParams(page=1, pageSize=50)
        from modules.interfaces.interfaceDbApp import getInterface as getAppInterface
        from modules.routes.routeHelpers import (
            applyViewToParams,
            buildGroupLayout,
            effective_group_by_levels,
            resolveView,
        )

        CONTEXT_KEY = "billing/view/users/transactions"
        appInterface = getAppInterface(ctx.user)
viewKey = paginationParams.viewKey
viewConfig, viewDisplayName = resolveView(appInterface, CONTEXT_KEY, viewKey)
viewMeta = AppliedViewMeta(viewKey=viewKey, displayName=viewDisplayName) if viewKey else None
paginationParams = applyViewToParams(paginationParams, viewConfig)
groupByLevels = effective_group_by_levels(paginationParams, viewConfig)
        def _toResponse(d):
            return UserTransactionResponse(
@ -1875,9 +2088,56 @@ def getUserViewTransactions(
mandateId=d.get("mandateId"), mandateId=d.get("mandateId"),
mandateName=d.get("mandateName"), mandateName=d.get("mandateName"),
userId=d.get("userId"), userId=d.get("userId"),
userName=d.get("userName") userName=d.get("userName"),
) )
if groupByLevels:
all_items = _view_user_transactions_filtered_list(
billingInterface,
loadMandateIds,
effectiveScope,
personalUserId,
paginationParams,
ctx.user,
)
totalItems = len(all_items)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
page_items, group_layout = buildGroupLayout(
all_items,
groupByLevels,
paginationParams.page,
paginationParams.pageSize,
)
resp: Dict[str, Any] = {
"items": [_toResponse(d).model_dump(mode="json") for d in page_items],
"pagination": PaginationMetadata(
currentPage=paginationParams.page,
pageSize=paginationParams.pageSize,
totalItems=totalItems,
totalPages=totalPages,
sort=paginationParams.sort,
filters=paginationParams.filters,
).model_dump(mode="json"),
}
if group_layout:
resp["groupLayout"] = group_layout.model_dump(mode="json")
if viewMeta:
resp["appliedView"] = viewMeta.model_dump(mode="json")
return JSONResponse(content=resp)
result = billingInterface.getTransactionsForMandatesPaginated(
mandateIds=loadMandateIds,
pagination=paginationParams,
scope=effectiveScope,
userId=personalUserId,
)
logger.debug(
f"SQL-paginated {result.totalItems} transactions for user {ctx.user.id} "
f"(scope={scope}, mandateId={mandateId}, page={paginationParams.page})"
)
        return PaginatedResponse(
            items=[_toResponse(d) for d in result.items],
            pagination=PaginationMetadata(
@ -1887,7 +2147,7 @@ def getUserViewTransactions(
                totalPages=result.totalPages,
                sort=paginationParams.sort,
                filters=paginationParams.filters,
            ),
        )

    except Exception as e:

View file

@ -57,8 +57,8 @@ def _svc_for_connection(current_user: User, connection: UserConnection):
    services = getServices(current_user, None)
    if not services.clickup.setAccessTokenFromConnection(connection):
        raise HTTPException(
            status_code=status.HTTP_502_BAD_GATEWAY,
            detail=routeApiMsg("Failed to set ClickUp access token. Connection may be expired or invalid."),
        )
    return services.clickup

View file

@ -17,6 +17,7 @@ import logging
import json
import math
from urllib.parse import quote
from fastapi.responses import JSONResponse
from modules.datamodels.datamodelUam import User, UserConnection, AuthAuthority, ConnectionStatus
from modules.datamodels.datamodelSecurity import Token
@ -154,12 +155,12 @@ async def get_connections(
""" """
from modules.routes.routeHelpers import ( from modules.routes.routeHelpers import (
handleFilterValuesInMemory, handleIdsInMemory, enrichRowsWithFkLabels, handleFilterValuesInMemory, handleIdsInMemory, enrichRowsWithFkLabels,
handleGroupingInRequest, applyGroupScopeFilter, resolveView, applyViewToParams, buildGroupLayout, effective_group_by_levels,
) )
from modules.datamodels.datamodelPagination import AppliedViewMeta
CONTEXT_KEY = "connections" CONTEXT_KEY = "connections"
# Parse pagination params early — needed for grouping in all modes
paginationParams = None paginationParams = None
if pagination: if pagination:
try: try:
@ -171,7 +172,13 @@ async def get_connections(
            raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")

    interface = getInterface(currentUser)

    # Resolve view and merge config into params
# Resolve view and merge config into params
viewKey = paginationParams.viewKey if paginationParams else None
viewConfig, viewDisplayName = resolveView(interface, CONTEXT_KEY, viewKey)
viewMeta = AppliedViewMeta(viewKey=viewKey, displayName=viewDisplayName) if viewKey else None
paginationParams = applyViewToParams(paginationParams, viewConfig)
groupByLevels = effective_group_by_levels(paginationParams, viewConfig)
    def _buildEnhancedItems():
        connections = interface.getUserConnections(currentUser.id)
@ -200,7 +207,6 @@ async def get_connections(
        try:
            items = _buildEnhancedItems()
            enrichRowsWithFkLabels(items, UserConnection)
            return handleFilterValuesInMemory(items, column, pagination)
        except Exception as e:
            logger.error(f"Error getting filter values for connections: {str(e)}")
@ -208,19 +214,60 @@ async def get_connections(
if mode == "ids": if mode == "ids":
try: try:
items = applyGroupScopeFilter(_buildEnhancedItems(), groupCtx.itemIds) return handleIdsInMemory(_buildEnhancedItems(), pagination)
return handleIdsInMemory(items, pagination)
except Exception as e: except Exception as e:
logger.error(f"Error getting IDs for connections: {str(e)}") logger.error(f"Error getting IDs for connections: {str(e)}")
raise HTTPException(status_code=500, detail=str(e)) raise HTTPException(status_code=500, detail=str(e))
try: if mode == "groupSummary":
# NOTE: Cannot use db.getRecordsetPaginated() here because each connection if not pagination:
# is enriched with computed tokenStatus/tokenExpiresAt (requires per-row DB lookup). raise HTTPException(status_code=400, detail="pagination required for groupSummary")
# Token refresh also may trigger re-fetch. Connections per user are typically < 10, from modules.routes.routeHelpers import (
# so in-memory pagination is acceptable. apply_strategy_b_filters_and_sort,
build_group_summary_groups,
)
if not groupByLevels or not groupByLevels[0].get("field"):
raise HTTPException(
status_code=400,
detail="groupByLevels[0].field required for groupSummary",
)
field = groupByLevels[0]["field"]
null_label = str(groupByLevels[0].get("nullLabel") or "")
connections = interface.getUserConnections(currentUser.id)
try:
refresh_result = await token_refresh_service.refresh_expired_tokens(currentUser.id)
if refresh_result.get("refreshed", 0) > 0:
logger.info(
"Silently refreshed %s tokens for user %s (groupSummary)",
refresh_result["refreshed"],
currentUser.id,
)
connections = interface.getUserConnections(currentUser.id)
except Exception as e:
logger.warning(f"Silent token refresh failed for user {currentUser.id}: {str(e)}")
enhanced_connections_dict = []
for connection in connections:
tokenStatus, tokenExpiresAt = getTokenStatusForConnection(interface, connection.id)
enhanced_connections_dict.append({
"id": connection.id,
"userId": connection.userId,
"authority": connection.authority.value if hasattr(connection.authority, 'value') else str(connection.authority),
"externalId": connection.externalId,
"externalUsername": connection.externalUsername or "",
"externalEmail": connection.externalEmail,
"status": connection.status.value if hasattr(connection.status, 'value') else str(connection.status),
"connectedAt": connection.connectedAt,
"lastChecked": connection.lastChecked,
"expiresAt": connection.expiresAt,
"tokenStatus": tokenStatus,
"tokenExpiresAt": tokenExpiresAt
})
enrichRowsWithFkLabels(enhanced_connections_dict, UserConnection)
filtered = apply_strategy_b_filters_and_sort(enhanced_connections_dict, paginationParams, currentUser)
groups_out = build_group_summary_groups(filtered, field, null_label)
return JSONResponse(content={"groups": groups_out})
    try:
        connections = interface.getUserConnections(currentUser.id)

        # Perform silent token refresh for expired OAuth connections
@ -235,7 +282,7 @@ async def get_connections(
        enhanced_connections_dict = []
        for connection in connections:
            tokenStatus, tokenExpiresAt = getTokenStatusForConnection(interface, connection.id)
            enhanced_connections_dict.append({
                "id": connection.id,
                "userId": connection.userId,
                "authority": connection.authority.value if hasattr(connection.authority, 'value') else str(connection.authority),
@ -248,46 +295,31 @@ async def get_connections(
"expiresAt": connection.expiresAt, "expiresAt": connection.expiresAt,
"tokenStatus": tokenStatus, "tokenStatus": tokenStatus,
"tokenExpiresAt": tokenExpiresAt "tokenExpiresAt": tokenExpiresAt
} })
enhanced_connections_dict.append(connection_dict)
enrichRowsWithFkLabels(enhanced_connections_dict, UserConnection) enrichRowsWithFkLabels(enhanced_connections_dict, UserConnection)
enhanced_connections_dict = applyGroupScopeFilter(enhanced_connections_dict, groupCtx.itemIds)
if paginationParams is None: if paginationParams is None:
return { return {"items": enhanced_connections_dict, "pagination": None}
"items": enhanced_connections_dict,
"pagination": None,
"groupTree": groupCtx.groupTree,
}
# Apply filtering if provided # Apply filtering and sorting over full list (Strategy B)
component_interface = ComponentObjects()
component_interface.setUserContext(currentUser)
if paginationParams.filters: if paginationParams.filters:
component_interface = ComponentObjects() enhanced_connections_dict = component_interface._applyFilters(enhanced_connections_dict, paginationParams.filters)
component_interface.setUserContext(currentUser)
enhanced_connections_dict = component_interface._applyFilters(
enhanced_connections_dict,
paginationParams.filters
)
# Apply sorting if provided
if paginationParams.sort: if paginationParams.sort:
component_interface = ComponentObjects() enhanced_connections_dict = component_interface._applySorting(enhanced_connections_dict, paginationParams.sort)
component_interface.setUserContext(currentUser)
enhanced_connections_dict = component_interface._applySorting(
enhanced_connections_dict,
paginationParams.sort
)
totalItems = len(enhanced_connections_dict) totalItems = len(enhanced_connections_dict)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0 totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
startIdx = (paginationParams.page - 1) * paginationParams.pageSize # Strategy B grouping: operates on full filtered+sorted list, then slices
endIdx = startIdx + paginationParams.pageSize page_items, groupLayout = buildGroupLayout(
paged_connections = enhanced_connections_dict[startIdx:endIdx] enhanced_connections_dict, groupByLevels, paginationParams.page, paginationParams.pageSize
)
return { response: dict = {
"items": paged_connections, "items": page_items,
"pagination": PaginationMetadata( "pagination": PaginationMetadata(
currentPage=paginationParams.page, currentPage=paginationParams.page,
pageSize=paginationParams.pageSize, pageSize=paginationParams.pageSize,
@ -296,8 +328,12 @@ async def get_connections(
                sort=paginationParams.sort,
                filters=paginationParams.filters
            ).model_dump(),
        }
if groupLayout:
response["groupLayout"] = groupLayout.model_dump()
if viewMeta:
response["appliedView"] = viewMeta.model_dump()
return response
    except HTTPException:
        raise
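The grouped response contract for the connections list, sketched with illustrative values (the groupLayout internals come from buildGroupLayout and are not spelled out in this hunk):

```python
# Shape sketch, not a literal server response.
response = {
    "items": [],          # the current page, sliced after Strategy B filtering/sorting
    "pagination": {
        "currentPage": 1, "pageSize": 50,
        "totalItems": 7, "totalPages": 1,
        "sort": None, "filters": None,
    },
    # present only when grouping is active:
    # "groupLayout": {...},
    # present only when a saved view was applied:
    # "appliedView": {"viewKey": "k1", "displayName": "My view"},
}
```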

View file

@ -5,13 +5,14 @@ from fastapi.responses import JSONResponse
from typing import List, Dict, Any, Optional
import logging
import json
import math
# Import auth module
from modules.auth import limiter, getCurrentUser, getRequestContext, RequestContext

# Import interfaces
import modules.interfaces.interfaceDbManagement as interfaceDbManagement
from modules.datamodels.datamodelFiles import FileItem, FilePreview, FileFolder
from modules.shared.attributeUtils import getModelAttributeDefinitions
from modules.datamodels.datamodelUam import User
from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict
@ -72,14 +73,18 @@ def _resolveFileWithScope(currentUser: User, context: RequestContext, fileId: st
    return scopedMgmt, fileItem
async def _autoIndexFile(fileId: str, fileName: str, mimeType: str, user, *, mandateId: str = None, featureInstanceId: str = None):
    """Background task: pre-scan + extraction + knowledge indexing.
    Step 1: Structure Pre-Scan (AI-free) -> FileContentIndex (persisted)
    Step 2: Content extraction via runExtraction -> ContentParts
    Step 3: KnowledgeService.requestIngestion -> idempotent chunking + embedding -> Knowledge Store"""
    userId = user.id if hasattr(user, "id") else str(user)
    try:
        mgmtInterface = interfaceDbManagement.getInterface(
user,
mandateId=mandateId or None,
featureInstanceId=featureInstanceId or None,
)
        mgmtInterface.updateFile(fileId, {"status": "processing"})

        rawBytes = mgmtInterface.getFileData(fileId)
@ -250,6 +255,213 @@ router = APIRouter(
    }
)
@router.get("/folders/tree")
@limiter.limit("120/minute")
def get_folder_tree(
request: Request,
owner: str = Query("me", description="'me' | 'shared'"),
currentUser: User = Depends(getCurrentUser),
context: RequestContext = Depends(getRequestContext),
):
try:
managementInterface = interfaceDbManagement.getInterface(
currentUser,
mandateId=str(context.mandateId) if context.mandateId else None,
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
)
o = (owner or "me").strip().lower()
if o == "me":
return managementInterface.getOwnFolderTree()
if o == "shared":
return managementInterface.getSharedFolderTree()
raise HTTPException(status_code=400, detail="owner must be 'me' or 'shared'")
except HTTPException:
raise
except Exception as e:
logger.error(f"get_folder_tree error: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.post("/folders", status_code=status.HTTP_201_CREATED)
@limiter.limit("30/minute")
def create_folder(
request: Request,
body: Dict[str, Any] = Body(...),
currentUser: User = Depends(getCurrentUser),
context: RequestContext = Depends(getRequestContext),
):
try:
name = body.get("name")
if not name or not str(name).strip():
raise HTTPException(status_code=400, detail="name is required")
parentId = body.get("parentId") or None
managementInterface = interfaceDbManagement.getInterface(
currentUser,
mandateId=str(context.mandateId) if context.mandateId else None,
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
)
return managementInterface.createFolder(str(name).strip(), parentId)
except PermissionError as e:
raise HTTPException(status_code=403, detail=str(e))
except interfaceDbManagement.FileNotFoundError as e:
raise HTTPException(status_code=404, detail=str(e))
except HTTPException:
raise
except Exception as e:
logger.error(f"create_folder error: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.patch("/folders/{folderId}")
@limiter.limit("30/minute")
def rename_folder(
request: Request,
folderId: str = Path(...),
body: Dict[str, Any] = Body(...),
currentUser: User = Depends(getCurrentUser),
context: RequestContext = Depends(getRequestContext),
):
try:
name = body.get("name")
if not name or not str(name).strip():
raise HTTPException(status_code=400, detail="name is required")
managementInterface = interfaceDbManagement.getInterface(
currentUser,
mandateId=str(context.mandateId) if context.mandateId else None,
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
)
return managementInterface.renameFolder(folderId, str(name).strip())
except PermissionError as e:
raise HTTPException(status_code=403, detail=str(e))
except interfaceDbManagement.FileNotFoundError as e:
raise HTTPException(status_code=404, detail=str(e))
except HTTPException:
raise
except Exception as e:
logger.error(f"rename_folder error: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.post("/folders/{folderId}/move")
@limiter.limit("30/minute")
def move_folder(
request: Request,
folderId: str = Path(...),
body: Dict[str, Any] = Body(...),
currentUser: User = Depends(getCurrentUser),
context: RequestContext = Depends(getRequestContext),
):
try:
newParentId = body.get("parentId")
managementInterface = interfaceDbManagement.getInterface(
currentUser,
mandateId=str(context.mandateId) if context.mandateId else None,
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
)
return managementInterface.moveFolder(folderId, newParentId or None)
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except PermissionError as e:
raise HTTPException(status_code=403, detail=str(e))
except interfaceDbManagement.FileNotFoundError as e:
raise HTTPException(status_code=404, detail=str(e))
except HTTPException:
raise
except Exception as e:
logger.error(f"move_folder error: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.delete("/folders/{folderId}")
@limiter.limit("30/minute")
def delete_folder(
request: Request,
folderId: str = Path(...),
cascade: bool = Query(True, description="Cascade delete sub-folders and files"),
currentUser: User = Depends(getCurrentUser),
context: RequestContext = Depends(getRequestContext),
):
try:
managementInterface = interfaceDbManagement.getInterface(
currentUser,
mandateId=str(context.mandateId) if context.mandateId else None,
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
)
return managementInterface.deleteFolderCascade(folderId)
except PermissionError as e:
raise HTTPException(status_code=403, detail=str(e))
except interfaceDbManagement.FileNotFoundError as e:
raise HTTPException(status_code=404, detail=str(e))
except HTTPException:
raise
except Exception as e:
logger.error(f"delete_folder error: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.patch("/folders/{folderId}/scope")
@limiter.limit("30/minute")
def patch_folder_scope(
request: Request,
folderId: str = Path(...),
body: Dict[str, Any] = Body(...),
currentUser: User = Depends(getCurrentUser),
context: RequestContext = Depends(getRequestContext),
):
try:
scope = body.get("scope")
if not scope:
raise HTTPException(status_code=400, detail="scope is required")
cascadeToFiles = body.get("cascadeToFiles", False)
managementInterface = interfaceDbManagement.getInterface(
currentUser,
mandateId=str(context.mandateId) if context.mandateId else None,
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
)
return managementInterface.patchFolderScope(folderId, scope, cascadeToFiles)
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except PermissionError as e:
raise HTTPException(status_code=403, detail=str(e))
except interfaceDbManagement.FileNotFoundError as e:
raise HTTPException(status_code=404, detail=str(e))
except HTTPException:
raise
except Exception as e:
logger.error(f"patch_folder_scope error: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.patch("/folders/{folderId}/neutralize")
@limiter.limit("30/minute")
def patch_folder_neutralize(
request: Request,
folderId: str = Path(...),
body: Dict[str, Any] = Body(...),
currentUser: User = Depends(getCurrentUser),
context: RequestContext = Depends(getRequestContext),
):
try:
neutralize = body.get("neutralize")
if neutralize is None:
raise HTTPException(status_code=400, detail="neutralize is required")
managementInterface = interfaceDbManagement.getInterface(
currentUser,
mandateId=str(context.mandateId) if context.mandateId else None,
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
)
return managementInterface.patchFolderNeutralize(folderId, bool(neutralize))
except PermissionError as e:
raise HTTPException(status_code=403, detail=str(e))
except interfaceDbManagement.FileNotFoundError as e:
raise HTTPException(status_code=404, detail=str(e))
except HTTPException:
raise
except Exception as e:
logger.error(f"patch_folder_neutralize error: {e}")
raise HTTPException(status_code=500, detail=str(e))
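A hypothetical client-side walkthrough of the folder endpoints above. The /api/files mount point and the bearer token are placeholders, and it assumes createFolder echoes the created folder back with an id field:

import requests

BASE = "http://localhost:8000/api/files"      # assumed mount point
H = {"Authorization": "Bearer TOKEN"}         # placeholder credentials

root = requests.post(f"{BASE}/folders", json={"name": "Reports"}, headers=H).json()
child = requests.post(f"{BASE}/folders", json={"name": "2024", "parentId": root["id"]}, headers=H).json()
requests.patch(f"{BASE}/folders/{child['id']}", json={"name": "FY2024"}, headers=H)
requests.post(f"{BASE}/folders/{child['id']}/move", json={"parentId": None}, headers=H)
tree = requests.get(f"{BASE}/folders/tree", params={"owner": "me"}, headers=H).json()
requests.delete(f"{BASE}/folders/{child['id']}", params={"cascade": "true"}, headers=H)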
@router.get("/list") @router.get("/list")
@limiter.limit("120/minute") @limiter.limit("120/minute")
def get_files( def get_files(
@ -289,9 +501,10 @@ def get_files(
from modules.routes.routeHelpers import ( from modules.routes.routeHelpers import (
handleIdsMode, handleIdsMode,
handleFilterValuesInMemory, handleFilterValuesInMemory,
handleGroupingInRequest, applyGroupScopeFilter, resolveView, applyViewToParams, buildGroupLayout, effective_group_by_levels,
) )
import modules.interfaces.interfaceDbApp as _appIface import modules.interfaces.interfaceDbApp as _appIface
from modules.datamodels.datamodelPagination import AppliedViewMeta
managementInterface = interfaceDbManagement.getInterface( managementInterface = interfaceDbManagement.getInterface(
currentUser, currentUser,
@ -299,11 +512,40 @@ def get_files(
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None
) )
appInterface = _appIface.getInterface(currentUser) appInterface = _appIface.getInterface(currentUser)
groupCtx = handleGroupingInRequest(paginationParams, appInterface, "files/list")
# Resolve view and merge config into params
viewKey = paginationParams.viewKey if paginationParams else None
viewConfig, viewDisplayName = resolveView(appInterface, "files/list", viewKey)
viewMeta = AppliedViewMeta(viewKey=viewKey, displayName=viewDisplayName) if viewKey else None
paginationParams = applyViewToParams(paginationParams, viewConfig)
groupByLevels = effective_group_by_levels(paginationParams, viewConfig)
def _filesToDicts(fileItems): def _filesToDicts(fileItems):
return [f.model_dump() if hasattr(f, "model_dump") else (dict(f) if not isinstance(f, dict) else f) for f in fileItems] return [f.model_dump() if hasattr(f, "model_dump") else (dict(f) if not isinstance(f, dict) else f) for f in fileItems]
if mode == "groupSummary":
if not pagination:
raise HTTPException(status_code=400, detail="pagination required for groupSummary")
from modules.routes.routeHelpers import (
apply_strategy_b_filters_and_sort,
build_group_summary_groups,
)
if not groupByLevels or not groupByLevels[0].get("field"):
raise HTTPException(
status_code=400,
detail="groupByLevels[0].field required for groupSummary",
)
field = groupByLevels[0]["field"]
null_label = str(groupByLevels[0].get("nullLabel") or "")
allFiles = managementInterface.getAllFiles()
allItems = enrichRowsWithFkLabels(
_filesToDicts(allFiles if isinstance(allFiles, list) else (allFiles.items if hasattr(allFiles, "items") else [])),
FileItem,
)
filtered = apply_strategy_b_filters_and_sort(allItems, paginationParams, currentUser)
groups_out = build_group_summary_groups(filtered, field, null_label)
return JSONResponse(content={"groups": groups_out})
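The groupSummary contract in one picture: the client ships its grouping level inside the pagination parameter (paginationParams.groupByLevels above) and receives one value/label/totalCount entry per distinct value of the first level, per build_group_summary_groups further down. The URL prefix and concrete values here are invented:

import json, urllib.parse

pagination = {"page": 1, "pageSize": 25,
              "groupByLevels": [{"field": "scope", "nullLabel": "(none)"}]}
qs = urllib.parse.urlencode({"mode": "groupSummary",
                             "pagination": json.dumps(pagination)})
print(f"GET /api/files/list?{qs}")            # /api/files prefix assumed

# Illustrative response body, one entry per distinct value of the first level:
expected = {"groups": [
    {"value": "personal", "label": "personal", "totalCount": 12},
    {"value": "mandate",  "label": "mandate",  "totalCount": 3},
]}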
if mode == "filterValues": if mode == "filterValues":
if not column: if not column:
raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues")
@ -311,33 +553,72 @@ def get_files(
items = allFiles if isinstance(allFiles, list) else (allFiles.items if hasattr(allFiles, "items") else []) items = allFiles if isinstance(allFiles, list) else (allFiles.items if hasattr(allFiles, "items") else [])
itemDicts = _filesToDicts(items) itemDicts = _filesToDicts(items)
enrichRowsWithFkLabels(itemDicts, FileItem) enrichRowsWithFkLabels(itemDicts, FileItem)
itemDicts = applyGroupScopeFilter(itemDicts, groupCtx.itemIds)
return handleFilterValuesInMemory(itemDicts, column, pagination) return handleFilterValuesInMemory(itemDicts, column, pagination)
if mode == "ids": if mode == "ids":
recordFilter = {"sysCreatedBy": managementInterface.userId} recordFilter = {"sysCreatedBy": managementInterface.userId}
return handleIdsMode(managementInterface.db, FileItem, pagination, recordFilter) return handleIdsMode(managementInterface.db, FileItem, pagination, recordFilter)
result = managementInterface.getAllFiles(pagination=paginationParams) if not groupByLevels:
# No grouping: let DB handle pagination directly (fastest path)
result = managementInterface.getAllFiles(pagination=paginationParams)
if paginationParams and hasattr(result, 'items'):
enriched = enrichRowsWithFkLabels(_filesToDicts(result.items), FileItem)
resp: dict = {
"items": enriched,
"pagination": PaginationMetadata(
currentPage=paginationParams.page,
pageSize=paginationParams.pageSize,
totalItems=result.totalItems,
totalPages=result.totalPages,
sort=paginationParams.sort,
filters=paginationParams.filters
).model_dump(),
}
else:
items = result if isinstance(result, list) else (result.items if hasattr(result, "items") else [result])
resp = {"items": enrichRowsWithFkLabels(_filesToDicts(items), FileItem), "pagination": None}
if viewMeta:
resp["appliedView"] = viewMeta.model_dump()
return resp
if paginationParams: # Strategy B grouping: load full list, group, then slice
enriched = applyGroupScopeFilter(enrichRowsWithFkLabels(_filesToDicts(result.items), FileItem), groupCtx.itemIds) allFiles = managementInterface.getAllFiles()
return { allItems = enrichRowsWithFkLabels(
"items": enriched, _filesToDicts(allFiles if isinstance(allFiles, list) else (allFiles.items if hasattr(allFiles, "items") else [])),
"pagination": PaginationMetadata( FileItem,
currentPage=paginationParams.page, )
pageSize=paginationParams.pageSize,
totalItems=result.totalItems, from modules.routes.routeHelpers import apply_strategy_b_filters_and_sort
totalPages=result.totalPages, if paginationParams.filters or paginationParams.sort:
sort=paginationParams.sort, allItems = apply_strategy_b_filters_and_sort(allItems, paginationParams, currentUser)
filters=paginationParams.filters
).model_dump(), if not paginationParams:
"groupTree": groupCtx.groupTree, resp = {"items": allItems, "pagination": None}
} if viewMeta:
else: resp["appliedView"] = viewMeta.model_dump()
items = result if isinstance(result, list) else (result.items if hasattr(result, "items") else [result]) return resp
enriched = applyGroupScopeFilter(enrichRowsWithFkLabels(_filesToDicts(items), FileItem), groupCtx.itemIds)
return {"items": enriched, "pagination": None, "groupTree": groupCtx.groupTree} totalItems = len(allItems)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
page_items, groupLayout = buildGroupLayout(allItems, groupByLevels, paginationParams.page, paginationParams.pageSize)
resp = {
"items": page_items,
"pagination": PaginationMetadata(
currentPage=paginationParams.page,
pageSize=paginationParams.pageSize,
totalItems=totalItems,
totalPages=totalPages,
sort=paginationParams.sort,
filters=paginationParams.filters
).model_dump(),
}
if groupLayout:
resp["groupLayout"] = groupLayout.model_dump()
if viewMeta:
resp["appliedView"] = viewMeta.model_dump()
return resp
except HTTPException: except HTTPException:
raise raise
except Exception as e: except Exception as e:
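Strategy B trades memory for flexibility: the whole filtered list is materialized, totals come from that list rather than the database, and buildGroupLayout orders rows by group before slicing the requested page. The arithmetic in isolation (a toy stand-in; the real buildGroupLayout also emits the nested group layout):

import math
from itertools import groupby

rows = [{"id": i, "scope": s} for i, s in enumerate("aabbbcc")]
page, page_size = 2, 3

rows.sort(key=lambda r: r["scope"])           # group keys must be contiguous
total_items = len(rows)
total_pages = math.ceil(total_items / page_size) if total_items else 0
page_items = rows[(page - 1) * page_size : page * page_size]

layout = [{"value": k, "count": len(list(g))}
          for k, g in groupby(rows, key=lambda r: r["scope"])]
print(total_pages, page_items, layout)        # -> 3, rows 3..5, per-group counts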
@ -348,34 +629,11 @@ def get_files(
) )
def _addFileToGroup(appInterface, fileId: str, groupId: str, contextKey: str = "files/list"): def _LEGACY_addFileToGroup_REMOVED():
"""Add a file to a group in the persisted groupTree (upsert).""" """Removed — file-group tree no longer exists. Use multi-select bulk operations."""
from modules.routes.routeHelpers import _collectItemIds pass
try:
existing = appInterface.getTableGrouping(contextKey)
if not existing:
return
nodes = [n.model_dump() if hasattr(n, 'model_dump') else n for n in existing.rootGroups]
def _add(nds):
for nd in nds:
nid = nd.get("id") if isinstance(nd, dict) else getattr(nd, "id", None)
if nid == groupId:
itemIds = list(nd.get("itemIds", []) if isinstance(nd, dict) else getattr(nd, "itemIds", []))
if fileId not in itemIds:
itemIds.append(fileId)
if isinstance(nd, dict):
nd["itemIds"] = itemIds
else:
nd.itemIds = itemIds
return True
subs = nd.get("subGroups", []) if isinstance(nd, dict) else getattr(nd, "subGroups", [])
if _add(subs):
return True
return False
_add(nodes)
appInterface.upsertTableGrouping(contextKey, nodes)
except Exception as e:
logger.warning(f"_addFileToGroup failed: {e}")
@router.post("/upload", status_code=status.HTTP_201_CREATED) @router.post("/upload", status_code=status.HTTP_201_CREATED)
@ -385,7 +643,6 @@ async def upload_file(
file: UploadFile = File(...), file: UploadFile = File(...),
workflowId: Optional[str] = Form(None), workflowId: Optional[str] = Form(None),
featureInstanceId: Optional[str] = Form(None), featureInstanceId: Optional[str] = Form(None),
groupId: Optional[str] = Form(None),
currentUser: User = Depends(getCurrentUser), currentUser: User = Depends(getCurrentUser),
context: RequestContext = Depends(getRequestContext), context: RequestContext = Depends(getRequestContext),
) -> JSONResponse: ) -> JSONResponse:
@ -419,12 +676,6 @@ async def upload_file(
managementInterface.updateFile(fileItem.id, {"featureInstanceId": featureInstanceId}) managementInterface.updateFile(fileItem.id, {"featureInstanceId": featureInstanceId})
fileItem.featureInstanceId = featureInstanceId fileItem.featureInstanceId = featureInstanceId
# Add to group if groupId was provided
if groupId:
import modules.interfaces.interfaceDbApp as _appIface
appInterface = _appIface.getInterface(currentUser)
_addFileToGroup(appInterface, fileItem.id, groupId)
# Determine response message based on duplicate type # Determine response message based on duplicate type
if duplicateType == "exact_duplicate": if duplicateType == "exact_duplicate":
message = f"File '{file.filename}' already exists with identical content. Reusing existing file." message = f"File '{file.filename}' already exists with identical content. Reusing existing file."
@ -462,6 +713,8 @@ async def upload_file(
fileName=fileItem.fileName, fileName=fileItem.fileName,
mimeType=fileItem.mimeType, mimeType=fileItem.mimeType,
user=currentUser, user=currentUser,
mandateId=str(context.mandateId) if context.mandateId else None,
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
)) ))
except Exception as indexErr: except Exception as indexErr:
logger.warning(f"Auto-index trigger failed (non-blocking): {indexErr}") logger.warning(f"Auto-index trigger failed (non-blocking): {indexErr}")
@ -526,82 +779,172 @@ def batch_delete_items(
raise HTTPException(status_code=500, detail=str(e)) raise HTTPException(status_code=500, detail=str(e))
# ── Group bulk endpoints ────────────────────────────────────────────────────── @router.post("/batch-download")
@limiter.limit("10/minute")
def _get_group_item_ids(contextKey: str, groupId: str, appInterface) -> set: def batchDownload(
"""Collect all file IDs in a group and its sub-groups from the stored groupTree.""" request: Request,
from modules.routes.routeHelpers import _collectItemIds body: Dict[str, Any] = Body(...),
try: currentUser: User = Depends(getCurrentUser),
existing = appInterface.getTableGrouping(contextKey) context: RequestContext = Depends(getRequestContext),
if not existing: ):
return set() """Download multiple files and/or folders as a single ZIP archive,
nodes = [n.model_dump() if hasattr(n, 'model_dump') else n for n in existing.rootGroups] preserving the folder hierarchy as ZIP paths."""
result = _collectItemIds(nodes, groupId) import io, zipfile
return result or set()
except Exception as e: fileIds = body.get("fileIds") or []
logger.error(f"_get_group_item_ids failed for groupId={groupId}: {e}") folderIds = body.get("folderIds") or []
return set()
if not fileIds and not folderIds:
raise HTTPException(status_code=400, detail="fileIds or folderIds required")
@router.patch("/groups/{groupId}/scope")
@limiter.limit("60/minute") try:
def patch_group_scope( mgmt = interfaceDbManagement.getInterface(
currentUser,
mandateId=str(context.mandateId) if context.mandateId else None,
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
)
folderCache: dict[str, dict] = {}
def _getFolder(fid: str):
if fid not in folderCache:
f = mgmt.getFolder(fid)
folderCache[fid] = f if f else {}
return folderCache[fid]
def _folderPath(fid: str) -> str:
"""Build the full path for a folder by walking up parentId."""
parts: list[str] = []
current = fid
visited: set[str] = set()
while current and current not in visited:
visited.add(current)
folder = _getFolder(current)
if not folder:
break
parts.append(folder.get("name", current))
current = folder.get("parentId")
parts.reverse()
return "/".join(parts)
# Collect files from requested folders (recursive)
fileEntries: list[tuple[str, str]] = []
seenFileIds: set[str] = set()
for fid in folderIds:
childFolderIds = mgmt._collectChildFolderIds(fid)
for cfid in childFolderIds:
prefix = _folderPath(cfid)
items = mgmt.db.getRecordset(FileItem, recordFilter={"folderId": cfid})
for item in items:
itemId = item.get("id") if isinstance(item, dict) else getattr(item, "id", None)
if itemId and itemId not in seenFileIds:
seenFileIds.add(itemId)
fileEntries.append((itemId, prefix))
# Loose files (not via folder selection)
for fid in fileIds:
if fid in seenFileIds:
continue
seenFileIds.add(fid)
fileMeta = mgmt.getFile(fid)
if not fileMeta:
continue
fileFolderId = fileMeta.get("folderId") if isinstance(fileMeta, dict) else getattr(fileMeta, "folderId", None)
prefix = _folderPath(fileFolderId) if fileFolderId else ""
fileEntries.append((fid, prefix))
if not fileEntries:
raise HTTPException(status_code=404, detail="No downloadable files found")
buf = io.BytesIO()
with zipfile.ZipFile(buf, "w", zipfile.ZIP_DEFLATED) as zf:
for fid, prefix in fileEntries:
try:
fileMeta = mgmt.getFile(fid)
fileData = mgmt.getFileData(fid)
if fileMeta and fileData:
name = (fileMeta.get("fileName") if isinstance(fileMeta, dict) else getattr(fileMeta, "fileName", fid)) or fid
zipPath = f"{prefix}/{name}" if prefix else name
zf.writestr(zipPath, fileData)
except Exception as fe:
logger.warning(f"batch_download: skipping file {fid}: {fe}")
buf.seek(0)
from fastapi.responses import StreamingResponse
return StreamingResponse(
buf,
media_type="application/zip",
headers={"Content-Disposition": 'attachment; filename="download.zip"'},
)
except HTTPException:
raise
except Exception as e:
logger.error(f"batch_download error: {e}")
raise HTTPException(status_code=500, detail=str(e))
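A hypothetical client call for the endpoint above; the mount point and credentials are placeholders. Selected folders are resolved recursively on the server, and the returned archive encodes the hierarchy in its entry paths:

import requests

resp = requests.post(
    "http://localhost:8000/api/files/batch-download",   # assumed mount point
    json={"fileIds": ["f-1"], "folderIds": ["dir-7"]},
    headers={"Authorization": "Bearer TOKEN"},          # placeholder credentials
)
with open("download.zip", "wb") as fh:
    fh.write(resp.content)    # ZIP entries keep folder paths, e.g. Reports/2024/a.pdf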
# ── Bulk file operations (replace former group-based bulk routes) ─────────────
@router.post("/bulk/scope")
@limiter.limit("30/minute")
def bulk_set_scope(
request: Request, request: Request,
groupId: str = Path(..., description="Group ID"),
body: dict = Body(...), body: dict = Body(...),
currentUser: User = Depends(getCurrentUser), currentUser: User = Depends(getCurrentUser),
context: RequestContext = Depends(getRequestContext), context: RequestContext = Depends(getRequestContext),
): ):
"""Set scope for all files in a group (recursive).""" """Set scope for a list of files by their IDs."""
scope = body.get("scope") fileIds: list = body.get("fileIds") or []
if not scope: scope: str = body.get("scope") or ""
raise HTTPException(status_code=400, detail="scope is required") if not fileIds:
raise HTTPException(status_code=400, detail="fileIds is required")
validScopes = {"personal", "featureInstance", "mandate", "global"}
if scope not in validScopes:
raise HTTPException(status_code=400, detail=f"Invalid scope. Must be one of {validScopes}")
if scope == "global" and not context.isSysAdmin:
raise HTTPException(status_code=403, detail="Only sysadmins can set global scope")
try: try:
import modules.interfaces.interfaceDbApp as _appIface
managementInterface = interfaceDbManagement.getInterface( managementInterface = interfaceDbManagement.getInterface(
currentUser, currentUser,
mandateId=str(context.mandateId) if context.mandateId else None, mandateId=str(context.mandateId) if context.mandateId else None,
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None, featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
) )
appInterface = _appIface.getInterface(currentUser)
fileIds = _get_group_item_ids("files/list", groupId, appInterface)
updated = 0 updated = 0
for fid in fileIds: for fid in fileIds:
try: try:
managementInterface.updateFile(fid, {"scope": scope}) managementInterface.updateFile(fid, {"scope": scope})
updated += 1 updated += 1
except Exception as e: except Exception as e:
logger.error(f"patch_group_scope: failed to update file {fid}: {e}") logger.error(f"bulk_set_scope: failed for file {fid}: {e}")
return {"groupId": groupId, "scope": scope, "filesUpdated": updated} return {"scope": scope, "filesUpdated": updated}
except HTTPException: except HTTPException:
raise raise
except Exception as e: except Exception as e:
logger.error(f"patch_group_scope error: {e}") logger.error(f"bulk_set_scope error: {e}")
raise HTTPException(status_code=500, detail=str(e)) raise HTTPException(status_code=500, detail=str(e))
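An example payload for the bulk scope route, using a value from the validScopes set above ("global" additionally requires a sysadmin context); the mount point and token are placeholders:

import requests

requests.post(
    "http://localhost:8000/api/files/bulk/scope",       # assumed mount point
    json={"fileIds": ["f-1", "f-2"], "scope": "mandate"},
    headers={"Authorization": "Bearer TOKEN"},
)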
@router.patch("/groups/{groupId}/neutralize") @router.post("/bulk/neutralize")
@limiter.limit("60/minute") @limiter.limit("30/minute")
def patch_group_neutralize( def bulk_set_neutralize(
request: Request, request: Request,
groupId: str = Path(..., description="Group ID"),
body: dict = Body(...), body: dict = Body(...),
currentUser: User = Depends(getCurrentUser), currentUser: User = Depends(getCurrentUser),
context: RequestContext = Depends(getRequestContext), context: RequestContext = Depends(getRequestContext),
): ):
"""Toggle neutralize for all files in a group (recursive, incl. knowledge purge/reindex).""" """Set neutralize flag for a list of files by their IDs (incl. knowledge purge/reindex)."""
fileIds: list = body.get("fileIds") or []
neutralize = body.get("neutralize") neutralize = body.get("neutralize")
if not fileIds:
raise HTTPException(status_code=400, detail="fileIds is required")
if neutralize is None: if neutralize is None:
raise HTTPException(status_code=400, detail="neutralize is required") raise HTTPException(status_code=400, detail="neutralize is required")
try: try:
import modules.interfaces.interfaceDbApp as _appIface
managementInterface = interfaceDbManagement.getInterface( managementInterface = interfaceDbManagement.getInterface(
currentUser, currentUser,
mandateId=str(context.mandateId) if context.mandateId else None, mandateId=str(context.mandateId) if context.mandateId else None,
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None, featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
) )
appInterface = _appIface.getInterface(currentUser)
fileIds = _get_group_item_ids("files/list", groupId, appInterface)
updated = 0 updated = 0
for fid in fileIds: for fid in fileIds:
try: try:
@ -612,39 +955,37 @@ def patch_group_neutralize(
kIface = interfaceDbKnowledge.getInterface(currentUser) kIface = interfaceDbKnowledge.getInterface(currentUser)
kIface.purgeFileKnowledge(fid) kIface.purgeFileKnowledge(fid)
except Exception as ke: except Exception as ke:
logger.warning(f"patch_group_neutralize: knowledge purge failed for {fid}: {ke}") logger.warning(f"bulk_set_neutralize: knowledge purge failed for {fid}: {ke}")
updated += 1 updated += 1
except Exception as e: except Exception as e:
logger.error(f"patch_group_neutralize: failed for file {fid}: {e}") logger.error(f"bulk_set_neutralize: failed for file {fid}: {e}")
return {"groupId": groupId, "neutralize": neutralize, "filesUpdated": updated} return {"neutralize": neutralize, "filesUpdated": updated}
except HTTPException: except HTTPException:
raise raise
except Exception as e: except Exception as e:
logger.error(f"patch_group_neutralize error: {e}") logger.error(f"bulk_set_neutralize error: {e}")
raise HTTPException(status_code=500, detail=str(e)) raise HTTPException(status_code=500, detail=str(e))
@router.get("/groups/{groupId}/download") @router.post("/bulk/download-zip")
@limiter.limit("20/minute") @limiter.limit("10/minute")
async def download_group_zip( async def bulk_download_zip(
request: Request, request: Request,
groupId: str = Path(..., description="Group ID"), body: dict = Body(...),
currentUser: User = Depends(getCurrentUser), currentUser: User = Depends(getCurrentUser),
context: RequestContext = Depends(getRequestContext), context: RequestContext = Depends(getRequestContext),
): ):
"""Download all files in a group as a ZIP archive.""" """Download a list of files as a ZIP archive."""
import io, zipfile import io, zipfile
fileIds: list = body.get("fileIds") or []
if not fileIds:
raise HTTPException(status_code=400, detail="fileIds is required")
try: try:
import modules.interfaces.interfaceDbApp as _appIface
managementInterface = interfaceDbManagement.getInterface( managementInterface = interfaceDbManagement.getInterface(
currentUser, currentUser,
mandateId=str(context.mandateId) if context.mandateId else None, mandateId=str(context.mandateId) if context.mandateId else None,
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None, featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
) )
appInterface = _appIface.getInterface(currentUser)
fileIds = _get_group_item_ids("files/list", groupId, appInterface)
if not fileIds:
raise HTTPException(status_code=404, detail="Group not found or empty")
buf = io.BytesIO() buf = io.BytesIO()
with zipfile.ZipFile(buf, "w", zipfile.ZIP_DEFLATED) as zf: with zipfile.ZipFile(buf, "w", zipfile.ZIP_DEFLATED) as zf:
for fid in fileIds: for fid in fileIds:
@ -652,63 +993,21 @@ async def download_group_zip(
fileMeta = managementInterface.getFile(fid) fileMeta = managementInterface.getFile(fid)
fileData = managementInterface.getFileData(fid) fileData = managementInterface.getFileData(fid)
if fileMeta and fileData: if fileMeta and fileData:
name = (fileMeta.get("fileName") if isinstance(fileMeta, dict) else getattr(fileMeta, "fileName", fid)) or fid name = (getattr(fileMeta, "fileName", None) or fid)
zf.writestr(name, fileData) zf.writestr(name, fileData)
except Exception as fe: except Exception as fe:
logger.warning(f"download_group_zip: skipping file {fid}: {fe}") logger.warning(f"bulk_download_zip: skipping file {fid}: {fe}")
buf.seek(0) buf.seek(0)
from fastapi.responses import StreamingResponse from fastapi.responses import StreamingResponse
return StreamingResponse( return StreamingResponse(
buf, buf,
media_type="application/zip", media_type="application/zip",
headers={"Content-Disposition": f'attachment; filename="group-{groupId}.zip"'}, headers={"Content-Disposition": 'attachment; filename="files.zip"'},
) )
except HTTPException: except HTTPException:
raise raise
except Exception as e: except Exception as e:
logger.error(f"download_group_zip error: {e}") logger.error(f"bulk_download_zip error: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.delete("/groups/{groupId}")
@limiter.limit("30/minute")
def delete_group(
request: Request,
groupId: str = Path(..., description="Group ID"),
deleteItems: bool = Query(False, description="If true, also delete all files in the group"),
currentUser: User = Depends(getCurrentUser),
context: RequestContext = Depends(getRequestContext),
):
"""Remove a group from the groupTree. Optionally delete all its files."""
try:
import modules.interfaces.interfaceDbApp as _appIface
appInterface = _appIface.getInterface(currentUser)
fileIds = _get_group_item_ids("files/list", groupId, appInterface)
# Remove group from tree
existing = appInterface.getTableGrouping("files/list")
if existing:
from modules.routes.routeHelpers import _removeGroupFromTree
newRoots = _removeGroupFromTree([n.model_dump() if hasattr(n, 'model_dump') else n for n in existing.rootGroups], groupId)
appInterface.upsertTableGrouping("files/list", newRoots)
# Optionally delete files
deletedFiles = 0
if deleteItems:
managementInterface = interfaceDbManagement.getInterface(
currentUser,
mandateId=str(context.mandateId) if context.mandateId else None,
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
)
for fid in fileIds:
try:
managementInterface.deleteFile(fid)
deletedFiles += 1
except Exception as e:
logger.error(f"delete_group: failed to delete file {fid}: {e}")
return {"groupId": groupId, "deletedFiles": deletedFiles}
except HTTPException:
raise
except Exception as e:
logger.error(f"delete_group error: {e}")
raise HTTPException(status_code=500, detail=str(e)) raise HTTPException(status_code=500, detail=str(e))
@ -759,7 +1058,11 @@ def updateFileScope(
async def _runReindexAfterScopeChange(): async def _runReindexAfterScopeChange():
try: try:
await _autoIndexFile(fileId=fileId, fileName=fn, mimeType=mt, user=context.user) await _autoIndexFile(
fileId=fileId, fileName=fn, mimeType=mt, user=context.user,
mandateId=str(context.mandateId) if context.mandateId else None,
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
)
except Exception as ex: except Exception as ex:
logger.warning("Re-index after scope change failed for %s: %s", fileId, ex) logger.warning("Re-index after scope change failed for %s: %s", fileId, ex)
@ -837,7 +1140,11 @@ def updateFileNeutralize(
async def _runReindexAfterNeutralizeToggle(): async def _runReindexAfterNeutralizeToggle():
try: try:
await _autoIndexFile(fileId=fileId, fileName=fn, mimeType=mt, user=context.user) await _autoIndexFile(
fileId=fileId, fileName=fn, mimeType=mt, user=context.user,
mandateId=str(context.mandateId) if context.mandateId else None,
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
)
except Exception as ex: except Exception as ex:
logger.error("Re-index after neutralize toggle failed for %s: %s (file has NO index until next re-index)", fileId, ex) logger.error("Re-index after neutralize toggle failed for %s: %s (file has NO index until next re-index)", fileId, ex)
@ -909,7 +1216,7 @@ def update_file(
) -> FileItem: ) -> FileItem:
"""Update file info""" """Update file info"""
try: try:
_EDITABLE_FIELDS = {"fileName", "scope", "tags", "description", "neutralize"} _EDITABLE_FIELDS = {"fileName", "folderId", "scope", "tags", "description", "neutralize"}
safeData = {k: v for k, v in file_info.items() if k in _EDITABLE_FIELDS} safeData = {k: v for k, v in file_info.items() if k in _EDITABLE_FIELDS}
if not safeData: if not safeData:
raise HTTPException(status_code=400, detail=routeApiMsg("No editable fields provided")) raise HTTPException(status_code=400, detail=routeApiMsg("No editable fields provided"))
@ -131,11 +131,9 @@ def get_mandates(
handleFilterValuesInMemory, handleIdsInMemory, handleFilterValuesInMemory, handleIdsInMemory,
handleFilterValuesMode, handleIdsMode, handleFilterValuesMode, handleIdsMode,
parseCrossFilterPagination, parseCrossFilterPagination,
handleGroupingInRequest, applyGroupScopeFilter,
) )
appInterface = interfaceDbApp.getRootInterface() appInterface = interfaceDbApp.getRootInterface()
groupCtx = handleGroupingInRequest(paginationParams, appInterface, "mandates")
def _mandateItemsForAdmin(): def _mandateItemsForAdmin():
items = [] items = []
@ -154,23 +152,18 @@ def get_mandates(
values = appInterface.db.getDistinctColumnValues(Mandate, column, crossPagination) values = appInterface.db.getDistinctColumnValues(Mandate, column, crossPagination)
return JSONResponse(content=sorted(values, key=lambda v: str(v).lower())) return JSONResponse(content=sorted(values, key=lambda v: str(v).lower()))
else: else:
mandateItems = applyGroupScopeFilter(_mandateItemsForAdmin(), groupCtx.itemIds) return handleFilterValuesInMemory(_mandateItemsForAdmin(), column, pagination)
return handleFilterValuesInMemory(mandateItems, column, pagination)
if mode == "ids": if mode == "ids":
if isPlatformAdmin: if isPlatformAdmin:
return handleIdsMode(appInterface.db, Mandate, pagination) return handleIdsMode(appInterface.db, Mandate, pagination)
else: else:
mandateItems = applyGroupScopeFilter(_mandateItemsForAdmin(), groupCtx.itemIds) return handleIdsInMemory(_mandateItemsForAdmin(), pagination)
return handleIdsInMemory(mandateItems, pagination)
if isPlatformAdmin: if isPlatformAdmin:
result = appInterface.getAllMandates(pagination=paginationParams) result = appInterface.getAllMandates(pagination=paginationParams)
items = result.items if hasattr(result, 'items') else (result if isinstance(result, list) else []) items = result.items if hasattr(result, 'items') else (result if isinstance(result, list) else [])
items = applyGroupScopeFilter( items = [i.model_dump() if hasattr(i, 'model_dump') else (i if isinstance(i, dict) else vars(i)) for i in items]
[i.model_dump() if hasattr(i, 'model_dump') else (i if isinstance(i, dict) else vars(i)) for i in items],
groupCtx.itemIds,
)
if paginationParams and hasattr(result, 'items'): if paginationParams and hasattr(result, 'items'):
return PaginatedResponse( return PaginatedResponse(
items=items, items=items,
@ -182,13 +175,11 @@ def get_mandates(
sort=paginationParams.sort, sort=paginationParams.sort,
filters=paginationParams.filters filters=paginationParams.filters
), ),
groupTree=groupCtx.groupTree,
) )
else: else:
return PaginatedResponse(items=items, pagination=None, groupTree=groupCtx.groupTree) return PaginatedResponse(items=items, pagination=None)
else: else:
mandateItems = applyGroupScopeFilter(_mandateItemsForAdmin(), groupCtx.itemIds) return PaginatedResponse(items=_mandateItemsForAdmin(), pagination=None)
return PaginatedResponse(items=mandateItems, pagination=None, groupTree=groupCtx.groupTree)
except HTTPException: except HTTPException:
raise raise
@ -3,8 +3,10 @@
from fastapi import APIRouter, HTTPException, Depends, Body, Path, Request, Query from fastapi import APIRouter, HTTPException, Depends, Body, Path, Request, Query
from typing import List, Dict, Any, Optional from typing import List, Dict, Any, Optional
from fastapi import status from fastapi import status
from fastapi.responses import JSONResponse
import logging import logging
import json import json
import math
# Import auth module # Import auth module
from modules.auth import limiter, getCurrentUser from modules.auth import limiter, getCurrentUser
@ -46,13 +48,13 @@ def get_prompts(
""" """
from modules.routes.routeHelpers import ( from modules.routes.routeHelpers import (
handleFilterValuesInMemory, handleIdsInMemory, enrichRowsWithFkLabels, handleFilterValuesInMemory, handleIdsInMemory, enrichRowsWithFkLabels,
handleGroupingInRequest, applyGroupScopeFilter, resolveView, applyViewToParams, buildGroupLayout, effective_group_by_levels,
) )
from modules.interfaces.interfaceDbApp import getInterface as getAppInterface from modules.interfaces.interfaceDbApp import getInterface as getAppInterface
from modules.datamodels.datamodelPagination import AppliedViewMeta
CONTEXT_KEY = "prompts" CONTEXT_KEY = "prompts"
# Parse pagination params early — needed for grouping in all modes
paginationParams = None paginationParams = None
if pagination: if pagination:
try: try:
@ -64,7 +66,13 @@ def get_prompts(
raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}") raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
appInterface = getAppInterface(currentUser) appInterface = getAppInterface(currentUser)
groupCtx = handleGroupingInRequest(paginationParams, appInterface, CONTEXT_KEY)
# Resolve view and merge config into params
viewKey = paginationParams.viewKey if paginationParams else None
viewConfig, viewDisplayName = resolveView(appInterface, CONTEXT_KEY, viewKey)
viewMeta = AppliedViewMeta(viewKey=viewKey, displayName=viewDisplayName) if viewKey else None
paginationParams = applyViewToParams(paginationParams, viewConfig)
groupByLevels = effective_group_by_levels(paginationParams, viewConfig)
def _promptsToEnrichedDicts(promptItems): def _promptsToEnrichedDicts(promptItems):
dicts = [r.model_dump() if hasattr(r, 'model_dump') else (dict(r) if not isinstance(r, dict) else r) for r in promptItems] dicts = [r.model_dump() if hasattr(r, 'model_dump') else (dict(r) if not isinstance(r, dict) else r) for r in promptItems]
@ -73,43 +81,98 @@ def get_prompts(
managementInterface = interfaceDbManagement.getInterface(currentUser) managementInterface = interfaceDbManagement.getInterface(currentUser)
if mode == "groupSummary":
if not pagination:
raise HTTPException(status_code=400, detail="pagination required for groupSummary")
from modules.routes.routeHelpers import (
apply_strategy_b_filters_and_sort,
build_group_summary_groups,
)
if not groupByLevels or not groupByLevels[0].get("field"):
raise HTTPException(
status_code=400,
detail="groupByLevels[0].field required for groupSummary",
)
field = groupByLevels[0]["field"]
null_label = str(groupByLevels[0].get("nullLabel") or "")
result = managementInterface.getAllPrompts(pagination=None)
allItems = _promptsToEnrichedDicts(
result if isinstance(result, list) else (result.items if hasattr(result, "items") else [])
)
filtered = apply_strategy_b_filters_and_sort(allItems, paginationParams, currentUser)
groups_out = build_group_summary_groups(filtered, field, null_label)
return JSONResponse(content={"groups": groups_out})
if mode == "filterValues": if mode == "filterValues":
if not column: if not column:
raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues")
result = managementInterface.getAllPrompts(pagination=None) result = managementInterface.getAllPrompts(pagination=None)
items = _promptsToEnrichedDicts(result) return handleFilterValuesInMemory(_promptsToEnrichedDicts(result), column, pagination)
items = applyGroupScopeFilter(items, groupCtx.itemIds)
return handleFilterValuesInMemory(items, column, pagination)
if mode == "ids": if mode == "ids":
result = managementInterface.getAllPrompts(pagination=None) result = managementInterface.getAllPrompts(pagination=None)
items = _promptsToEnrichedDicts(result) return handleIdsInMemory(_promptsToEnrichedDicts(result), pagination)
items = applyGroupScopeFilter(items, groupCtx.itemIds)
return handleIdsInMemory(items, pagination)
result = managementInterface.getAllPrompts(pagination=paginationParams) if not groupByLevels:
# No grouping: let DB handle pagination directly
result = managementInterface.getAllPrompts(pagination=paginationParams)
if paginationParams and hasattr(result, 'items'):
response: dict = {
"items": _promptsToEnrichedDicts(result.items),
"pagination": PaginationMetadata(
currentPage=paginationParams.page,
pageSize=paginationParams.pageSize,
totalItems=result.totalItems,
totalPages=result.totalPages,
sort=paginationParams.sort,
filters=paginationParams.filters
).model_dump(),
}
else:
response = {"items": _promptsToEnrichedDicts(result if isinstance(result, list) else [result]), "pagination": None}
if viewMeta:
response["appliedView"] = viewMeta.model_dump()
return response
if paginationParams: # Strategy B grouping: load all, filter+sort in-memory, group, then slice
items = applyGroupScopeFilter(_promptsToEnrichedDicts(result.items), groupCtx.itemIds) result = managementInterface.getAllPrompts(pagination=None)
return { allItems = _promptsToEnrichedDicts(result if isinstance(result, list) else (result.items if hasattr(result, 'items') else []))
"items": items,
"pagination": PaginationMetadata( if not paginationParams:
currentPage=paginationParams.page, response = {"items": allItems, "pagination": None}
pageSize=paginationParams.pageSize, if viewMeta:
totalItems=result.totalItems, response["appliedView"] = viewMeta.model_dump()
totalPages=result.totalPages, return response
sort=paginationParams.sort,
filters=paginationParams.filters if paginationParams.filters or paginationParams.sort:
).model_dump(), from modules.interfaces.interfaceDbManagement import ComponentObjects
"groupTree": groupCtx.groupTree, comp = ComponentObjects()
} comp.setUserContext(currentUser)
else: if paginationParams.filters:
items = applyGroupScopeFilter(_promptsToEnrichedDicts(result), groupCtx.itemIds) allItems = comp._applyFilters(allItems, paginationParams.filters)
return { if paginationParams.sort:
"items": items, allItems = comp._applySorting(allItems, paginationParams.sort)
"pagination": None,
"groupTree": groupCtx.groupTree, totalItems = len(allItems)
} totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
page_items, groupLayout = buildGroupLayout(allItems, groupByLevels, paginationParams.page, paginationParams.pageSize)
response = {
"items": page_items,
"pagination": PaginationMetadata(
currentPage=paginationParams.page,
pageSize=paginationParams.pageSize,
totalItems=totalItems,
totalPages=totalPages,
sort=paginationParams.sort,
filters=paginationParams.filters
).model_dump(),
}
if groupLayout:
response["groupLayout"] = groupLayout.model_dump()
if viewMeta:
response["appliedView"] = viewMeta.model_dump()
return response
@router.post("", response_model=Prompt) @router.post("", response_model=Prompt)
@ -208,7 +208,6 @@ def get_users(
- GET /api/users/ (no pagination - returns all users in mandate) - GET /api/users/ (no pagination - returns all users in mandate)
- GET /api/users/?pagination={"page":1,"pageSize":10,"sort":[]} - GET /api/users/?pagination={"page":1,"pageSize":10,"sort":[]}
""" """
# Parse pagination early — needed for grouping in all modes
_paginationParams = None _paginationParams = None
if pagination: if pagination:
try: try:
@ -219,10 +218,6 @@ def get_users(
except (json.JSONDecodeError, ValueError) as e: except (json.JSONDecodeError, ValueError) as e:
raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}") raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
from modules.routes.routeHelpers import handleGroupingInRequest as _handleGrouping, applyGroupScopeFilter as _applyGroupScope
_appInterfaceForGrouping = interfaceDbApp.getInterface(context.user, mandateId=context.mandateId)
_groupCtx = _handleGrouping(_paginationParams, _appInterfaceForGrouping, "users")
if mode == "filterValues": if mode == "filterValues":
if not column: if not column:
raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues") raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues")
@ -233,14 +228,12 @@ def get_users(
try: try:
paginationParams = _paginationParams paginationParams = _paginationParams
appInterface = _appInterfaceForGrouping appInterface = interfaceDbApp.getInterface(context.user, mandateId=context.mandateId)
if context.mandateId: if context.mandateId:
# Get users for specific mandate using getUsersByMandate
result = appInterface.getUsersByMandate(str(context.mandateId), paginationParams) result = appInterface.getUsersByMandate(str(context.mandateId), paginationParams)
if paginationParams and hasattr(result, 'items'): if paginationParams and hasattr(result, 'items'):
enriched = _applyGroupScope(enrichRowsWithFkLabels(_usersToDicts(result.items), User), _groupCtx.itemIds) enriched = enrichRowsWithFkLabels(_usersToDicts(result.items), User)
return { return {
"items": enriched, "items": enriched,
"pagination": PaginationMetadata( "pagination": PaginationMetadata(
@ -251,18 +244,14 @@ def get_users(
sort=paginationParams.sort, sort=paginationParams.sort,
filters=paginationParams.filters filters=paginationParams.filters
).model_dump(), ).model_dump(),
"groupTree": _groupCtx.groupTree,
} }
else: else:
users = result if isinstance(result, list) else result.items if hasattr(result, 'items') else [] users = result if isinstance(result, list) else result.items if hasattr(result, 'items') else []
enriched = _applyGroupScope(enrichRowsWithFkLabels(_usersToDicts(users), User), _groupCtx.itemIds) return {"items": enrichRowsWithFkLabels(_usersToDicts(users), User), "pagination": None}
return {"items": enriched, "pagination": None, "groupTree": _groupCtx.groupTree}
elif context.isPlatformAdmin: elif context.isPlatformAdmin:
# PlatformAdmin without mandateId — DB-level pagination via interface
result = appInterface.getAllUsers(paginationParams) result = appInterface.getAllUsers(paginationParams)
if paginationParams and hasattr(result, 'items'): if paginationParams and hasattr(result, 'items'):
enriched = _applyGroupScope(enrichRowsWithFkLabels(_usersToDicts(result.items), User), _groupCtx.itemIds) enriched = enrichRowsWithFkLabels(_usersToDicts(result.items), User)
return { return {
"items": enriched, "items": enriched,
"pagination": PaginationMetadata( "pagination": PaginationMetadata(
@ -273,18 +262,13 @@ def get_users(
sort=paginationParams.sort, sort=paginationParams.sort,
filters=paginationParams.filters filters=paginationParams.filters
).model_dump(), ).model_dump(),
"groupTree": _groupCtx.groupTree,
} }
else: else:
users = result if isinstance(result, list) else (result.items if hasattr(result, 'items') else []) users = result if isinstance(result, list) else (result.items if hasattr(result, 'items') else [])
enriched = _applyGroupScope(enrichRowsWithFkLabels(_usersToDicts(users), User), _groupCtx.itemIds) return {"items": enrichRowsWithFkLabels(_usersToDicts(users), User), "pagination": None}
return {"items": enriched, "pagination": None, "groupTree": _groupCtx.groupTree}
else: else:
# Non-SysAdmin without mandateId: aggregate users across all admin mandates
rootInterface = getRootInterface() rootInterface = getRootInterface()
userMandates = rootInterface.getUserMandates(str(context.user.id)) userMandates = rootInterface.getUserMandates(str(context.user.id))
# Find mandates where user has admin role
adminMandateIds = [] adminMandateIds = []
for um in userMandates: for um in userMandates:
umId = getattr(um, 'id', None) umId = getattr(um, 'id', None)
@ -299,10 +283,7 @@ def get_users(
break break
if not adminMandateIds: if not adminMandateIds:
raise HTTPException( raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("No admin access to any mandate"))
status_code=status.HTTP_403_FORBIDDEN,
detail=routeApiMsg("No admin access to any mandate")
)
from modules.datamodels.datamodelMembership import UserMandate as UserMandateModel from modules.datamodels.datamodelMembership import UserMandate as UserMandateModel
allUM = rootInterface.db.getRecordset(UserMandateModel, recordFilter={"mandateId": adminMandateIds}) allUM = rootInterface.db.getRecordset(UserMandateModel, recordFilter={"mandateId": adminMandateIds})
@ -312,13 +293,10 @@ def get_users(
if (um.get("userId") if isinstance(um, dict) else getattr(um, "userId", None)) if (um.get("userId") if isinstance(um, dict) else getattr(um, "userId", None))
}) })
batchUsers = rootInterface.getUsersByIds(uniqueUserIds) if uniqueUserIds else {} batchUsers = rootInterface.getUsersByIds(uniqueUserIds) if uniqueUserIds else {}
allUsers = [ allUsers = [u.model_dump() if hasattr(u, 'model_dump') else vars(u) for u in batchUsers.values()]
u.model_dump() if hasattr(u, 'model_dump') else vars(u)
for u in batchUsers.values()
]
from modules.routes.routeHelpers import applyFiltersAndSort as _applyFiltersAndSortHelper from modules.routes.routeHelpers import applyFiltersAndSort as _applyFiltersAndSortHelper
filteredUsers = _applyGroupScope(_applyFiltersAndSortHelper(allUsers, paginationParams), _groupCtx.itemIds) filteredUsers = _applyFiltersAndSortHelper(allUsers, paginationParams)
enriched = enrichRowsWithFkLabels(filteredUsers, User) enriched = enrichRowsWithFkLabels(filteredUsers, User)
if paginationParams: if paginationParams:
@ -327,7 +305,6 @@ def get_users(
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0 totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
startIdx = (paginationParams.page - 1) * paginationParams.pageSize startIdx = (paginationParams.page - 1) * paginationParams.pageSize
endIdx = startIdx + paginationParams.pageSize endIdx = startIdx + paginationParams.pageSize
return { return {
"items": enriched[startIdx:endIdx], "items": enriched[startIdx:endIdx],
"pagination": PaginationMetadata( "pagination": PaginationMetadata(
@ -338,10 +315,9 @@ def get_users(
sort=paginationParams.sort, sort=paginationParams.sort,
filters=paginationParams.filters filters=paginationParams.filters
).model_dump(), ).model_dump(),
"groupTree": _groupCtx.groupTree,
} }
else: else:
return {"items": enriched, "pagination": None, "groupTree": _groupCtx.groupTree} return {"items": enriched, "pagination": None}
except HTTPException: except HTTPException:
raise raise
except Exception as e: except Exception as e:
@ -704,154 +704,260 @@ def paginateInMemory(
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
# Table Grouping helpers # View resolution and Strategy B grouping engine
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
from dataclasses import dataclass, field as dc_field def resolveView(interface, contextKey: str, viewKey: Optional[str]):
@dataclass
class GroupingContext:
""" """
Result of handleGroupingInRequest. Load a TableListView for the current user and contextKey.
Carries the group tree for the response and the resolved item-ID set for
group-scope filtering (None = no active group scope). Returns (config_dict, display_name):
- (None, None) when viewKey is None / empty
- (config, str | None) otherwise config may be {}; display_name from the row
Raises HTTPException(404) when viewKey is explicitly set but the view
does not exist (prevents silent fallback to ungrouped behaviour).
""" """
groupTree: Optional[list] # List[TableGroupNode] serialised as dicts — for response from fastapi import HTTPException
itemIds: Optional[set] # Set[str] when groupId was set, else None if not viewKey:
return None, None
try:
view = interface.getTableListView(contextKey=contextKey, viewKey=viewKey)
except Exception as e:
logger.warning(f"resolveView: store lookup failed for key={viewKey!r} context={contextKey!r}: {e}")
view = None
if view is None:
raise HTTPException(status_code=404, detail=f"View '{viewKey}' not found for context '{contextKey}'")
cfg = view.config or {}
dname = getattr(view, "displayName", None) or None
return cfg, dname
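The contract is easy to pin down with a stub store: a missing viewKey short-circuits to (None, None), a known key yields (config, displayName), and an unknown explicit key raises 404 rather than silently falling back. A sketch that runs inside this codebase (the stub row is a SimpleNamespace, not a real TableListView):

from types import SimpleNamespace
from modules.routes.routeHelpers import resolveView

class StubIface:
    def getTableListView(self, contextKey, viewKey):
        if viewKey == "mine":
            return SimpleNamespace(config={"sort": [{"field": "fileName"}]},
                                   displayName="My view")
        return None   # unknown key

assert resolveView(StubIface(), "files/list", None) == (None, None)
cfg, name = resolveView(StubIface(), "files/list", "mine")
assert name == "My view" and "sort" in cfg
# resolveView(StubIface(), "files/list", "nope")  # -> HTTPException(404)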
def _collectItemIds(nodes: list, groupId: str) -> Optional[set]: def effective_group_by_levels(
pagination_params: Optional["PaginationParams"],
view_config: Optional[dict],
) -> List[Dict[str, Any]]:
""" """
Recursively search *nodes* for a node whose id == groupId and collect Choose grouping levels for this request.
all itemIds from it and all its descendant subGroups.
Returns None if the group is not found. If the client sends ``groupByLevels`` (including ``[]``), it wins over the
saved view. If the key is omitted (``None``), use the view's levels.
""" """
for node in nodes: if pagination_params is not None:
nodeId = node.get("id") if isinstance(node, dict) else getattr(node, "id", None) req = getattr(pagination_params, "groupByLevels", None)
if nodeId == groupId: if req is not None:
ids: set = set() out: List[Dict[str, Any]] = []
_collectAllIds(node, ids) for lvl in req:
return ids if hasattr(lvl, "model_dump"):
subGroups = node.get("subGroups", []) if isinstance(node, dict) else getattr(node, "subGroups", []) out.append(lvl.model_dump())
result = _collectItemIds(subGroups, groupId) elif isinstance(lvl, dict):
if result is not None: out.append(dict(lvl))
return result else:
return None out.append(dict(lvl)) # type: ignore[arg-type]
return out
vc = (view_config or {}).get("groupByLevels") if view_config else None
return list(vc or [])
def _collectAllIds(node, ids: set) -> None: def applyViewToParams(params: Optional["PaginationParams"], viewConfig: Optional[dict]) -> Optional["PaginationParams"]:
"""Collect itemIds from a node and all its descendants into ids."""
nodeItemIds = node.get("itemIds", []) if isinstance(node, dict) else getattr(node, "itemIds", [])
for iid in nodeItemIds:
ids.add(str(iid))
subGroups = node.get("subGroups", []) if isinstance(node, dict) else getattr(node, "subGroups", [])
for child in subGroups:
_collectAllIds(child, ids)
def _removeGroupFromTree(nodes: list, groupId: str) -> list:
"""Remove a group node (and all descendants) from the tree by id."""
result = []
for node in nodes:
nodeId = node.get("id") if isinstance(node, dict) else getattr(node, "id", None)
if nodeId == groupId:
continue # skip this node (remove it)
subGroups = node.get("subGroups", []) if isinstance(node, dict) else getattr(node, "subGroups", [])
filtered_sub = _removeGroupFromTree(subGroups, groupId)
if isinstance(node, dict):
node = {**node, "subGroups": filtered_sub}
result.append(node)
return result
def handleGroupingInRequest(
paginationParams: Optional[PaginationParams],
interface,
contextKey: str,
) -> GroupingContext:
""" """
Central grouping handler call at the start of every list route that Merge a view's saved configuration into PaginationParams.
supports table grouping.
Steps (in order): Priority: explicit request fields win over view defaults.
1. If paginationParams.saveGroupTree is set: - sort: use request sort if non-empty, otherwise view sort
persist the new tree via interface.upsertTableGrouping, then clear - filters: deep-merge (request filters win per-key)
saveGroupTree from paginationParams so it is not treated as a filter. - pageSize: use request value (already set by normalize_pagination_dict)
2. Load the current group tree from the DB (used in step 3 and response).
3. If paginationParams.groupId is set:
resolve it to a Set[str] of itemIds (including all sub-groups),
then clear groupId from paginationParams so it is not treated as a
normal filter field.
4. Return a GroupingContext with groupTree (for the response) and itemIds
(for applyGroupScopeFilter).
The caller does NOT need to handle any grouping logic itself just call Returns the (mutated) params, or a new minimal PaginationParams when
applyGroupScopeFilter(items, groupCtx.itemIds) and embed groupCtx.groupTree params is None (so callers always get a valid object).
in the response dict.
""" """
from modules.datamodels.datamodelPagination import TableGroupNode from modules.datamodels.datamodelPagination import PaginationParams, SortField
if not viewConfig:
return params
groupTree = None if params is None:
itemIds = None params = PaginationParams(page=1, pageSize=25)
if paginationParams is None: # Sort: request wins if non-empty
if not params.sort and viewConfig.get("sort"):
try: try:
existing = interface.getTableGrouping(contextKey) params.sort = [
if existing: SortField(**s) if isinstance(s, dict) else s
groupTree = [n.model_dump() if hasattr(n, "model_dump") else n for n in existing.rootGroups] for s in viewConfig["sort"]
]
except Exception as e: except Exception as e:
logger.warning(f"handleGroupingInRequest: getTableGrouping failed: {e}") logger.warning(f"applyViewToParams: could not parse view sort: {e}")
return GroupingContext(groupTree=groupTree, itemIds=None)
# Step 1: persist saveGroupTree if present # Filters: deep-merge (request filters take priority per-key)
if paginationParams.saveGroupTree is not None: viewFilters = viewConfig.get("filters") or {}
try: if viewFilters:
saved = interface.upsertTableGrouping(contextKey, paginationParams.saveGroupTree) merged = dict(viewFilters)
groupTree = [n.model_dump() if hasattr(n, "model_dump") else n for n in saved.rootGroups] if params.filters:
except Exception as e: merged.update(params.filters)
logger.error(f"handleGroupingInRequest: upsertTableGrouping failed: {e}") params.filters = merged
paginationParams.saveGroupTree = None
# Step 2: load current tree (only if not already set from save above) return params
if groupTree is None:
try:
existing = interface.getTableGrouping(contextKey)
if existing:
groupTree = [n.model_dump() if hasattr(n, "model_dump") else n for n in existing.rootGroups]
except Exception as e:
logger.warning(f"handleGroupingInRequest: getTableGrouping failed: {e}")
# Step 3: resolve groupId to itemIds set
if paginationParams.groupId is not None: def apply_strategy_b_filters_and_sort(
targetGroupId = paginationParams.groupId items: List[Dict[str, Any]],
paginationParams.groupId = None # remove so it is not treated as a normal filter pagination_params: Optional[PaginationParams],
if groupTree: current_user: Any,
itemIds = _collectItemIds(groupTree, targetGroupId) ) -> List[Dict[str, Any]]:
if itemIds is None: """
logger.warning( Shared in-memory filter + sort pass for Strategy B (files/prompts/connections lists).
f"handleGroupingInRequest: groupId={targetGroupId!r} not found in tree " """
f"for contextKey={contextKey!r} — returning empty set" if not pagination_params:
) return list(items)
itemIds = set() # unknown group → show nothing rather than everything from modules.interfaces.interfaceDbManagement import ComponentObjects
comp = ComponentObjects()
comp.setUserContext(current_user)
out = list(items)
if pagination_params.filters:
out = comp._applyFilters(out, pagination_params.filters)
if pagination_params.sort:
out = comp._applySorting(out, pagination_params.sort)
return out
def build_group_summary_groups(
items: List[Dict[str, Any]],
field: str,
null_label: str = "",
) -> List[Dict[str, Any]]:
"""
Build {"value", "label", "totalCount"} for mode=groupSummary (single grouping level).
"""
from collections import defaultdict
counts: Dict[str, int] = defaultdict(int)
display_by_key: Dict[str, str] = {}
null_key = "\x00NULL"
label_attr = f"{field}Label"
for item in items:
raw = item.get(field)
if raw is None or raw == "":
nk = null_key
display = null_label
else: else:
# groupId sent but no tree saved yet → return empty (nothing belongs to any group) nk = str(raw)
logger.warning( display = None
f"handleGroupingInRequest: groupId={targetGroupId!r} set but no tree exists " lbl = item.get(label_attr)
f"for contextKey={contextKey!r} — returning empty set" if lbl is not None and lbl != "":
) display = str(lbl)
itemIds = set() if display is None:
display = nk
counts[nk] += 1
if nk not in display_by_key:
display_by_key[nk] = display
return GroupingContext(groupTree=groupTree, itemIds=itemIds) ordered_keys = sorted(
counts.keys(),
key=lambda x: (x == null_key, str(display_by_key.get(x, x)).lower()),
)
return [
{
"value": None if nk == null_key else nk,
"label": display_by_key.get(nk, nk),
"totalCount": counts[nk],
}
for nk in ordered_keys
]
def applyGroupScopeFilter(items: List[Dict[str, Any]], itemIds: Optional[set]) -> List[Dict[str, Any]]: def buildGroupLayout(
all_items: List[Dict[str, Any]],
groupByLevels: List[Dict[str, Any]],
page: int,
pageSize: int,
) -> tuple:
""" """
Filter items to those whose "id" field is in itemIds. Apply multi-level grouping to all_items, slice to the requested page,
Returns items unchanged when itemIds is None (no active group scope). and return (page_items, GroupLayout | None).
Works for both normal list items and for mode=ids / mode=filterValues flows
call it before handleIdsInMemory / handleFilterValuesInMemory. Strategy B: grouping operates on the full filtered+sorted candidate list.
Items are stably re-sorted by the group path so that members of the same
group are always contiguous (preserving the existing per-group sort order
from the caller).
Parameters
----------
all_items: fully filtered and user-sorted list of row dicts.
groupByLevels: list of {"field": str, "nullLabel": str, "direction": "asc"|"desc"} dicts.
page, pageSize: 1-based page index and page size.
Returns
-------
(page_items, GroupLayout | None)
""" """
if itemIds is None: from functools import cmp_to_key
return items from modules.datamodels.datamodelPagination import GroupBand, GroupLayout
return [item for item in items if str(item.get("id", "")) in itemIds]
if not groupByLevels:
offset = (page - 1) * pageSize
return all_items[offset:offset + pageSize], None
levels = [lvl.get("field", "") for lvl in groupByLevels if lvl.get("field")]
if not levels:
offset = (page - 1) * pageSize
return all_items[offset:offset + pageSize], None
nullLabels = {lvl.get("field", ""): lvl.get("nullLabel", "") for lvl in groupByLevels}
def _path_key(item: dict) -> tuple:
return tuple(
str(item.get(f) or "") if item.get(f) is not None else nullLabels.get(f, "")
for f in levels
)
def _item_cmp(a: dict, b: dict) -> int:
pa, pb = _path_key(a), _path_key(b)
for i in range(len(levels)):
if pa[i] != pb[i]:
asc = (groupByLevels[i].get("direction") or "asc").lower() != "desc"
if pa[i] < pb[i]:
return -1 if asc else 1
return 1 if asc else -1
return 0
# Sort by group path (per-level asc/desc); order within same path stays stable in Py3.12+
all_items.sort(key=cmp_to_key(_item_cmp))
# Build global band list from the full sorted list
bands_global: List[dict] = []
current_path: Optional[tuple] = None
current_start = 0
for i, item in enumerate(all_items):
path = _path_key(item)
if path != current_path:
if current_path is not None:
bands_global.append({"path": list(current_path), "startIdx": current_start, "endIdx": i})
current_path = path
current_start = i
if current_path is not None:
bands_global.append({"path": list(current_path), "startIdx": current_start, "endIdx": len(all_items)})
# Slice to page
page_start = (page - 1) * pageSize
page_end = page_start + pageSize
page_items = all_items[page_start:page_end]
# Find bands that have at least one row on this page
bands_on_page: List[GroupBand] = []
for band in bands_global:
inter_start = max(band["startIdx"], page_start)
inter_end = min(band["endIdx"], page_end)
if inter_start >= inter_end:
continue
path_list = band["path"]
bands_on_page.append(GroupBand(
path=path_list,
label=path_list[-1] if path_list else "",
startRowIndex=inter_start - page_start,
rowCount=inter_end - inter_start,
))
group_layout = GroupLayout(levels=levels, bands=bands_on_page) if bands_on_page else GroupLayout(levels=levels, bands=[])
return page_items, group_layout
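
Taken together, a Strategy B list route would merge the saved view, run the shared filter/sort pass, and hand the result to the layout builder. A minimal sketch under stated assumptions: the route shape and response dict are illustrative, and because the groupByLevels resolver's def line is cut off above, its view-only fallback is inlined here.

    def list_with_view(items, params, view_config, current_user):
        # View defaults first; explicit request fields win inside applyViewToParams
        params = applyViewToParams(params, view_config)
        rows = apply_strategy_b_filters_and_sort(items, params, current_user)
        # Stand-in for the truncated resolver: fall back to the view's levels
        levels = (view_config or {}).get("groupByLevels") or []
        page_items, layout = buildGroupLayout(rows, levels, params.page, params.pageSize)
        return {"items": page_items, "groupLayout": layout, "totalCount": len(rows)}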

View file

@@ -128,7 +128,7 @@ async def getSharepointFolderOptionsByReference(
     # Set access token on SharePoint service
     if not services.sharepoint.setAccessTokenFromConnection(connection):
         raise HTTPException(
-            status_code=status.HTTP_401_UNAUTHORIZED,
+            status_code=status.HTTP_502_BAD_GATEWAY,
             detail=routeApiMsg("Failed to set SharePoint access token. Connection may be expired or invalid.")
         )

View file

@@ -0,0 +1,177 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
CRUD endpoints for saved table views (TableListView).
A view stores a named preset of filters, sort order, and groupByLevels for a
specific table (identified by contextKey). Views are per-user and optionally
per-mandate.
Route prefix: /api/table-views
"""
import logging
from typing import List, Optional
from fastapi import APIRouter, HTTPException, Depends, Body, Path, Query, Request
from fastapi import status
from modules.auth import limiter, getCurrentUser
from modules.datamodels.datamodelUam import User
from modules.datamodels.datamodelPagination import TableListView
import modules.interfaces.interfaceDbApp as interfaceDbApp
logger = logging.getLogger(__name__)
router = APIRouter(
prefix="/api/table-views",
tags=["Table Views"],
responses={404: {"description": "Not found"}},
)
def _ownedOrRaise(view: Optional[TableListView], viewId: str, userId: str):
"""Raise 404 when view is missing; ownership is implicitly guaranteed by the
interface layer (views are always queried with the current userId)."""
if view is None:
raise HTTPException(status_code=404, detail=f"View '{viewId}' not found")
return view
# ---------------------------------------------------------------------------
# List views for a context
# ---------------------------------------------------------------------------
@router.get("")
@limiter.limit("60/minute")
def list_views(
request: Request,
contextKey: str = Query(..., description="Table context key, e.g. 'connections', 'files/list'"),
currentUser: User = Depends(getCurrentUser),
):
"""List all saved views for the current user and contextKey."""
iface = interfaceDbApp.getInterface(currentUser)
views = iface.getTableListViews(contextKey=contextKey)
return [v.model_dump() if hasattr(v, "model_dump") else v for v in views]
# ---------------------------------------------------------------------------
# Get one view
# ---------------------------------------------------------------------------
@router.get("/{viewKey}")
@limiter.limit("60/minute")
def get_view(
request: Request,
viewKey: str = Path(..., description="View slug"),
contextKey: str = Query(..., description="Table context key"),
currentUser: User = Depends(getCurrentUser),
):
"""Return a single saved view by its viewKey."""
iface = interfaceDbApp.getInterface(currentUser)
view = iface.getTableListView(contextKey=contextKey, viewKey=viewKey)
if view is None:
raise HTTPException(status_code=404, detail=f"View '{viewKey}' not found for context '{contextKey}'")
return view.model_dump() if hasattr(view, "model_dump") else view
# ---------------------------------------------------------------------------
# Create a view
# ---------------------------------------------------------------------------
@router.post("", status_code=status.HTTP_201_CREATED)
@limiter.limit("30/minute")
def create_view(
request: Request,
body: dict = Body(...),
currentUser: User = Depends(getCurrentUser),
):
"""
Create a new saved view.
Body fields:
- contextKey (required): table context key
- viewKey (required): short slug, unique per (user, contextKey)
- displayName (required): human-readable label
- config (optional): view config dict with keys:
schemaVersion, filters, sort, groupByLevels
"""
contextKey = body.get("contextKey")
viewKey = body.get("viewKey")
displayName = body.get("displayName")
config = body.get("config") or {}
if not contextKey:
raise HTTPException(status_code=400, detail="contextKey is required")
if not viewKey:
raise HTTPException(status_code=400, detail="viewKey is required")
if not displayName:
raise HTTPException(status_code=400, detail="displayName is required")
iface = interfaceDbApp.getInterface(currentUser)
try:
view = iface.createTableListView(
contextKey=contextKey,
viewKey=viewKey,
displayName=displayName,
config=config,
)
return view.model_dump() if hasattr(view, "model_dump") else view
except ValueError as e:
raise HTTPException(status_code=409, detail=str(e))
except Exception as e:
logger.error(f"create_view failed: {e}")
raise HTTPException(status_code=500, detail="Failed to create view")
# ---------------------------------------------------------------------------
# Update a view (by id)
# ---------------------------------------------------------------------------
@router.put("/{viewId}")
@limiter.limit("30/minute")
def update_view(
request: Request,
viewId: str = Path(..., description="View primary-key id (not viewKey)"),
body: dict = Body(...),
currentUser: User = Depends(getCurrentUser),
):
"""
Update an existing view.
Updatable fields: displayName, viewKey, config.
The contextKey cannot be changed after creation.
"""
allowed = {"displayName", "viewKey", "config"}
updates = {k: v for k, v in body.items() if k in allowed}
if not updates:
raise HTTPException(status_code=400, detail=f"No updatable fields provided. Allowed: {allowed}")
iface = interfaceDbApp.getInterface(currentUser)
try:
updated = iface.updateTableListView(viewId=viewId, updates=updates)
except Exception as e:
logger.error(f"update_view failed: {e}")
raise HTTPException(status_code=500, detail="Failed to update view")
if updated is None:
raise HTTPException(status_code=404, detail=f"View id='{viewId}' not found")
return updated.model_dump() if hasattr(updated, "model_dump") else updated
# ---------------------------------------------------------------------------
# Delete a view (by id)
# ---------------------------------------------------------------------------
@router.delete("/{viewId}", status_code=status.HTTP_204_NO_CONTENT)
@limiter.limit("30/minute")
def delete_view(
request: Request,
viewId: str = Path(..., description="View primary-key id"),
currentUser: User = Depends(getCurrentUser),
):
"""Delete a saved view by its primary-key id."""
iface = interfaceDbApp.getInterface(currentUser)
deleted = iface.deleteTableListView(viewId=viewId)
if not deleted:
raise HTTPException(status_code=404, detail=f"View id='{viewId}' not found or could not be deleted")
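
A hypothetical client session against this router; the base URL, auth header, and config field names are assumptions for illustration, while the paths and required body keys come from the routes above. It also assumes the serialized view includes its primary-key id.

    import requests

    base = "http://localhost:8000/api/table-views"
    headers = {"Authorization": "Bearer <token>"}

    # Create a saved view for the connections table
    created = requests.post(base, headers=headers, json={
        "contextKey": "connections",
        "viewKey": "active-only",
        "displayName": "Active connections",
        "config": {"filters": {"status": "active"}, "sort": [{"field": "name", "direction": "asc"}]},
    }).json()

    # List views for the context, fetch one by slug, then delete by primary-key id
    requests.get(base, headers=headers, params={"contextKey": "connections"})
    requests.get(f"{base}/active-only", headers=headers, params={"contextKey": "connections"})
    requests.delete(f"{base}/{created['id']}", headers=headers)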

View file

@@ -3,7 +3,7 @@
 """ActionToolAdapter: wraps existing workflow actions (dynamicMode=True) as agent tools."""
 import logging
-from typing import Dict, Any, List
+from typing import Dict, Any, List, Optional
 from modules.serviceCenter.services.serviceAgent.datamodelAgent import (
     ToolDefinition, ToolResult
@@ -44,7 +44,7 @@ class ActionToolAdapter:
             compoundName = f"{shortName}_{actionName}"
             toolDef = _buildToolDefinition(compoundName, actionDef, actionInfo)
-            handler = _createDispatchHandler(self._actionExecutor, shortName, actionName)
+            handler = _createDispatchHandler(self._actionExecutor, shortName, actionName, self._actionExecutor.services)
             toolRegistry.registerFromDefinition(toolDef, handler)
             self._registeredTools.append(compoundName)
             registered += 1
@@ -186,7 +186,7 @@ def _catalogTypeToJsonSchema(typeStr: str, _depth: int = 0) -> Dict[str, Any]:
     return {"type": "string", "description": f"unknown type '{typeStr}' (defaulted to string)"}

-def _createDispatchHandler(actionExecutor, methodName: str, actionName: str):
+def _createDispatchHandler(actionExecutor, methodName: str, actionName: str, services=None):
     """Create an async handler that dispatches to the ActionExecutor.

     Parameter validation and Ref-payload normalization (collapsing
@@ -204,7 +204,7 @@ def _createDispatchHandler(actionExecutor, methodName: str, actionName: str):
             if "mandateId" not in args and context.get("mandateId"):
                 args["mandateId"] = context["mandateId"]
             result = await actionExecutor.executeAction(methodName, actionName, args)
-            data = _formatActionResult(result)
+            data = _formatActionResult(result, services, context)
             return ToolResult(
                 toolCallId="",
                 toolName=f"{methodName}_{actionName}",
@@ -223,9 +223,65 @@ def _createDispatchHandler(actionExecutor, methodName: str, actionName: str):
     return _handler

-def _formatActionResult(result) -> str:
-    """Format an ActionResult into a text representation for the agent."""
+_INLINE_CONTENT_LIMIT = 2000
+
+
+def _persistLargeDocument(doc, services, context: Dict[str, Any]) -> Optional[str]:
+    """Save an ActionDocument with large content as a workspace file.
+    Returns a formatted result line (with file id + docItem ref) or None
+    if persistence is not possible.
+    """
+    if not services:
+        return None
+    chatService = getattr(services, "chat", None)
+    if not chatService:
+        return None
+    docData = getattr(doc, "documentData", None)
+    if not docData or not isinstance(docData, str):
+        return None
+    docName = getattr(doc, "documentName", "unnamed")
+    docBytes = docData.encode("utf-8")
+    try:
+        fileItem, _ = chatService.interfaceDbComponent.saveUploadedFile(docBytes, docName)
+        fiId = context.get("featureInstanceId") or getattr(services, "featureInstanceId", "")
+        if fiId:
+            chatService.interfaceDbComponent.updateFile(fileItem.id, {"featureInstanceId": fiId})
+        from modules.serviceCenter.services.serviceAgent.coreTools._helpers import (
+            _attachFileAsChatDocument,
+            _formatToolFileResult,
+            _getOrCreateTempFolder,
+        )
+        tempFolderId = _getOrCreateTempFolder(chatService)
+        if tempFolderId:
+            chatService.interfaceDbComponent.updateFile(fileItem.id, {"folderId": tempFolderId})
+        chatDocId = _attachFileAsChatDocument(
+            services, fileItem,
+            label=f"action_doc:{docName}",
+            userMessage=f"Action document: {docName}",
+        )
+        return _formatToolFileResult(
+            fileItem=fileItem,
+            chatDocId=chatDocId,
+            actionLabel="Produced",
+            extraInfo="Use readFile to read the content.",
+        )
+    except Exception as e:
+        logger.warning(f"_persistLargeDocument failed for {docName}: {e}")
+        return None
+
+
+def _formatActionResult(result, services=None, context: Optional[Dict[str, Any]] = None) -> str:
+    """Format an ActionResult into a text representation for the agent.
+    Documents whose content exceeds the inline limit are persisted as
+    workspace files so the agent can access them via readFile /
+    ai_process / searchInFileContent.
+    """
     parts = []
+    ctx = context or {}
     if result.resultLabel:
         parts.append(f"Result: {result.resultLabel}")
@@ -238,10 +294,19 @@ def _formatActionResult(result) -> str:
     for doc in result.documents:
         docName = getattr(doc, "documentName", "unnamed")
         docType = getattr(doc, "mimeType", "unknown")
-        parts.append(f"  - {docName} ({docType})")
         docData = getattr(doc, "documentData", None)
-        if docData and isinstance(docData, str) and len(docData) < 2000:
-            parts.append(f"  Content: {docData[:2000]}")
+        isLarge = docData and isinstance(docData, str) and len(docData) >= _INLINE_CONTENT_LIMIT
+        if isLarge:
+            persistedLine = _persistLargeDocument(doc, services, ctx)
+            if persistedLine:
+                parts.append(f"  - {docName} ({docType})")
+                parts.append(f"    {persistedLine}")
+                continue
+        parts.append(f"  - {docName} ({docType})")
+        if docData and isinstance(docData, str) and len(docData) < _INLINE_CONTENT_LIMIT:
+            parts.append(f"  Content: {docData[:_INLINE_CONTENT_LIMIT]}")
     if not parts:
         parts.append("Action completed successfully." if result.success else "Action failed.")

View file

@@ -198,7 +198,10 @@ def _registerDataSourceTools(registry: ToolRegistry, services):
             if isinstance(result, _DR):
                 fileBytes = result.data
-                fileName = result.fileName or fileName
+                resolvedName = result.fileName or fileName
+                if resolvedName != fileName:
+                    logger.debug(f"downloadFromDataSource: connector fileName={result.fileName!r} overrides arg fileName={fileName!r}")
+                fileName = resolvedName
             else:
                 fileBytes = result

View file

@@ -61,34 +61,8 @@ async def _getOrCreateInstanceGroup(
     featureInstanceId: str,
     contextKey: str = "files/list",
 ) -> Optional[str]:
-    """Return groupId of the default group for a feature instance; create if needed."""
-    try:
-        existing = appInterface.getTableGrouping(contextKey)
-        nodes = [
-            n.model_dump() if hasattr(n, "model_dump") else (n if isinstance(n, dict) else vars(n))
-            for n in (existing.rootGroups if existing else [])
-        ]
-        def _find(nds):
-            for nd in nds:
-                meta = nd.get("meta", {}) if isinstance(nd, dict) else getattr(nd, "meta", {})
-                if (meta or {}).get("featureInstanceId") == featureInstanceId:
-                    return nd.get("id") if isinstance(nd, dict) else getattr(nd, "id", None)
-                found = _find(nd.get("subGroups", []) if isinstance(nd, dict) else getattr(nd, "subGroups", []))
-                if found:
-                    return found
-            return None
-        found = _find(nodes)
-        if found:
-            return found
-        newId = str(uuid.uuid4())
-        nodes.append({"id": newId, "name": featureInstanceId, "itemIds": [], "subGroups": [], "meta": {"featureInstanceId": featureInstanceId}})
-        appInterface.upsertTableGrouping(contextKey, nodes)
-        return newId
-    except Exception as e:
-        logger.error(f"_getOrCreateInstanceGroup: {e}")
-        return None
+    """Stub — file group tree removed. Returns None; callers that checked the result will skip group assignment."""
+    return None


 async def _getOrCreateTempGroup(
@@ -96,8 +70,8 @@ async def _getOrCreateTempGroup(
     sessionId: str,
     contextKey: str = "files/list",
 ) -> Optional[str]:
-    """Return groupId of a temporary group for a session; create if needed."""
-    return await _getOrCreateInstanceGroup(appInterface, f"_temp_{sessionId}", contextKey)
+    """Stub — file group tree removed. Returns None."""
+    return None

 def _attachFileAsChatDocument(

View file

@@ -836,7 +836,7 @@ def _registerMediaTools(registry: ToolRegistry, services):
             return ToolResult(toolCallId="", toolName="executeCode", success=False, error=f"Language '{language}' not supported. Only 'python' is available.")
         try:
             from modules.serviceCenter.services.serviceAgent.sandboxExecutor import executePython
-            result = await executePython(code)
+            result = await executePython(code, services=services)
             if result.get("success"):
                 output = result.get("output", "(no output)")
                 return ToolResult(toolCallId="", toolName="executeCode", success=True, data=output)
@@ -886,12 +886,17 @@ def _registerMediaTools(registry: ToolRegistry, services):
         readOnly=True
     )

+    from modules.serviceCenter.services.serviceAgent.sandboxExecutor import SANDBOX_ALLOWED_MODULES
+    moduleList = ", ".join(sorted(SANDBOX_ALLOWED_MODULES | {"io"}))
     registry.register(
         "executeCode", _executeCode,
         description=(
-            "Execute Python code in a sandboxed environment for calculations and data analysis. "
-            "Available modules: math, statistics, json, csv, re, datetime, collections, itertools, functools, decimal, fractions, random. "
-            "No file system, network, or OS access. Max 30s execution time. "
+            f"Execute Python code in a sandboxed environment for calculations and data analysis. "
+            f"Available modules: {moduleList}. "
+            "io is restricted to StringIO and BytesIO only (no file access). "
+            "Built-in readFile(fileId) returns UTF-8 content of a workspace file by its file ID "
+            "(use the 'file id' from tool outputs, e.g. data = readFile('019af...')). "
+            "No other file system, network, or OS access. Max 30s execution time. "
             "Use print() to produce output."
         ),
         parameters={

View file

@@ -312,52 +312,7 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
             fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
             if fiId:
                 dbMgmt.updateFile(fileItem.id, {"featureInstanceId": fiId})
-            if args.get("groupId"):
-                try:
-                    appIface = chatService.interfaceDbApp
-                    existing = appIface.getTableGrouping("files/list")
-                    nodes = [n.model_dump() if hasattr(n, "model_dump") else (n if isinstance(n, dict) else vars(n)) for n in (existing.rootGroups if existing else [])]
-                    def _addToGroup(nds, gid, fid):
-                        for nd in nds:
-                            nid = nd.get("id") if isinstance(nd, dict) else getattr(nd, "id", None)
-                            if nid == gid:
-                                ids = list(nd.get("itemIds", []) if isinstance(nd, dict) else getattr(nd, "itemIds", []))
-                                if fid not in ids:
-                                    ids.append(fid)
-                                if isinstance(nd, dict):
-                                    nd["itemIds"] = ids
-                                return True
-                            if _addToGroup(nd.get("subGroups", []) if isinstance(nd, dict) else getattr(nd, "subGroups", []), gid, fid):
-                                return True
-                        return False
-                    _addToGroup(nodes, args["groupId"], fileItem.id)
-                    appIface.upsertTableGrouping("files/list", nodes)
-                except Exception as _ge:
-                    logger.warning(f"writeFile: failed to add file to group {args['groupId']}: {_ge}")
-            elif fiId:
-                try:
-                    appIface = chatService.interfaceDbApp
-                    instanceGroupId = await _getOrCreateInstanceGroup(appIface, fiId)
-                    if instanceGroupId:
-                        existing = appIface.getTableGrouping("files/list")
-                        nodes = [n.model_dump() if hasattr(n, "model_dump") else (n if isinstance(n, dict) else vars(n)) for n in (existing.rootGroups if existing else [])]
-                        def _addToGroup2(nds, gid, fid):
-                            for nd in nds:
-                                nid = nd.get("id") if isinstance(nd, dict) else getattr(nd, "id", None)
-                                if nid == gid:
-                                    ids = list(nd.get("itemIds", []) if isinstance(nd, dict) else getattr(nd, "itemIds", []))
-                                    if fid not in ids:
-                                        ids.append(fid)
-                                    if isinstance(nd, dict):
-                                        nd["itemIds"] = ids
-                                    return True
-                                if _addToGroup2(nd.get("subGroups", []) if isinstance(nd, dict) else getattr(nd, "subGroups", []), gid, fid):
-                                    return True
-                            return False
-                        _addToGroup2(nodes, instanceGroupId, fileItem.id)
-                        appIface.upsertTableGrouping("files/list", nodes)
-                except Exception as _ge:
-                    logger.warning(f"writeFile: failed to add file to instance group for {fiId}: {_ge}")
+            # File group tree removed — groupId arg and instance-group assignment no longer apply
             if args.get("tags"):
                 dbMgmt.updateFile(fileItem.id, {"tags": args["tags"]})
@@ -746,136 +701,7 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
         readOnly=False
     )

-    # ---- Group tools (replaces folder-based tools) ----
-    async def _listGroups(args: Dict[str, Any], context: Dict[str, Any]):
-        contextKey = args.get("contextKey", "files/list")
-        try:
-            chatService = services.chat
-            appInterface = chatService.interfaceDbApp
-            existing = appInterface.getTableGrouping(contextKey)
-            if not existing:
-                return ToolResult(toolCallId="", toolName="listGroups", success=True, data="No groups found.")
-            def _flatten(nodes, depth=0):
-                result = []
-                for n in nodes:
-                    nd = n.model_dump() if hasattr(n, "model_dump") else (n if isinstance(n, dict) else vars(n))
-                    result.append({"id": nd.get("id"), "name": nd.get("name"), "depth": depth, "itemCount": len(nd.get("itemIds", []))})
-                    result.extend(_flatten(nd.get("subGroups", []), depth + 1))
-                return result
-            groups = _flatten(existing.rootGroups)
-            lines = "\n".join(
-                f"{' ' * g['depth']}- {g['name']} (id: {g['id']}, items: {g['itemCount']})"
-                for g in groups
-            ) if groups else "No groups found."
-            return ToolResult(toolCallId="", toolName="listGroups", success=True, data=lines)
-        except Exception as e:
-            return ToolResult(toolCallId="", toolName="listGroups", success=False, error=str(e))
-
-    async def _listItemsInGroup(args: Dict[str, Any], context: Dict[str, Any]):
-        groupId = args.get("groupId", "")
-        contextKey = args.get("contextKey", "files/list")
-        if not groupId:
-            return ToolResult(toolCallId="", toolName="listItemsInGroup", success=False, error="groupId is required")
-        try:
-            from modules.routes.routeHelpers import _collectItemIds
-            chatService = services.chat
-            appInterface = chatService.interfaceDbApp
-            existing = appInterface.getTableGrouping(contextKey)
-            if not existing:
-                return ToolResult(toolCallId="", toolName="listItemsInGroup", success=True, data="No groups found.")
-            nodes = [n.model_dump() if hasattr(n, "model_dump") else (n if isinstance(n, dict) else vars(n)) for n in existing.rootGroups]
-            ids = _collectItemIds(nodes, groupId)
-            itemList = list(ids) if ids else []
-            return ToolResult(
-                toolCallId="", toolName="listItemsInGroup", success=True,
-                data="\n".join(f"- {fid}" for fid in itemList) if itemList else "No items in group.",
-            )
-        except Exception as e:
-            return ToolResult(toolCallId="", toolName="listItemsInGroup", success=False, error=str(e))
-
-    async def _addItemsToGroup(args: Dict[str, Any], context: Dict[str, Any]):
-        groupId = args.get("groupId", "")
-        itemIds = args.get("itemIds", [])
-        contextKey = args.get("contextKey", "files/list")
-        if not groupId:
-            return ToolResult(toolCallId="", toolName="addItemsToGroup", success=False, error="groupId is required")
-        if not itemIds:
-            return ToolResult(toolCallId="", toolName="addItemsToGroup", success=False, error="itemIds is required")
-        try:
-            chatService = services.chat
-            appInterface = chatService.interfaceDbApp
-            existing = appInterface.getTableGrouping(contextKey)
-            nodes = [n.model_dump() if hasattr(n, "model_dump") else (n if isinstance(n, dict) else vars(n)) for n in (existing.rootGroups if existing else [])]
-            def _add(nds):
-                for nd in nds:
-                    nid = nd.get("id") if isinstance(nd, dict) else getattr(nd, "id", None)
-                    if nid == groupId:
-                        existing_ids = list(nd.get("itemIds", []) if isinstance(nd, dict) else getattr(nd, "itemIds", []))
-                        for fid in itemIds:
-                            if fid not in existing_ids:
-                                existing_ids.append(fid)
-                        if isinstance(nd, dict):
-                            nd["itemIds"] = existing_ids
-                        return True
-                    if _add(nd.get("subGroups", []) if isinstance(nd, dict) else getattr(nd, "subGroups", [])):
-                        return True
-                return False
-            found = _add(nodes)
-            if not found:
-                return ToolResult(toolCallId="", toolName="addItemsToGroup", success=False, error=f"Group {groupId} not found")
-            appInterface.upsertTableGrouping(contextKey, nodes)
-            return ToolResult(
-                toolCallId="", toolName="addItemsToGroup", success=True,
-                data=f"Added {len(itemIds)} item(s) to group {groupId}",
-            )
-        except Exception as e:
-            return ToolResult(toolCallId="", toolName="addItemsToGroup", success=False, error=str(e))
-
-    registry.register(
-        "listGroups", _listGroups,
-        description="List all groups in the file grouping tree. Groups replace folders for organising files.",
-        parameters={
-            "type": "object",
-            "properties": {
-                "contextKey": {"type": "string", "description": "Grouping context key (default: 'files/list')"},
-            }
-        },
-        readOnly=True
-    )
-    registry.register(
-        "listItemsInGroup", _listItemsInGroup,
-        description="List all file IDs assigned to a specific group (includes sub-groups recursively).",
-        parameters={
-            "type": "object",
-            "properties": {
-                "groupId": {"type": "string", "description": "The group ID to inspect"},
-                "contextKey": {"type": "string", "description": "Grouping context key (default: 'files/list')"},
-            },
-            "required": ["groupId"]
-        },
-        readOnly=True
-    )
-    registry.register(
-        "addItemsToGroup", _addItemsToGroup,
-        description="Add one or more file IDs to an existing group.",
-        parameters={
-            "type": "object",
-            "properties": {
-                "groupId": {"type": "string", "description": "The group ID to add files to"},
-                "itemIds": {"type": "array", "items": {"type": "string"}, "description": "List of file IDs to add"},
-                "contextKey": {"type": "string", "description": "Grouping context key (default: 'files/list')"},
-            },
-            "required": ["groupId", "itemIds"]
-        },
-        readOnly=False
-    )
+    # Group tree tools removed — file grouping now uses view-based display grouping (TableListView)
     registry.register(
         "replaceInFile", _replaceInFile,

View file

@@ -69,7 +69,15 @@ class _ServicesAdapter:
     @property
     def workflow(self):
-        return self._context.workflow
+        return getattr(self, "_workflow_override", None) or self._context.workflow
+
+    @workflow.setter
+    def workflow(self, value):
+        self._workflow_override = value
+        try:
+            self._context.workflow = value
+        except (AttributeError, TypeError):
+            pass

     @property
     def ai(self):
@@ -95,6 +103,13 @@ class _ServicesAdapter:
     def extraction(self):
         return self._getService("extraction")

+    @property
+    def interfaceDbComponent(self):
+        try:
+            return self.chat.interfaceDbComponent
+        except Exception:
+            return None
+
     @property
     def rbac(self):
         """Same RbacClass as workflow hub (MethodBase permission checks during discoverMethods)."""

View file

@@ -10,8 +10,8 @@ from typing import Dict, Any

 logger = logging.getLogger(__name__)

-_PYTHON_ALLOWED_MODULES = {
-    "math", "statistics", "json", "csv", "re", "datetime",
+SANDBOX_ALLOWED_MODULES = {
+    "math", "statistics", "json", "csv", "re", "datetime", "time",
     "collections", "itertools", "functools", "decimal", "fractions",
     "random", "string", "textwrap", "operator", "copy",
 }
@@ -19,17 +19,33 @@ _PYTHON_ALLOWED_MODULES = {
 _PYTHON_BLOCKED_BUILTINS = {
     "open", "exec", "eval", "compile", "__import__", "globals", "locals",
     "getattr", "setattr", "delattr", "breakpoint", "exit", "quit",
-    "input", "memoryview", "type",
+    "input", "memoryview",
 }

 _MAX_EXECUTION_TIME_S = 30
 _MAX_OUTPUT_CHARS = 50000

+_RESTRICTED_IO = None
+
+
+def _getRestrictedIo():
+    """Return a restricted ``io`` module exposing only StringIO/BytesIO."""
+    global _RESTRICTED_IO
+    if _RESTRICTED_IO is None:
+        import types
+        m = types.ModuleType("io")
+        m.StringIO = io.StringIO
+        m.BytesIO = io.BytesIO
+        _RESTRICTED_IO = m
+    return _RESTRICTED_IO
+
+
 def _safeImport(name, *args, **kwargs):
     """Restricted import that only allows whitelisted modules."""
-    if name not in _PYTHON_ALLOWED_MODULES:
-        raise ImportError(f"Module '{name}' is not allowed. Permitted: {', '.join(sorted(_PYTHON_ALLOWED_MODULES))}")
+    if name == "io":
+        return _getRestrictedIo()
+    if name not in SANDBOX_ALLOWED_MODULES:
+        raise ImportError(f"Module '{name}' is not allowed. Permitted: io (StringIO/BytesIO only), {', '.join(sorted(SANDBOX_ALLOWED_MODULES))}")
     return __builtins__["__import__"](name, *args, **kwargs) if isinstance(__builtins__, dict) else __import__(name, *args, **kwargs)
@@ -48,7 +64,7 @@ def _buildRestrictedGlobals() -> Dict[str, Any]:
     safeBuiltins["__name__"] = "__sandbox__"
     safeBuiltins["__builtins__"] = safeBuiltins

-    for modName in _PYTHON_ALLOWED_MODULES:
+    for modName in SANDBOX_ALLOWED_MODULES:
         try:
             safeBuiltins[modName] = __import__(modName)
         except ImportError:
@@ -57,12 +73,27 @@ def _buildRestrictedGlobals() -> Dict[str, Any]:
     return {"__builtins__": safeBuiltins}

-async def executePython(code: str) -> Dict[str, Any]:
+def _makeReadFile(services):
+    """Create a readFile(fileId) closure bound to the current services context."""
+    def readFile(fileId: str) -> str:
+        mgmt = getattr(services, 'interfaceDbComponent', None) if services else None
+        if not mgmt:
+            raise RuntimeError("readFile: no file store available in this session")
+        data = mgmt.getFileData(str(fileId))
+        if data is None:
+            raise FileNotFoundError(f"File '{fileId}' not found in workspace")
+        return data.decode("utf-8")
+    return readFile
+
+
+async def executePython(code: str, *, services=None) -> Dict[str, Any]:
     """Execute Python code in a restricted sandbox. Returns {success, output, error}."""
     import asyncio

     def _run():
         restrictedGlobals = _buildRestrictedGlobals()
+        if services:
+            restrictedGlobals["__builtins__"]["readFile"] = _makeReadFile(services)
         capturedOutput = io.StringIO()
         oldStdout = sys.stdout
         oldStderr = sys.stderr
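
With these changes, agent code running in the sandbox can combine the injected readFile builtin with the restricted io module. A small snippet that should now execute (the file id is a placeholder):

    import csv, io

    raw = readFile("<workspace-file-id>")       # injected builtin; returns UTF-8 text
    rows = list(csv.reader(io.StringIO(raw)))   # 'io' resolves to the StringIO/BytesIO-only module
    print(f"{len(rows)} rows; header: {rows[0] if rows else None}")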

View file

@@ -57,8 +57,7 @@ from .subJsonResponseHandling import JsonResponseHandler
 from .subLoopingUseCases import LoopingUseCaseRegistry
 from modules.workflows.processing.shared.stateTools import checkWorkflowStopped
 from modules.shared.jsonContinuation import getContexts
-from modules.shared.jsonUtils import buildContinuationContext, extractJsonString, tryParseJson
-from modules.shared.jsonUtils import tryParseJson
+from modules.shared.jsonUtils import buildContinuationContext, tryParseJson
 from modules.shared.jsonUtils import closeJsonStructures
 from modules.shared.jsonUtils import stripCodeFences, normalizeJsonText
@@ -142,6 +141,8 @@ class AiCallLooper:
         MAX_MERGE_FAILS = 3
         mergeFailCount = 0  # Global counter for merge failures across entire loop
         lastValidCompletePart = None  # Store last successfully parsed completePart for fallback
+        MAX_CONSECUTIVE_EMPTY_RESPONSES = 3
+        consecutive_empty_responses = 0

         # Get parent operation ID for iteration operations (parentId should be operationId, not log entry ID)
         parentOperationId = operationId  # Use the parent's operationId directly
@@ -284,8 +285,26 @@ class AiCallLooper:
                     break

             if not result or not result.strip():
-                logger.warning(f"Iteration {iteration}: Empty response, stopping")
-                break
+                consecutive_empty_responses += 1
+                logger.warning(
+                    "Iteration %s: Empty AI response (consecutive %s/%s) modelName=%s errorCount=%s",
+                    iteration,
+                    consecutive_empty_responses,
+                    MAX_CONSECUTIVE_EMPTY_RESPONSES,
+                    getattr(response, "modelName", None),
+                    getattr(response, "errorCount", None),
+                )
+                if iterationOperationId:
+                    self.services.chat.progressLogFinish(iterationOperationId, False)
+                if consecutive_empty_responses >= MAX_CONSECUTIVE_EMPTY_RESPONSES:
+                    logger.error(
+                        "Stopping loop: %s consecutive empty responses from model",
+                        consecutive_empty_responses,
+                    )
+                    break
+                continue
+            consecutive_empty_responses = 0

             # Check if this is a text response (not document generation)
             # Text responses don't need JSON parsing - return immediately after first successful response
@@ -354,9 +373,8 @@ class AiCallLooper:
                 if lastValidCompletePart:
                     try:
-                        extracted = extractJsonString(lastValidCompletePart)
-                        parsed, parseErr, _ = tryParseJson(extracted)
-                        if parseErr is None and parsed:
+                        parsed, parseErr, _ = tryParseJson(lastValidCompletePart)
+                        if parseErr is None:
                             normalized = self._normalizeJsonStructure(parsed, useCase)
                             return json.dumps(normalized, indent=2, ensure_ascii=False)
                     except Exception:
@@ -384,11 +402,10 @@ class AiCallLooper:
             # This ensures retry iterations use the correct base context
             lastRawResponse = candidateJson

-            # Try direct parse of candidate
+            # Try direct parse of candidate (same pipeline as structure filling / getContexts)
             try:
-                extracted = extractJsonString(candidateJson)
-                parsed, parseErr, _ = tryParseJson(extracted)
-                if parseErr is None and parsed:
+                parsed, parseErr, extracted = tryParseJson(candidateJson)
+                if parseErr is None:
                     # Direct parse succeeded - FINISHED
                     # Commit candidate to jsonBase
                     jsonBase = candidateJson
@@ -421,39 +438,50 @@ class AiCallLooper:
             # STEP 6: DECIDE based on jsonParsingSuccess and overlapContext
             if contexts.jsonParsingSuccess and contexts.overlapContext == "":
-                # JSON is complete (no cut point) - FINISHED
-                # Use completePart for final result (closed, repaired JSON)
-                # No more merging needed, so we don't need the cut version
-                jsonBase = contexts.completePart
+                # getContexts and downstream must agree with tryParseJson (same as structure filling).
                 logger.info(f"Iteration {iteration}: jsonParsingSuccess=true, overlapContext='', JSON complete")
-                # Store and parse completePart
                 lastValidCompletePart = contexts.completePart
                 try:
-                    extracted = extractJsonString(contexts.completePart)
-                    parsed, parseErr, _ = tryParseJson(extracted)
-                    if parseErr is None and parsed:
-                        normalized = self._normalizeJsonStructure(parsed, useCase)
-                        result = json.dumps(normalized, indent=2, ensure_ascii=False)
-                        if iterationOperationId:
-                            self.services.chat.progressLogFinish(iterationOperationId, True)
-                        if not useCase.finalResultHandler:
-                            raise ValueError(
-                                f"Use case '{useCaseId}' is missing required 'finalResultHandler' callback."
-                            )
-                        return useCase.finalResultHandler(
-                            result, normalized, extracted, debugPrefix, self.services
-                        )
+                    parsed, parseErr, extracted = tryParseJson(contexts.completePart)
+                    if parseErr is not None:
+                        raise ValueError(str(parseErr))
+                    normalized = self._normalizeJsonStructure(parsed, useCase)
+                    result = json.dumps(normalized, indent=2, ensure_ascii=False)
+                    jsonBase = contexts.completePart
+                    if iterationOperationId:
+                        self.services.chat.progressLogFinish(iterationOperationId, True)
+                    if not useCase.finalResultHandler:
+                        raise ValueError(
+                            f"Use case '{useCaseId}' is missing required 'finalResultHandler' callback."
+                        )
+                    return useCase.finalResultHandler(
+                        result, normalized, extracted, debugPrefix, self.services
+                    )
                 except Exception as e:
-                    logger.warning(f"Iteration {iteration}: Failed to parse completePart: {e}")
-                    # Fallback: return completePart as-is
-                    if iterationOperationId:
-                        self.services.chat.progressLogFinish(iterationOperationId, True)
-                    return contexts.completePart
+                    logger.warning(
+                        f"Iteration {iteration}: completePart not serializable after getContexts success: {e}"
+                    )
+                    mergeFailCount += 1
+                    if mergeFailCount >= MAX_MERGE_FAILS:
+                        logger.error(
+                            f"Iteration {iteration}: Max failures ({MAX_MERGE_FAILS}) "
+                            "after output pipeline mismatch"
+                        )
+                        if iterationOperationId:
+                            self.services.chat.progressLogFinish(iterationOperationId, False)
+                        return jsonBase if jsonBase else ""
+                    if iterationOperationId:
+                        self.services.chat.progressLogUpdate(
+                            iterationOperationId,
+                            0.7,
+                            f"Output pipeline failed ({mergeFailCount}/{MAX_MERGE_FAILS}), retrying",
+                        )
+                        self.services.chat.progressLogFinish(iterationOperationId, True)
+                    continue

             elif contexts.jsonParsingSuccess and contexts.overlapContext != "":
                 # JSON parseable but has cut point - CONTINUE to next iteration
@@ -502,9 +530,8 @@ class AiCallLooper:
                 if lastValidCompletePart:
                     try:
-                        extracted = extractJsonString(lastValidCompletePart)
-                        parsed, parseErr, _ = tryParseJson(extracted)
-                        if parseErr is None and parsed:
+                        parsed, parseErr, _ = tryParseJson(lastValidCompletePart)
+                        if parseErr is None:
                             normalized = self._normalizeJsonStructure(parsed, useCase)
                             return json.dumps(normalized, indent=2, ensure_ascii=False)
                     except Exception:
@@ -532,10 +559,36 @@ class AiCallLooper:
         if iteration >= maxIterations:
             logger.warning(f"AI call stopped after maximum iterations ({maxIterations})")
+<<<<<<< HEAD
+        # Prefer last repaired complete JSON from getContexts (raw `result` is only the last fragment).
+        if lastValidCompletePart and useCase and not useCase.requiresExtraction:
+            try:
+                parsed, parseErr, extracted = tryParseJson(lastValidCompletePart)
+                if parseErr is None:
+                    normalized = self._normalizeJsonStructure(parsed, useCase)
+                    out = json.dumps(normalized, indent=2, ensure_ascii=False)
+                    if useCase.finalResultHandler:
+                        logger.warning(
+                            "callAiWithLooping: max iterations — returning last valid completePart for %r",
+                            useCaseId,
+                        )
+                        return useCase.finalResultHandler(
+                            out, normalized, extracted, debugPrefix, self.services
+                        )
+            except Exception as e:
+                logger.debug("Max-iterations fallback on completePart failed: %s", e)
+=======
         # This code path should never be reached because all registered use cases
         # return early when JSON is complete. This would only execute for use cases that
         # require section extraction, but no such use cases are currently registered.
-        logger.error(f"Unexpected code path: reached end of loop without return for use case '{useCaseId}'")
+>>>>>>> 875f8252 (ValueOn Lead to Offer walked through end to end, bugfixes in file generation and AI nodes)
+        logger.error(
+            "End of callAiWithLooping without success for use case %r (iterations=%s, lastResultLen=%s)",
+            useCaseId,
+            iteration,
+            len(result) if isinstance(result, str) else 0,
+        )
         return result if result else ""

     def _isJsonStringIncomplete(self, jsonString: str) -> bool:
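
This commit standardizes all parsing on a single entry point. Its contract, as inferred from the call sites above (the jsonUtils source itself is not part of this diff):

    # parsed, parseError, cleaned = tryParseJson(rawText)
    #   parseError is None  -> parsed is the decoded object, cleaned the extracted JSON text
    #   parseError not None -> parsed is unusable; callers log, raise, or fall back to jsonBase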

View file

@@ -54,6 +54,15 @@ def _handleCodeContentFinalResult(result: str, parsedJsonForUseCase: Any, extrac
     return final_json

+def _lift_section_plain_text(d: Dict[str, Any]) -> Optional[str]:
+    """Models often return {\"text\": \"...\"} without an elements array; extract usable prose."""
+    for key in ("text", "body", "summary", "response", "output", "answer", "message", "content"):
+        v = d.get(key)
+        if isinstance(v, str) and v.strip():
+            return v.strip()
+    return None
+
+
 def _normalizeSectionContentJson(parsed: Any, useCaseId: str) -> Any:
     """Normalize JSON structure for section_content use case."""
     # For section_content, expect {"elements": [...]} structure
@@ -77,15 +86,29 @@ def _normalizeSectionContentJson(parsed: Any, useCaseId: str) -> Any:
         # Convert plain list of elements to elements structure
         return {"elements": parsed}
     elif isinstance(parsed, dict):
-        # If it already has "elements", return as-is
         if "elements" in parsed:
+            els = parsed.get("elements")
+            if isinstance(els, list) and len(els) > 0:
+                return parsed
+            lifted = _lift_section_plain_text(parsed)
+            if lifted:
+                out = dict(parsed)
+                out["elements"] = [{"type": "paragraph", "content": {"text": lifted}}]
+                logger.info(
+                    "section_content: promoted plain-text field to elements (%d chars)",
+                    len(lifted),
+                )
+                return out
             return parsed
-        # If it has "type" and looks like an element, wrap in elements array
-        elif parsed.get("type"):
+        if parsed.get("type"):
             return {"elements": [parsed]}
-        # Otherwise, assume it's already in correct format
-        else:
-            return parsed
+        lifted = _lift_section_plain_text(parsed)
+        if lifted:
+            return {
+                **parsed,
+                "elements": [{"type": "paragraph", "content": {"text": lifted}}],
+            }
+        return parsed
     # For other use cases, return as-is (they have their own structures)
     return parsed
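
To make the promotion concrete, a sketch of what the section_content path would return for a plain-text payload, assuming execution reaches the dict branch shown above:

    out = _normalizeSectionContentJson({"text": "Hi"}, "section_content")
    # -> {"text": "Hi", "elements": [{"type": "paragraph", "content": {"text": "Hi"}}]}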

View file

@@ -27,6 +27,36 @@ class _AiResponseFallback:

 logger = logging.getLogger(__name__)

+
+def _elements_from_section_content_ai_json(parsed: Any) -> List[Any]:
+    """Normalize section_content AI JSON (incl. models that return {\"text\": ...}) into elements."""
+    from modules.serviceCenter.services.serviceAi.subLoopingUseCases import _normalizeSectionContentJson
+    if parsed is None:
+        return []
+    if isinstance(parsed, dict):
+        has_nonempty_elements = (
+            isinstance(parsed.get("elements"), list) and len(parsed["elements"]) > 0
+        )
+        if not has_nonempty_elements:
+            # Valid full-document envelope (same normalized shape the renderer uses elsewhere)
+            docs = parsed.get("documents")
+            if isinstance(docs, list) and docs and isinstance(docs[0], dict):
+                secs = docs[0].get("sections")
+                if isinstance(secs, list) and secs and isinstance(secs[0], dict):
+                    parsed = secs[0]
+            elif (
+                isinstance(parsed.get("sections"), list)
+                and parsed["sections"]
+                and isinstance(parsed["sections"][0], dict)
+            ):
+                parsed = parsed["sections"][0]
+    norm = _normalizeSectionContentJson(parsed, "section_content")
+    if isinstance(norm, dict):
+        els = norm.get("elements")
+        return list(els) if isinstance(els, list) else []
+    return []
+
+
 class StructureFiller:
     """Handles filling document structure with content."""
@@ -524,38 +554,12 @@ class StructureFiller:
             if generatedElements:
                 elements.extend(generatedElements)
             else:
-                # Fallback: Try to parse JSON response directly with repair logic
-                try:
-                    from modules.shared.jsonUtils import tryParseJson, repairBrokenJson
-
-                    # Use tryParseJson which handles extraction and basic parsing
-                    fallbackElements, parseError, cleanedStr = tryParseJson(aiResponse.content)
-
-                    # If parsing failed, try repair
-                    if parseError and isinstance(aiResponse.content, str):
-                        logger.warning(f"Initial JSON parse failed for section {sectionId}, attempting repair: {str(parseError)}")
-                        repairedJson = repairBrokenJson(aiResponse.content)
-                        if repairedJson:
-                            fallbackElements = repairedJson
-                            parseError = None
-                            logger.info(f"Successfully repaired JSON for section {sectionId}")
-
-                    if parseError:
-                        raise parseError
-
-                    if isinstance(fallbackElements, list):
-                        elements.extend(fallbackElements)
-                    elif isinstance(fallbackElements, dict) and "elements" in fallbackElements:
-                        elements.extend(fallbackElements["elements"])
-                    elif isinstance(fallbackElements, dict) and fallbackElements.get("type"):
-                        elements.append(fallbackElements)
-                except (json.JSONDecodeError, ValueError) as json_error:
-                    logger.error(f"Error parsing JSON response for section {sectionId}: {str(json_error)}")
-                    elements.append({
-                        "type": "error",
-                        "message": f"Failed to parse JSON response: {str(json_error)}",
-                        "sectionId": sectionId
-                    })
+                logger.error(f"No elements produced for section {sectionId} (callAiWithLooping must return parseable JSON)")
+                elements.append({
+                    "type": "error",
+                    "message": f"No parsed content for section {sectionId}",
+                    "sectionId": sectionId
+                })

         return elements
@@ -671,7 +675,7 @@ class StructureFiller:
         try:
             self.services.chat.progressLogUpdate(sectionOperationId, 0.4, "Calling AI for content generation")

-            operationType = OperationTypeEnum.DATA_ANALYSE
+            operationType = OperationTypeEnum.DATA_GENERATE
             options = AiCallOptions(
                 operationType=operationType,
                 priority=PriorityEnum.BALANCED,
@@ -703,22 +707,17 @@ class StructureFiller:
             )

             try:
-                from modules.shared.jsonUtils import tryParseJson, repairBrokenJson
+                from modules.shared.jsonUtils import tryParseJson
                 if isinstance(aiResponseJson, str) and ("---" in aiResponseJson or aiResponseJson.count("```json") > 1):
                     generatedElements = self._extractAndMergeMultipleJsonBlocks(aiResponseJson, contentType, sectionId)
                 else:
-                    parsedResponse, parseError, cleanedStr = tryParseJson(aiResponseJson)
-                    if parsedResponse is None:
-                        logger.warning(f"Section {sectionId}: tryParseJson failed, attempting repair")
-                        repairedStr = repairBrokenJson(aiResponseJson)
-                        parsedResponse, parseError2, _ = tryParseJson(repairedStr)
-                    if parsedResponse and isinstance(parsedResponse, dict):
-                        generatedElements = parsedResponse.get("elements", [])
-                    elif parsedResponse and isinstance(parsedResponse, list):
-                        generatedElements = parsedResponse
-                    else:
+                    parsedResponse, parseError, _ = tryParseJson(aiResponseJson)
+                    if parseError is not None:
+                        logger.error(f"Section {sectionId}: tryParseJson failed: {parseError}")
                         generatedElements = []
+                    else:
+                        generatedElements = _elements_from_section_content_ai_json(parsedResponse)
             except Exception as parseErr:
                 logger.error(f"Section {sectionId}: JSON parse error: {parseErr}")
                 generatedElements = []
@@ -930,7 +929,7 @@ class StructureFiller:
             self.services.chat.progressLogUpdate(sectionOperationId, 0.4, "Calling AI for content generation")

-            operationType = OperationTypeEnum.IMAGE_GENERATE if contentType == "image" else OperationTypeEnum.DATA_ANALYSE
+            operationType = OperationTypeEnum.IMAGE_GENERATE if contentType == "image" else OperationTypeEnum.DATA_GENERATE

             if operationType == OperationTypeEnum.IMAGE_GENERATE:
                 maxPromptLength = 4000
@@ -996,43 +995,16 @@ class StructureFiller:
             )

             try:
-                # Use tryParseJson which handles extraction and basic parsing
-                from modules.shared.jsonUtils import tryParseJson, repairBrokenJson
-
-                # Check if response contains multiple JSON blocks (separated by --- or multiple ```json blocks)
-                # This can happen when AI returns multiple complete responses
+                from modules.shared.jsonUtils import tryParseJson
                 if isinstance(aiResponseJson, str) and ("---" in aiResponseJson or aiResponseJson.count("```json") > 1):
                     logger.info(f"Section {sectionId}: Detected multiple JSON blocks in response, attempting to merge")
                     generatedElements = self._extractAndMergeMultipleJsonBlocks(aiResponseJson, contentType, sectionId)
                 else:
-                    parsedResponse, parseError, cleanedStr = tryParseJson(aiResponseJson)
-
-                    # If parsing failed, try repair
-                    if parseError and isinstance(aiResponseJson, str):
-                        logger.warning(f"Initial JSON parse failed for section {sectionId}, attempting repair: {str(parseError)}")
-                        repairedJson = repairBrokenJson(aiResponseJson)
-                        if repairedJson:
-                            parsedResponse = repairedJson
-                            parseError = None
-                            logger.info(f"Successfully repaired JSON for section {sectionId}")
-                    if parseError:
+                    parsedResponse, parseError, _ = tryParseJson(aiResponseJson)
+                    if parseError is not None:
                         raise parseError
-
-                    if isinstance(parsedResponse, list):
-                        generatedElements = parsedResponse
-                    elif isinstance(parsedResponse, dict):
-                        if "elements" in parsedResponse:
-                            generatedElements = parsedResponse["elements"]
-                        elif "sections" in parsedResponse and len(parsedResponse["sections"]) > 0:
-                            firstSection = parsedResponse["sections"][0]
-                            generatedElements = firstSection.get("elements", [])
-                        elif parsedResponse.get("type"):
-                            generatedElements = [parsedResponse]
-                        else:
-                            generatedElements = []
-                    else:
-                        generatedElements = []
+                    generatedElements = _elements_from_section_content_ai_json(parsedResponse)

                 aiResponse = _AiResponseFallback(aiResponseJson)
             except Exception as parseError:
@@ -1112,7 +1084,7 @@ class StructureFiller:
             self.services.chat.progressLogUpdate(sectionOperationId, 0.4, "Calling AI for content generation")

-            operationType = OperationTypeEnum.IMAGE_GENERATE if contentType == "image" else OperationTypeEnum.DATA_ANALYSE
+            operationType = OperationTypeEnum.IMAGE_GENERATE if contentType == "image" else OperationTypeEnum.DATA_GENERATE

             if operationType == OperationTypeEnum.IMAGE_GENERATE:
                 maxPromptLength = 4000
@@ -1135,6 +1107,7 @@ class StructureFiller:
                     processingMode=ProcessingModeEnum.DETAILED
                 )
             )
+            checkWorkflowStopped(self.services)
             aiResponse = await self.aiService.callAi(request)

             generatedElements = []
@@ -1179,22 +1152,16 @@ class StructureFiller:
             )

             try:
-                parsedResponse = json.loads(self.services.utils.jsonExtractString(aiResponseJson))
-                if isinstance(parsedResponse, list):
-                    generatedElements = parsedResponse
-                elif isinstance(parsedResponse, dict):
-                    if "elements" in parsedResponse:
-                        generatedElements = parsedResponse["elements"]
-                    elif "sections" in parsedResponse and len(parsedResponse["sections"]) > 0:
-                        firstSection = parsedResponse["sections"][0]
-                        generatedElements = firstSection.get("elements", [])
-                    elif parsedResponse.get("type"):
-                        generatedElements = [parsedResponse]
-                    else:
-                        generatedElements = []
-                else:
-                    generatedElements = []
+                from modules.shared.jsonUtils import tryParseJson
+                parsedResponse, parseError, _ = tryParseJson(aiResponseJson)
+                if parseError is not None:
+                    logger.error(
+                        f"Error parsing response from _callAiWithLooping for section {sectionId}: {parseError}"
+                    )
+                    generatedElements = []
+                else:
+                    generatedElements = _elements_from_section_content_ai_json(parsedResponse)
                 aiResponse = _AiResponseFallback(aiResponseJson)
             except Exception as parseError:
logger.error(f"Error parsing response from _callAiWithLooping for section {sectionId}: {str(parseError)}") logger.error(f"Error parsing response from _callAiWithLooping for section {sectionId}: {str(parseError)}")
@ -1371,7 +1338,7 @@ class StructureFiller:
self.services.chat.progressLogUpdate(sectionOperationId, 0.4, "Calling AI for content generation") self.services.chat.progressLogUpdate(sectionOperationId, 0.4, "Calling AI for content generation")
operationType = OperationTypeEnum.IMAGE_GENERATE if contentType == "image" else OperationTypeEnum.DATA_ANALYSE operationType = OperationTypeEnum.IMAGE_GENERATE if contentType == "image" else OperationTypeEnum.DATA_GENERATE
if operationType == OperationTypeEnum.IMAGE_GENERATE: if operationType == OperationTypeEnum.IMAGE_GENERATE:
maxPromptLength = 4000 maxPromptLength = 4000
@ -1439,22 +1406,16 @@ class StructureFiller:
) )
try: try:
parsedResponse = json.loads(self.services.utils.jsonExtractString(aiResponseJson)) from modules.shared.jsonUtils import tryParseJson
if isinstance(parsedResponse, list):
generatedElements = parsedResponse
elif isinstance(parsedResponse, dict):
if "elements" in parsedResponse:
generatedElements = parsedResponse["elements"]
elif "sections" in parsedResponse and len(parsedResponse["sections"]) > 0:
firstSection = parsedResponse["sections"][0]
generatedElements = firstSection.get("elements", [])
elif parsedResponse.get("type"):
generatedElements = [parsedResponse]
else:
generatedElements = []
else:
generatedElements = []
parsedResponse, parseError, _ = tryParseJson(aiResponseJson)
if parseError is not None:
logger.error(
f"Error parsing response from _callAiWithLooping for section {sectionId}: {parseError}"
)
generatedElements = []
else:
generatedElements = _elements_from_section_content_ai_json(parsedResponse)
aiResponse = _AiResponseFallback(aiResponseJson) aiResponse = _AiResponseFallback(aiResponseJson)
except Exception as parseError: except Exception as parseError:
logger.error(f"Error parsing response from _callAiWithLooping for section {sectionId}: {str(parseError)}") logger.error(f"Error parsing response from _callAiWithLooping for section {sectionId}: {str(parseError)}")

View file

@ -90,8 +90,7 @@ class StructureGenerator:
) )
try: try:
# Build chapter structure prompt with content index structurePrompt, templateStructure = self._buildChapterStructurePrompt(
structurePrompt = self._buildChapterStructurePrompt(
userPrompt=userPrompt, userPrompt=userPrompt,
contentParts=contentParts, contentParts=contentParts,
outputFormat=outputFormat outputFormat=outputFormat
@ -108,12 +107,6 @@ class StructureGenerator:
resultFormat="json" resultFormat="json"
) )
structurePrompt, templateStructure = self._buildChapterStructurePrompt(
userPrompt=userPrompt,
contentParts=contentParts,
outputFormat=outputFormat
)
# Create prompt builder for continuation support # Create prompt builder for continuation support
async def buildChapterStructurePromptWithContinuation( async def buildChapterStructurePromptWithContinuation(
continuationContext: Any, continuationContext: Any,
@ -196,6 +189,13 @@ CRITICAL:
contentParts=None # Do not pass ContentParts - only metadata needed, not content extraction contentParts=None # Do not pass ContentParts - only metadata needed, not content extraction
) )
if not isinstance(aiResponseJson, str) or not aiResponseJson.strip():
raise ValueError(
"Structure generation returned no JSON text from the model (empty response after retries). "
"Check the AI provider, allowed models, billing, and debug artifact "
"'chapter_structure_generation_response'."
)
# Parse the complete JSON response (looping system already handles completion) # Parse the complete JSON response (looping system already handles completion)
extractedJson = self.services.utils.jsonExtractString(aiResponseJson) extractedJson = self.services.utils.jsonExtractString(aiResponseJson)
parsedJson, parseError, cleanedJson = self.services.utils.jsonTryParse(extractedJson) parsedJson, parseError, cleanedJson = self.services.utils.jsonTryParse(extractedJson)
@ -215,7 +215,12 @@ CRITICAL:
raise ValueError(f"Failed to parse JSON structure after repair: {str(parseError)}") raise ValueError(f"Failed to parse JSON structure after repair: {str(parseError)}")
else: else:
logger.error(f"Failed to repair JSON. Parse error: {str(parseError)}") logger.error(f"Failed to repair JSON. Parse error: {str(parseError)}")
logger.error(f"Cleaned JSON preview (first 500 chars): {cleanedJson[:500]}") raw_preview = (extractedJson or "")[:500]
logger.error(
"Raw extract preview (first 500 chars): %r",
raw_preview,
)
logger.error(f"Cleaned JSON preview (first 500 chars): {cleanedJson[:500]!r}")
raise ValueError(f"Failed to parse JSON structure: {str(parseError)}") raise ValueError(f"Failed to parse JSON structure: {str(parseError)}")
else: else:
structure = parsedJson structure = parsedJson

View file

@ -23,7 +23,11 @@ class ChatService:
from modules.interfaces.interfaceDbManagement import getInterface as getComponentInterface from modules.interfaces.interfaceDbManagement import getInterface as getComponentInterface
from modules.interfaces.interfaceDbChat import getInterface as getChatInterface from modules.interfaces.interfaceDbChat import getInterface as getChatInterface
self.interfaceDbApp = getAppInterface(context.user, mandateId=context.mandate_id) self.interfaceDbApp = getAppInterface(context.user, mandateId=context.mandate_id)
self.interfaceDbComponent = getComponentInterface(context.user, mandateId=context.mandate_id) self.interfaceDbComponent = getComponentInterface(
context.user,
mandateId=context.mandate_id,
featureInstanceId=context.feature_instance_id,
)
self.interfaceDbChat = getChatInterface( self.interfaceDbChat = getChatInterface(
context.user, context.user,
mandateId=context.mandate_id, mandateId=context.mandate_id,
@ -36,6 +40,26 @@ class ChatService:
"""Workflow from context (stable during workflow execution).""" """Workflow from context (stable during workflow execution)."""
return self._context.workflow return self._context.workflow
def _chat_document_from_management_file(self, file_id: str) -> Optional[ChatDocument]:
"""Build a ChatDocument when docItem references a management FileItem (e.g. automation uploads) without a chat message."""
try:
fi = self.interfaceDbComponent.getFile(file_id)
except Exception as e:
logger.debug("getFile(%s) failed: %s", file_id, e)
return None
if fi is None:
return None
wf = self._workflow
wf_id = wf.id if wf else "no-workflow"
return ChatDocument(
id=file_id,
messageId=f"_filestore:{wf_id}",
fileId=fi.id,
fileName=fi.fileName or "document",
fileSize=int(fi.fileSize or 0),
mimeType=fi.mimeType or "application/octet-stream",
)
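A minimal stand-in showing how the new fallback behaves, with SimpleNamespace doubling for FileItem; the field names come from the hunk above, everything else is illustrative:

from types import SimpleNamespace

def chat_document_from_file(get_file, file_id, workflow_id="no-workflow"):
    # Same shape as _chat_document_from_management_file, minus the DB interfaces.
    fi = get_file(file_id)
    if fi is None:
        return None
    return {
        "id": file_id,
        "messageId": f"_filestore:{workflow_id}",  # synthetic: no chat message exists
        "fileId": fi.id,
        "fileName": fi.fileName or "document",
        "fileSize": int(fi.fileSize or 0),
        "mimeType": fi.mimeType or "application/octet-stream",
    }

store = {"f1": SimpleNamespace(id="f1", fileName="report.pdf", fileSize=1024, mimeType="application/pdf")}
doc = chat_document_from_file(store.get, "f1", workflow_id="wf-42")
assert doc["messageId"] == "_filestore:wf-42"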
def getChatDocumentsFromDocumentList(self, documentList) -> List[ChatDocument]: def getChatDocumentsFromDocumentList(self, documentList) -> List[ChatDocument]:
"""Get ChatDocuments from a DocumentReferenceList. """Get ChatDocuments from a DocumentReferenceList.
@ -126,14 +150,28 @@ class ChatService:
if message.documents: if message.documents:
for doc in message.documents: for doc in message.documents:
if doc.id == docId: if doc.id == docId or getattr(doc, "fileId", None) == docId:
allDocuments.append(doc) allDocuments.append(doc)
docFound = True docFound = True
logger.debug(f"Matched document reference '{docRef}' to document {doc.id} (fileName: {getattr(doc, 'fileName', 'unknown')}) by documentId") logger.debug(
f"Matched document reference '{docRef}' to document {doc.id} "
f"(fileName: {getattr(doc, 'fileName', 'unknown')}) by id/fileId"
)
break break
if docFound: if docFound:
break break
if not docFound:
synth = self._chat_document_from_management_file(docId)
if synth is not None:
allDocuments.append(synth)
docFound = True
logger.info(
"Resolved document reference %r via FileItem %s (automation / transient workflow)",
docRef,
docId,
)
# Fallback: If not found by documentId and it looks like a filename (has file extension), try filename matching # Fallback: If not found by documentId and it looks like a filename (has file extension), try filename matching
# This handles cases where AI incorrectly generates docItem:filename.docx # This handles cases where AI incorrectly generates docItem:filename.docx
if not docFound and '.' in docId and len(parts) == 2: if not docFound and '.' in docId and len(parts) == 2:
@ -485,34 +523,12 @@ class ChatService:
return results return results
def listGroups(self, contextKey: str = "files/list") -> list: def listGroups(self, contextKey: str = "files/list") -> list:
"""List all groups in the groupTree for the current context.""" """Stub — file group tree removed. Returns empty list."""
try: return []
existing = self.interfaceDbApp.getTableGrouping(contextKey)
if not existing:
return []
def _flatten(nodes, depth=0):
result = []
for n in nodes:
nd = n.model_dump() if hasattr(n, "model_dump") else (n if isinstance(n, dict) else vars(n))
result.append({"id": nd.get("id"), "name": nd.get("name"), "depth": depth, "itemCount": len(nd.get("itemIds", []))})
result.extend(_flatten(nd.get("subGroups", []), depth + 1))
return result
return _flatten(existing.rootGroups)
except Exception as e:
return []
def listFilesInGroup(self, groupId: str, contextKey: str = "files/list") -> list: def listFilesInGroup(self, groupId: str, contextKey: str = "files/list") -> list:
"""List file IDs in a specific group (recursive).""" """Stub — file group tree removed. Returns empty list."""
try: return []
from modules.routes.routeHelpers import _collectItemIds
existing = self.interfaceDbApp.getTableGrouping(contextKey)
if not existing:
return []
nodes = [n.model_dump() if hasattr(n, "model_dump") else (n if isinstance(n, dict) else vars(n)) for n in existing.rootGroups]
ids = _collectItemIds(nodes, groupId)
return list(ids) if ids else []
except Exception:
return []
# ---- DataSource CRUD ---- # ---- DataSource CRUD ----

View file

@ -166,12 +166,28 @@ class ClickupService:
page: int = 0, page: int = 0,
include_closed: bool = False, include_closed: bool = False,
subtasks: bool = True, subtasks: bool = True,
dateCreatedGt: Optional[int] = None,
dateCreatedLt: Optional[int] = None,
dateUpdatedGt: Optional[int] = None,
dateUpdatedLt: Optional[int] = None,
customFields: Optional[List[Dict[str, Any]]] = None,
) -> Dict[str, Any]: ) -> Dict[str, Any]:
params: Dict[str, Any] = { params: Dict[str, Any] = {
"page": page, "page": page,
"subtasks": str(subtasks).lower(), "subtasks": str(subtasks).lower(),
"include_closed": str(include_closed).lower(), "include_closed": str(include_closed).lower(),
} }
if dateCreatedGt is not None:
params["date_created_gt"] = dateCreatedGt
if dateCreatedLt is not None:
params["date_created_lt"] = dateCreatedLt
if dateUpdatedGt is not None:
params["date_updated_gt"] = dateUpdatedGt
if dateUpdatedLt is not None:
params["date_updated_lt"] = dateUpdatedLt
if customFields:
import json as _json
params["custom_fields"] = _json.dumps(customFields)
return await self._request("GET", f"/list/{list_id}/task", params=params) return await self._request("GET", f"/list/{list_id}/task", params=params)
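The new filter kwargs map camelCase names onto ClickUp's snake_case query params, with customFields JSON-encoded into the custom_fields param by getTasks itself. A hedged usage sketch; the field id and value are placeholders, and only the keyword parameters visible in this hunk are assumed:

import time

async def fetch_recently_updated(clickup, list_id: str):
    # ClickUp date filters are epoch milliseconds.
    week_ms = 7 * 24 * 60 * 60 * 1000
    now_ms = int(time.time() * 1000)
    return await clickup.getTasks(
        list_id,
        include_closed=True,
        dateUpdatedGt=now_ms - week_ms,
        customFields=[{"field_id": "abc-123", "operator": "=", "value": "won"}],
    )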
async def getTask(self, task_id: str, *, include_subtasks: bool = True) -> Dict[str, Any]: async def getTask(self, task_id: str, *, include_subtasks: bool = True) -> Dict[str, Any]:

View file

@ -79,7 +79,15 @@ class RendererCodeCsv(BaseCodeRenderer):
return renderedDocs return renderedDocs
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]: async def render(
self,
extractedContent: Dict[str, Any],
title: str,
userPrompt: str = None,
aiService=None,
*,
style: Dict[str, Any] = None,
) -> List[RenderedDocument]:
""" """
Render method for document generation compatibility. Render method for document generation compatibility.
Delegates to document renderer if needed, or handles code files directly. Delegates to document renderer if needed, or handles code files directly.
@ -94,7 +102,7 @@ class RendererCodeCsv(BaseCodeRenderer):
# Document generation path - delegate to document renderer # Document generation path - delegate to document renderer
from .rendererCsv import RendererCsv from .rendererCsv import RendererCsv
documentRenderer = RendererCsv(self.services) documentRenderer = RendererCsv(self.services)
return await documentRenderer.render(extractedContent, title, userPrompt, aiService) return await documentRenderer.render(extractedContent, title, userPrompt, aiService, style=style)
def _validateAndFixCsv(self, content: str) -> str: def _validateAndFixCsv(self, content: str) -> str:
"""Validate CSV structure and fix common issues.""" """Validate CSV structure and fix common issues."""

View file

@ -91,7 +91,15 @@ class RendererCodeJson(BaseCodeRenderer):
return renderedDocs return renderedDocs
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]: async def render(
self,
extractedContent: Dict[str, Any],
title: str,
userPrompt: str = None,
aiService=None,
*,
style: Dict[str, Any] = None,
) -> List[RenderedDocument]:
""" """
Render method for document generation compatibility. Render method for document generation compatibility.
Delegates to document renderer if needed, or handles code files directly. Delegates to document renderer if needed, or handles code files directly.
@ -107,7 +115,7 @@ class RendererCodeJson(BaseCodeRenderer):
# Import here to avoid circular dependency # Import here to avoid circular dependency
from .rendererJson import RendererJson from .rendererJson import RendererJson
documentRenderer = RendererJson(self.services) documentRenderer = RendererJson(self.services)
return await documentRenderer.render(extractedContent, title, userPrompt, aiService) return await documentRenderer.render(extractedContent, title, userPrompt, aiService, style=style)
def _extractJsonStatistics(self, parsed: Any) -> Dict[str, Any]: def _extractJsonStatistics(self, parsed: Any) -> Dict[str, Any]:
"""Extract JSON statistics for validation (object count, array count, key count).""" """Extract JSON statistics for validation (object count, array count, key count)."""

View file

@ -78,11 +78,20 @@ class RendererCodeXml(BaseCodeRenderer):
return renderedDocs return renderedDocs
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]: async def render(
self,
extractedContent: Dict[str, Any],
title: str,
userPrompt: str = None,
aiService=None,
*,
style: Dict[str, Any] = None,
) -> List[RenderedDocument]:
""" """
Render method for document generation compatibility. Render method for document generation compatibility.
For XML, we only support code generation (no document renderer exists yet). For XML, we only support code generation (no document renderer exists yet).
""" """
_ = style
# Check if this is code generation (has files array) # Check if this is code generation (has files array)
if "files" in extractedContent: if "files" in extractedContent:
# Code generation path - use renderCodeFiles # Code generation path - use renderCodeFiles

View file

@ -39,8 +39,27 @@ class RendererCsv(BaseRenderer):
""" """
return ["table", "code_block"] return ["table", "code_block"]
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]: async def render(
self,
extractedContent: Dict[str, Any],
title: str,
userPrompt: str = None,
aiService=None,
*,
style: Dict[str, Any] = None,
) -> List[RenderedDocument]:
"""Render extracted JSON content to CSV format. Produces one CSV file per table section.""" """Render extracted JSON content to CSV format. Produces one CSV file per table section."""
_ = style
try: try:
# Validate JSON structure # Validate JSON structure
if not self._validateJsonStructure(extractedContent): if not self._validateJsonStructure(extractedContent):

View file

@ -43,8 +43,17 @@ class RendererImage(BaseRenderer):
""" """
return ["image"] return ["image"]
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]: async def render(
self,
extractedContent: Dict[str, Any],
title: str,
userPrompt: str = None,
aiService=None,
*,
style: Dict[str, Any] = None,
) -> List[RenderedDocument]:
"""Render extracted JSON content to image format using AI image generation.""" """Render extracted JSON content to image format using AI image generation."""
_ = style
try: try:
# Generate AI image from content # Generate AI image from content
imageContent = await self._generateAiImage(extractedContent, title, userPrompt, aiService) imageContent = await self._generateAiImage(extractedContent, title, userPrompt, aiService)

View file

@ -42,8 +42,17 @@ class RendererJson(BaseRenderer):
# Return all types except image # Return all types except image
return [st for st in supportedSectionTypes if st != "image"] return [st for st in supportedSectionTypes if st != "image"]
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]: async def render(
self,
extractedContent: Dict[str, Any],
title: str,
userPrompt: str = None,
aiService=None,
*,
style: Dict[str, Any] = None,
) -> List[RenderedDocument]:
"""Render extracted JSON content to JSON format.""" """Render extracted JSON content to JSON format."""
_ = style
try: try:
# The extracted content should already be JSON from the AI # The extracted content should already be JSON from the AI
# Just validate and format it # Just validate and format it

View file

@ -40,8 +40,17 @@ class RendererMarkdown(BaseRenderer):
from modules.datamodels.datamodelJson import supportedSectionTypes from modules.datamodels.datamodelJson import supportedSectionTypes
return [st for st in supportedSectionTypes if st != "image"] return [st for st in supportedSectionTypes if st != "image"]
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]: async def render(
self,
extractedContent: Dict[str, Any],
title: str,
userPrompt: str = None,
aiService=None,
*,
style: Dict[str, Any] = None,
) -> List[RenderedDocument]:
"""Render extracted JSON content to Markdown format.""" """Render extracted JSON content to Markdown format."""
_ = style
try: try:
# Generate markdown from JSON structure # Generate markdown from JSON structure
markdownContent = self._generateMarkdownFromJson(extractedContent, title) markdownContent = self._generateMarkdownFromJson(extractedContent, title)

View file

@ -8,7 +8,7 @@ import re
from .documentRendererBaseTemplate import BaseRenderer from .documentRendererBaseTemplate import BaseRenderer
from modules.datamodels.datamodelDocument import RenderedDocument from modules.datamodels.datamodelDocument import RenderedDocument
from typing import Dict, Any, List, Optional from typing import Dict, Any, List, Optional, Union
class RendererText(BaseRenderer): class RendererText(BaseRenderer):
"""Renders content to plain text format with format-specific extraction.""" """Renders content to plain text format with format-specific extraction."""
@ -76,8 +76,17 @@ class RendererText(BaseRenderer):
# Text renderer accepts all types except images # Text renderer accepts all types except images
return [st for st in supportedSectionTypes if st != "image"] return [st for st in supportedSectionTypes if st != "image"]
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None) -> List[RenderedDocument]: async def render(
self,
extractedContent: Dict[str, Any],
title: str,
userPrompt: str = None,
aiService=None,
*,
style: Dict[str, Any] = None,
) -> List[RenderedDocument]:
"""Render extracted JSON content to plain text format.""" """Render extracted JSON content to plain text format."""
_ = style # unified style from renderReport; plain text ignores formatting hints
try: try:
# Generate text from JSON structure # Generate text from JSON structure
textContent = self._generateTextFromJson(extractedContent, title) textContent = self._generateTextFromJson(extractedContent, title)
@ -187,8 +196,10 @@ class RendererText(BaseRenderer):
textParts.append(f"[Reference: {label}]") textParts.append(f"[Reference: {label}]")
continue continue
elif element_type == "extracted_text": elif element_type == "extracted_text":
# Extracted text format # Extracted text format (str or raw bytes from ContentPart)
content = element.get("content", "") content = element.get("content", "")
if isinstance(content, (bytes, bytearray, memoryview)):
content = bytes(content).decode("utf-8", errors="replace")
source = element.get("source", "") source = element.get("source", "")
if content: if content:
source_text = f" (Source: {source})" if source else "" source_text = f" (Source: {source})" if source else ""
@ -263,16 +274,16 @@ class RendererText(BaseRenderer):
textParts = [] textParts = []
# Create table header # Create table header
headerLine = " | ".join(str(header) for header in headers) headerLine = " | ".join(self._tableCellToPlainText(h) for h in headers)
textParts.append(headerLine) textParts.append(headerLine)
# Add separator line # Add separator line
separatorLine = " | ".join("-" * len(str(header)) for header in headers) separatorLine = " | ".join("-" * len(self._tableCellToPlainText(h)) for h in headers)
textParts.append(separatorLine) textParts.append(separatorLine)
# Add data rows # Add data rows
for row in rows: for row in rows:
rowLine = " | ".join(str(cellData) for cellData in row) rowLine = " | ".join(self._tableCellToPlainText(cellData) for cellData in row)
textParts.append(rowLine) textParts.append(rowLine)
return '\n'.join(textParts) return '\n'.join(textParts)
@ -299,6 +310,9 @@ class RendererText(BaseRenderer):
textParts.append(f"- {self._stripMarkdownForPlainText(item)}") textParts.append(f"- {self._stripMarkdownForPlainText(item)}")
elif isinstance(item, dict) and "text" in item: elif isinstance(item, dict) and "text" in item:
textParts.append(f"- {self._stripMarkdownForPlainText(item['text'])}") textParts.append(f"- {self._stripMarkdownForPlainText(item['text'])}")
elif isinstance(item, list):
# markdownToDocumentJson: each item is List[InlineRun]
textParts.append(f"- {self._inlineRunsToPlainText(item)}")
return '\n'.join(textParts) return '\n'.join(textParts)
@ -311,21 +325,26 @@ class RendererText(BaseRenderer):
try: try:
# Extract from nested content structure: element.content.{text, level} # Extract from nested content structure: element.content.{text, level}
content = headingData.get("content", {}) content = headingData.get("content", {})
if not isinstance(content, dict): if isinstance(content, dict) and content:
text = self._stripMarkdownForPlainText(content.get("text", ""))
level = content.get("level", 1)
else:
# AI shorthand: {"type":"heading","text":"...","level":2}
text = self._stripMarkdownForPlainText(str(headingData.get("text", "") or ""))
level = headingData.get("level", 1)
if not text:
return "" return ""
text = self._stripMarkdownForPlainText(content.get("text", ""))
level = content.get("level", 1)
if text: try:
level = max(1, min(6, level)) level_i = int(level) if level is not None else 1
if level == 1: except (TypeError, ValueError):
return f"{text}\n{'=' * len(text)}" level_i = 1
elif level == 2: level_i = max(1, min(6, level_i))
return f"{text}\n{'-' * len(text)}" if level_i == 1:
else: return f"{text}\n{'=' * len(text)}"
return f"{'#' * level} {text}" if level_i == 2:
return f"{text}\n{'-' * len(text)}"
return "" return f"{'#' * level_i} {text}"
except Exception as e: except Exception as e:
self.logger.warning(f"Error rendering heading: {str(e)}") self.logger.warning(f"Error rendering heading: {str(e)}")
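Since the hunk above shows only the rewritten body (the def line sits outside the diff context), here is a standalone replica of the formatting it produces, with the level coercion and clamp made explicit; the function name is illustrative:

def format_heading(text: str, level) -> str:
    # Coerce string/None levels, then clamp to 1..6 (mirrors the branch above).
    try:
        level_i = int(level) if level is not None else 1
    except (TypeError, ValueError):
        level_i = 1
    level_i = max(1, min(6, level_i))
    if level_i == 1:
        return f"{text}\n{'=' * len(text)}"   # setext H1
    if level_i == 2:
        return f"{text}\n{'-' * len(text)}"   # setext H2
    return f"{'#' * level_i} {text}"          # ATX for deeper levels

assert format_heading("Intro", "2") == "Intro\n-----"  # string levels now coerce
assert format_heading("Deep", 99) == "###### Deep"     # clamped to 6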
@ -345,12 +364,65 @@ class RendererText(BaseRenderer):
text = re.sub(r'`([^`]+)`', r'\1', text) text = re.sub(r'`([^`]+)`', r'\1', text)
return text.strip() return text.strip()
def _inlineRunsToPlainText(self, runs: Union[List[Any], Any]) -> str:
"""Flatten InlineRun dicts (from markdownToDocumentJson) to a single string."""
if runs is None:
return ""
if isinstance(runs, dict):
runs = [runs]
if not isinstance(runs, list):
return self._stripMarkdownForPlainText(str(runs))
parts: List[str] = []
for run in runs:
if not isinstance(run, dict):
parts.append(str(run))
continue
t = run.get("type") or "text"
val = run.get("value", "")
if t == "text":
parts.append(str(val))
elif t in ("bold", "italic", "code"):
parts.append(str(val))
elif t == "link":
parts.append(str(val))
elif t == "image":
parts.append(f"[{val}]")
else:
parts.append(str(val))
return "".join(parts)
def _tableCellToPlainText(self, cell: Any) -> str:
"""Table header/cell: plain str, legacy dict, or List[InlineRun]."""
if cell is None:
return ""
if isinstance(cell, str):
return self._stripMarkdownForPlainText(cell)
if isinstance(cell, list):
return self._inlineRunsToPlainText(cell)
if isinstance(cell, dict) and "text" in cell:
return self._stripMarkdownForPlainText(str(cell["text"]))
return self._stripMarkdownForPlainText(str(cell))
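What the new helpers do with markdownToDocumentJson output, shown on a simplified mirror of _inlineRunsToPlainText (the run shape is taken from the code above): formatting runs keep only their value, image runs become a bracketed placeholder.

from typing import Any, List

def inline_runs_to_text(runs: List[Any]) -> str:
    parts = []
    for run in runs:
        if not isinstance(run, dict):
            parts.append(str(run))
            continue
        t = run.get("type") or "text"
        val = run.get("value", "")
        parts.append(f"[{val}]" if t == "image" else str(val))
    return "".join(parts)

assert inline_runs_to_text(
    [{"type": "text", "value": "see "}, {"type": "bold", "value": "fig. 1"},
     {"type": "image", "value": "chart.png"}]
) == "see fig. 1[chart.png]"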
def _renderJsonParagraph(self, paragraphData: Dict[str, Any]) -> str: def _renderJsonParagraph(self, paragraphData: Dict[str, Any]) -> str:
"""Render a JSON paragraph to text. Strips markdown for plain text output.""" """Render a JSON paragraph to text. Strips markdown for plain text output."""
try: try:
# Extract from nested content structure # Models often return {"type":"paragraph","text":"..."} without nested "content"
content = paragraphData.get("content", {}) top = paragraphData.get("text")
raw_content = paragraphData.get("content", {})
if isinstance(top, str) and top.strip():
if raw_content is None or raw_content == {}:
return self._stripMarkdownForPlainText(top)
if isinstance(raw_content, dict):
if not (raw_content.get("text") or raw_content.get("inlineRuns")):
return self._stripMarkdownForPlainText(top)
content = raw_content
if content is None:
content = {}
if isinstance(content, dict): if isinstance(content, dict):
runs = self._inlineRunsFromContent(content)
if runs:
return self._stripMarkdownForPlainText(self._inlineRunsToPlainText(runs))
text = content.get("text", "") text = content.get("text", "")
elif isinstance(content, str): elif isinstance(content, str):
text = content text = content

View file

@ -2172,11 +2172,13 @@ def getContexts(
>>> print(contexts.overlapContext) # "" (empty - JSON is complete) >>> print(contexts.overlapContext) # "" (empty - JSON is complete)
>>> print(contexts.jsonParsingSuccess) # True >>> print(contexts.jsonParsingSuccess) # True
""" """
# First, check if original JSON is already complete (parseable without modification) # Completeness must use the same pipeline as callers (fences, balanced extract, normalization).
from modules.shared.jsonUtils import tryParseJson as _utils_try_parse_json
jsonIsComplete = False jsonIsComplete = False
if truncatedJson and truncatedJson.strip(): if truncatedJson and truncatedJson.strip():
parsed, error = _tryParseJson(truncatedJson.strip()) _parsed_hdr, error_hdr, _ = _utils_try_parse_json(truncatedJson)
if error is None: if error_hdr is None:
jsonIsComplete = True jsonIsComplete = True
logger.debug("Original JSON is already complete (no cut point)") logger.debug("Original JSON is already complete (no cut point)")
@ -2193,28 +2195,27 @@ def getContexts(
jsonParsingSuccess = False jsonParsingSuccess = False
if completePart and completePart.strip(): if completePart and completePart.strip():
# First attempt: parse as-is parsed, error, _ = _utils_try_parse_json(completePart)
parsed, error = _tryParseJson(completePart)
if error is None: if error is None:
jsonParsingSuccess = True jsonParsingSuccess = True
else: else:
# Second attempt: repair internal errors and retry logger.debug(f"Initial parse failed: {error}, attempting internal repair")
logger.debug(f"Initial parse failed: {error}, attempting repair")
repairedCompletePart = _repairInternalJsonErrors(completePart) repairedCompletePart = _repairInternalJsonErrors(completePart)
parsed, error, _ = _utils_try_parse_json(repairedCompletePart)
parsed, error = _tryParseJson(repairedCompletePart)
if error is None: if error is None:
# Repair succeeded - use repaired version
completePart = repairedCompletePart completePart = repairedCompletePart
jsonParsingSuccess = True jsonParsingSuccess = True
logger.debug("JSON repair successful") logger.debug("JSON repair successful")
else: else:
# Repair also failed - keep original completePart, mark as failed
logger.debug(f"JSON repair also failed: {error}") logger.debug(f"JSON repair also failed: {error}")
jsonParsingSuccess = False jsonParsingSuccess = False
# If completePart parses successfully, the merged/candidate JSON is structurally complete
# after repair/closing — overlap from extractContinuationContexts on the *raw* candidate
# would falsely signal truncation and trap callAiWithLooping in continuation iterations.
if jsonParsingSuccess:
overlap = ""
return JsonContinuationContexts( return JsonContinuationContexts(
overlapContext=overlap, overlapContext=overlap,
hierarchyContext=hierarchy, hierarchyContext=hierarchy,

View file

@ -393,9 +393,10 @@ async def executeGraph(
ordered_ids = [n.get("id") for n in ordered if n.get("id")] ordered_ids = [n.get("id") for n in ordered if n.get("id")]
logger.info("executeGraph topoSort order: %s", ordered_ids) logger.info("executeGraph topoSort order: %s", ordered_ids)
nodeOutputs: Dict[str, Any] = dict(initialNodeOutputs or {}) # Normalize resumed human-node output BEFORE copying into nodeOutputs — otherwise
# normalizeToSchema would only update initialNodeOutputs, and loop/ref consumers would still see
# the raw shape, e.g. input.upload {files} without the coerced DocumentList.documents.
is_resume = startAfterNodeId is not None is_resume = startAfterNodeId is not None
if is_resume and initialNodeOutputs and startAfterNodeId: if is_resume and initialNodeOutputs and startAfterNodeId:
resumedNode = next((n for n in nodes if n.get("id") == startAfterNodeId), None) resumedNode = next((n for n in nodes if n.get("id") == startAfterNodeId), None)
if resumedNode: if resumedNode:
@ -408,6 +409,8 @@ async def executeGraph(
initialNodeOutputs[startAfterNodeId] = normalizeToSchema(resumedOutput, schema) initialNodeOutputs[startAfterNodeId] = normalizeToSchema(resumedOutput, schema)
except Exception as valErr: except Exception as valErr:
logger.warning("executeGraph resume: schema validation failed for %s: %s", startAfterNodeId, valErr) logger.warning("executeGraph resume: schema validation failed for %s: %s", startAfterNodeId, valErr)
nodeOutputs: Dict[str, Any] = dict(initialNodeOutputs or {})
if not runId and automation2_interface and workflowId and not is_resume: if not runId and automation2_interface and workflowId and not is_resume:
run_context = { run_context = {
"connectionMap": connectionMap, "connectionMap": connectionMap,

View file

@ -1,7 +1,8 @@
# Copyright (c) 2025 Patrick Motsch # Copyright (c) 2025 Patrick Motsch
# Action node executor - maps ai.*, email.*, sharepoint.*, clickup.*, file.*, trustee.* to method actions. # Action node executor - maps ai.*, email.*, sharepoint.*, clickup.*, file.*, trustee.* to method actions.
# #
# Typed Port System: explicit DataRefs / static parameters only (no runtime wire-handover). # Typed Port System: explicit DataRefs / static parameters; optional ``documentList`` from input port 0
# when the param is empty (same idea as IOExecutor wire fill).
# ``materializeConnectionRefs`` (see pickNotPushMigration) may still rewrite empty connectionReference at run start. # ``materializeConnectionRefs`` (see pickNotPushMigration) may still rewrite empty connectionReference at run start.
import json import json
@ -18,6 +19,25 @@ from modules.serviceCenter.services.serviceBilling.mainServiceBilling import Bil
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def _coerce_document_data_to_bytes(raw: Any) -> Optional[bytes]:
"""Normalize documentData (bytes/str/buffer) for DB file persistence."""
if raw is None:
return None
if isinstance(raw, bytes):
return raw if len(raw) > 0 else None
if isinstance(raw, bytearray):
b = bytes(raw)
return b if len(b) > 0 else None
if isinstance(raw, memoryview):
b = raw.tobytes()
return b if len(b) > 0 else None
if isinstance(raw, str):
b = raw.encode("utf-8")
return b if len(b) > 0 else None
return None
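Assuming the helper above is in scope, its contract in a few asserts: every buffer-ish input becomes bytes, and empty payloads collapse to None so nothing zero-length is persisted.

assert _coerce_document_data_to_bytes(b"pdf bytes") == b"pdf bytes"
assert _coerce_document_data_to_bytes(bytearray(b"x")) == b"x"
assert _coerce_document_data_to_bytes(memoryview(b"x")) == b"x"
assert _coerce_document_data_to_bytes("text") == b"text"           # str -> UTF-8
assert _coerce_document_data_to_bytes("") is None                  # empty -> skip persistence
assert _coerce_document_data_to_bytes({"not": "a buffer"}) is None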
_USER_CONNECTION_ID_RE = re.compile( _USER_CONNECTION_ID_RE = re.compile(
r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$", r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$",
re.IGNORECASE, re.IGNORECASE,
@ -219,6 +239,78 @@ def _getOutputSchemaName(nodeDef: Dict) -> str:
return port0.get("schema", "ActionResult") return port0.get("schema", "ActionResult")
def _extract_wired_document_list(inp: Any) -> Optional[Dict[str, Any]]:
"""
Build a DocumentList-shaped dict from upstream node output (matches IOExecutor wire behavior).
Handles DocumentList, human upload shapes (file / files / fileIds), FileList, loop file items.
During flow.loop body execution the loop node's output is
{items, count, currentItem, currentIndex}; wired document actions must use currentItem.
"""
if inp is None:
return None
from modules.features.graphicalEditor.portTypes import (
unwrapTransit,
_coerce_document_list_upload_fields,
_file_record_to_document,
)
data = unwrapTransit(inp)
if isinstance(data, str):
one = _file_record_to_document(data)
return {"documents": [one], "count": 1} if one else None
if not isinstance(data, dict):
return None
d = dict(data)
_coerce_document_list_upload_fields(d)
# Per-iteration payload from executionEngine (flow.loop → downstream in loop body)
if "currentItem" in d:
ci = d.get("currentItem")
if ci is not None:
nested = _extract_wired_document_list(ci)
if nested:
return nested
docs = d.get("documents")
if isinstance(docs, list) and len(docs) > 0:
return {"documents": docs, "count": d.get("count", len(docs))}
raw_list = d.get("documentList")
if isinstance(raw_list, list) and len(raw_list) > 0 and isinstance(raw_list[0], dict):
return {"documents": raw_list, "count": len(raw_list)}
doc_id = d.get("documentId") or d.get("id")
if doc_id and str(doc_id).strip():
one: Dict[str, Any] = {"id": str(doc_id).strip()}
fn = d.get("fileName") or d.get("name")
if fn:
one["name"] = str(fn)
mt = d.get("mimeType")
if mt:
one["mimeType"] = str(mt)
return {"documents": [one], "count": 1}
files = d.get("files")
if isinstance(files, list) and files:
collected = []
for item in files:
conv = _file_record_to_document(item) if isinstance(item, dict) else None
if conv:
collected.append(conv)
if collected:
return {"documents": collected, "count": len(collected)}
return None
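For the loop-body case specifically, a shape illustration (payload invented; assumes _coerce_document_list_upload_fields is a no-op on this dict): the helper recurses into currentItem and builds a single-document list from its documentId.

loop_output = {
    "items": [{"documentId": "d1"}, {"documentId": "d2"}],
    "count": 2,
    "currentIndex": 0,
    "currentItem": {"documentId": "d1", "fileName": "offer.docx", "mimeType": "application/pdf"},
}
# _extract_wired_document_list(loop_output)
#   -> {"documents": [{"id": "d1", "name": "offer.docx", "mimeType": "application/pdf"}], "count": 1}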
def _document_list_param_is_empty(val: Any) -> bool:
if val is None or val == "":
return True
if isinstance(val, list) and len(val) == 0:
return True
if isinstance(val, dict):
if val.get("documents") or val.get("references") or val.get("items"):
return False
if val.get("documentId") or val.get("id"):
return False
return True
return False
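The emptiness rules in one place, assuming the helper above is importable; only genuinely empty params trigger the wire fill from input port 0.

assert _document_list_param_is_empty(None)
assert _document_list_param_is_empty("")
assert _document_list_param_is_empty([])
assert _document_list_param_is_empty({"documents": []})            # dict without usable refs
assert not _document_list_param_is_empty({"documents": [{"id": "d1"}]})
assert not _document_list_param_is_empty({"documentId": "d1"})
assert not _document_list_param_is_empty("docItem:abc")            # any non-empty string counts as set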
class ActionNodeExecutor: class ActionNodeExecutor:
"""Execute action nodes by mapping to method actions via ActionExecutor.""" """Execute action nodes by mapping to method actions via ActionExecutor."""
@ -260,6 +352,17 @@ class ActionNodeExecutor:
if pName and pName not in resolvedParams and "default" in pDef: if pName and pName not in resolvedParams and "default" in pDef:
resolvedParams[pName] = pDef["default"] resolvedParams[pName] = pDef["default"]
_param_names = {p.get("name") for p in nodeDef.get("parameters", []) if p.get("name")}
if "documentList" in _param_names and _document_list_param_is_empty(resolvedParams.get("documentList")):
_src_map = (context.get("inputSources") or {}).get(nodeId) or {}
_entry = _src_map.get(0)
if _entry:
_src_node_id, _ = _entry
_upstream = (context.get("nodeOutputs") or {}).get(_src_node_id)
_wired = _extract_wired_document_list(_upstream)
if _wired:
resolvedParams["documentList"] = _wired
# 3. Resolve connectionReference # 3. Resolve connectionReference
chatService = getattr(self.services, "chat", None) chatService = getattr(self.services, "chat", None)
_resolveConnectionParam(resolvedParams, chatService, self.services) _resolveConnectionParam(resolvedParams, chatService, self.services)
@ -323,18 +426,33 @@ class ActionNodeExecutor:
for d in (result.documents or []): for d in (result.documents or []):
dumped = d.model_dump() if hasattr(d, "model_dump") else dict(d) if isinstance(d, dict) else d dumped = d.model_dump() if hasattr(d, "model_dump") else dict(d) if isinstance(d, dict) else d
rawData = getattr(d, "documentData", None) if hasattr(d, "documentData") else (dumped.get("documentData") if isinstance(dumped, dict) else None) rawData = getattr(d, "documentData", None) if hasattr(d, "documentData") else (dumped.get("documentData") if isinstance(dumped, dict) else None)
if isinstance(dumped, dict) and isinstance(rawData, bytes) and len(rawData) > 0: rawBytes = _coerce_document_data_to_bytes(rawData)
if isinstance(dumped, dict) and rawBytes:
try: try:
from modules.interfaces.interfaceDbManagement import getInterface as _getMgmtInterface from modules.interfaces.interfaceDbManagement import getInterface as _getMgmtInterface
from modules.interfaces.interfaceDbApp import getInterface as _getAppInterface
from modules.security.rootAccess import getRootUser from modules.security.rootAccess import getRootUser
_userId = context.get("userId") _userId = context.get("userId")
_mandateId = context.get("mandateId") _mandateId = context.get("mandateId")
_instanceId = context.get("instanceId") _instanceId = context.get("instanceId")
_mgmt = _getMgmtInterface(getRootUser(), mandateId=_mandateId, featureInstanceId=_instanceId) _owner = None
if _userId:
try:
_umap = _getAppInterface(getRootUser()).getUsersByIds([str(_userId)])
_owner = _umap.get(str(_userId))
except Exception as _ue:
logger.warning("Could not resolve workflow user for file persistence: %s", _ue)
if _owner is None:
_owner = getRootUser()
logger.debug(
"Persisting workflow document as root user (no resolved owner userId=%r)",
_userId,
)
_mgmt = _getMgmtInterface(_owner, mandateId=_mandateId, featureInstanceId=_instanceId)
_docName = dumped.get("documentName") or f"workflow-result-{nodeId}.bin" _docName = dumped.get("documentName") or f"workflow-result-{nodeId}.bin"
_mimeType = dumped.get("mimeType") or "application/octet-stream" _mimeType = dumped.get("mimeType") or "application/octet-stream"
_fileItem = _mgmt.createFile(_docName, _mimeType, rawData) _fileItem = _mgmt.createFile(_docName, _mimeType, rawBytes)
_mgmt.createFileData(_fileItem.id, rawData) _mgmt.createFileData(_fileItem.id, rawBytes)
dumped["fileId"] = _fileItem.id dumped["fileId"] = _fileItem.id
dumped["id"] = _fileItem.id dumped["id"] = _fileItem.id
dumped["fileName"] = _fileItem.fileName dumped["fileName"] = _fileItem.fileName
@ -345,6 +463,20 @@ class ActionNodeExecutor:
dumped["_hasBinaryData"] = True dumped["_hasBinaryData"] = True
docsList.append(dumped) docsList.append(dumped)
# Clean DocumentList shape for document nodes (match file.create: documents + count, no AiResult fields)
if outputSchema == "DocumentList" and nodeType in ("ai.generateDocument", "ai.convertDocument"):
if not result.success:
return _normalizeError(
RuntimeError(str(result.error or "document action failed")),
outputSchema,
)
list_out: Dict[str, Any] = {
"documents": docsList,
"count": len(docsList),
}
_attachConnectionProvenance(list_out, resolvedParams, outputSchema, chatService, self.services)
return normalizeToSchema(list_out, outputSchema)
extractedContext = "" extractedContext = ""
if result.documents: if result.documents:
doc = result.documents[0] doc = result.documents[0]
@ -377,7 +509,11 @@ class ActionNodeExecutor:
if nodeType.startswith("ai."): if nodeType.startswith("ai."):
out["prompt"] = promptText out["prompt"] = promptText
out["response"] = extractedContext out["response"] = extractedContext
out["context"] = f"{promptText}\n\n{extractedContext}" if promptText and extractedContext else (extractedContext or promptText) inputContext = resolvedParams.get("context")
if inputContext is not None:
out["context"] = inputContext if isinstance(inputContext, str) else json.dumps(inputContext, ensure_ascii=False, default=str)
else:
out["context"] = ""
# Structured output # Structured output
if extractedContext: if extractedContext:
try: try:

View file

@ -45,10 +45,12 @@ class IOExecutor:
if 0 in inputSources: if 0 in inputSources:
srcId, _ = inputSources[0] srcId, _ = inputSources[0]
inp = nodeOutputs.get(srcId) inp = nodeOutputs.get(srcId)
from modules.workflows.automation2.executors.actionNodeExecutor import _getDocumentsFromUpstream from modules.workflows.automation2.executors.actionNodeExecutor import _extract_wired_document_list
docs = _getDocumentsFromUpstream(inp) if isinstance(inp, dict) else []
wired = _extract_wired_document_list(inp)
docs = (wired or {}).get("documents") if isinstance(wired, dict) else None
if docs: if docs:
resolvedParams.setdefault("documentList", docs) resolvedParams.setdefault("documentList", wired)
elif inp is not None: elif inp is not None:
resolvedParams.setdefault("input", inp) resolvedParams.setdefault("input", inp)

View file

@ -7,6 +7,50 @@ from typing import Dict, List, Any, Tuple, Set, Optional
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def _ai_result_text_from_documents(d: Dict[str, Any]) -> Optional[str]:
"""Extract plain-text body from AiResult-style ``documents[0].documentData``."""
docs = d.get("documents")
if not isinstance(docs, list) or not docs:
return None
d0 = docs[0]
raw: Any = None
if isinstance(d0, dict):
raw = d0.get("documentData")
elif d0 is not None:
raw = getattr(d0, "documentData", None)
if raw is None:
return None
if isinstance(raw, bytes):
try:
t = raw.decode("utf-8").strip()
return t or None
except (UnicodeDecodeError, ValueError):
return None
if isinstance(raw, str):
s = raw.strip()
return s or None
return None
def _ref_coalesce_empty_ai_result_text(data: Any, path: List[Any], resolved: Any) -> Any:
"""If a ref targets AiResult text fields but resolves empty/missing, fall back to documents.
Needed when the optional ``responseData`` is absent (no synthetic ``{}``), when ``response``
is empty while ``documents`` hold the model output, or when legacy graphs bind only responseData.
"""
if resolved not in (None, ""):
return resolved
if not isinstance(data, dict) or not path:
return resolved
head = path[0]
if head not in ("response", "responseData", "context"):
return resolved
if head == "context" and len(path) != 1:
return resolved
fb = _ai_result_text_from_documents(data)
return fb if fb is not None else resolved
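With the two helpers above in scope, the fallback in action on an invented AiResult dict: the ref targets "response", which is empty, so the text is recovered from documents[0].documentData; non-text paths are left alone even when empty.

ai_result = {
    "success": True,
    "response": "",
    "documents": [{"documentData": b"Generated offer text."}],
}
resolved = _ref_coalesce_empty_ai_result_text(ai_result, ["response"], ai_result.get("response"))
assert resolved == "Generated offer text."
assert _ref_coalesce_empty_ai_result_text(ai_result, ["documents", 0], None) is None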
def parseGraph(graph: Dict[str, Any]) -> Tuple[List[Dict], List[Dict], Set[str]]: def parseGraph(graph: Dict[str, Any]) -> Tuple[List[Dict], List[Dict], Set[str]]:
""" """
Parse graph into nodes, connections, and node IDs. Parse graph into nodes, connections, and node IDs.
@ -356,14 +400,15 @@ def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any:
data = data.get("data", data) data = data.get("data", data)
plist = list(path) plist = list(path)
resolved = _get_by_path(data, plist) resolved = _get_by_path(data, plist)
if ( if resolved is None and isinstance(data, dict) and plist:
resolved is None if plist[0] == "payload" and len(plist) > 1:
and isinstance(data, dict) # Strip explicit "payload" prefix (legacy DataPicker paths)
and plist resolved = _get_by_path(data, plist[1:])
and plist[0] == "payload" elif "payload" in data and isinstance(data["payload"], dict):
and len(plist) > 1 # Form nodes store fields under {"payload": {fieldName: …}}.
): # DataPicker emits bare field paths like ["url"]; try under payload.
resolved = _get_by_path(data, plist[1:]) resolved = _get_by_path(data["payload"], plist)
resolved = _ref_coalesce_empty_ai_result_text(data, plist, resolved)
return resolveParameterReferences(resolved, nodeOutputs) return resolveParameterReferences(resolved, nodeOutputs)
return value return value
if value.get("type") == "value": if value.get("type") == "value":
@ -386,17 +431,34 @@ def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any:
if len(parts) < 2: if len(parts) < 2:
return json.dumps(data) if isinstance(data, (dict, list)) else str(data) return json.dumps(data) if isinstance(data, (dict, list)) else str(data)
rest = ".".join(parts[1:]) rest = ".".join(parts[1:])
if data is None:
def _walk(root, keys):
cur = root
for k in keys:
if isinstance(cur, dict) and k in cur:
cur = cur[k]
elif isinstance(cur, (list, tuple)) and k.isdigit():
cur = cur[int(k)]
else:
return None
return cur
keys = rest.split(".")
result = _walk(data, keys)
# Form nodes store fields under {"payload": {field: …}}.
# Fall back to looking under "payload" when the direct path misses.
if result is None and isinstance(data, dict) and "payload" in data:
result = _walk(data["payload"], keys)
if result is None:
return m.group(0) return m.group(0)
for k in rest.split("."): return str(result) if not isinstance(result, (dict, list)) else json.dumps(result, ensure_ascii=False)
if isinstance(data, dict) and k in data:
data = data[k]
elif isinstance(data, (list, tuple)) and k.isdigit():
data = data[int(k)]
else:
return m.group(0)
return str(data) if data is not None else m.group(0)
return re.sub(r"\{\{\s*([^}]+)\s*\}\}", repl, value) return re.sub(r"\{\{\s*([^}]+)\s*\}\}", repl, value)
if isinstance(value, list): if isinstance(value, list):
# contextBuilder: list where every item is a `{"type":"ref",...}` envelope.
# Resolve each ref and join the serialised parts into a single prompt string.
if value and all(isinstance(v, dict) and v.get("type") == "ref" for v in value):
from modules.workflows.methods.methodAi._common import serialize_context
parts = [serialize_context(resolveParameterReferences(v, nodeOutputs)) for v in value]
return "\n\n".join(p for p in parts if p)
return [resolveParameterReferences(v, nodeOutputs) for v in value] return [resolveParameterReferences(v, nodeOutputs) for v in value]
return value return value
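Two resolution behaviors added in this hunk, shown on made-up node outputs (how the node id is looked up before _walk is outside the shown context, so this is a sketch, not a trace): bare field paths retry under "payload", so form-node fields resolve either way.

node_outputs = {
    "form1": {"payload": {"url": "https://example.com", "name": "Ada"}},
}
template = "Fetch {{form1.url}} for {{form1.payload.name}}"
# resolveParameterReferences(template, node_outputs)
#   -> "Fetch https://example.com for Ada"
# "url" misses at the top level and is retried under "payload";
# the explicit "payload.name" path resolves directly.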

View file

@ -3,6 +3,30 @@
"""Shared helpers for AI workflow actions.""" """Shared helpers for AI workflow actions."""
import json
from typing import Any
def serialize_context(val: Any) -> str:
"""Convert any context value to a readable string for use in AI prompts.
- None / "" / [] -> ""
- empty dict (no keys) -> "" (avoids a literal "{}" in file.create / prompts)
- str -> returned as-is (stripped)
- dict / list -> pretty-printed JSON
- anything else -> str()
"""
if val is None or val == "" or val == []:
return ""
if isinstance(val, dict) and len(val) == 0:
return ""
if isinstance(val, str):
return val.strip()
try:
return json.dumps(val, ensure_ascii=False, indent=2)
except Exception:
return str(val)
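Expected behavior at a glance, assuming serialize_context as defined above is in scope:

assert serialize_context(None) == ""
assert serialize_context({}) == ""             # no literal "{}" leaks into prompts
assert serialize_context("  hello ") == "hello"
assert serialize_context({"lead": {"name": "ACME"}}) == '{\n  "lead": {\n    "name": "ACME"\n  }\n}'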
def applyCommonAiParams(parameters: dict, request) -> None: def applyCommonAiParams(parameters: dict, request) -> None:
"""Apply common AI parameters (requireNeutralization, allowedModels) from node to request.""" """Apply common AI parameters (requireNeutralization, allowedModels) from node to request."""

View file

@ -14,11 +14,13 @@ from modules.serviceCenter.services.serviceBilling.mainServiceBilling import Bil
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
async def generateCode(self, parameters: Dict[str, Any]) -> ActionResult: async def generateCode(self, parameters: Dict[str, Any]) -> ActionResult:
prompt = parameters.get("prompt") from modules.workflows.methods.methodAi._common import serialize_context
if not prompt: base_prompt = (parameters.get("prompt") or "").strip()
context_val = serialize_context(parameters.get("context"))
prompt = f"Kontext:\n{context_val}\n\n{base_prompt}" if context_val else base_prompt
if not prompt.strip():
return ActionResult.isFailure(error="prompt is required") return ActionResult.isFailure(error="prompt is required")
documentList = parameters.get("documentList", [])
# Optional: if omitted, formats determined from prompt by AI # Optional: if omitted, formats determined from prompt by AI
resultType = parameters.get("resultType") resultType = parameters.get("resultType")
@ -31,19 +33,15 @@ async def generateCode(self, parameters: Dict[str, Any]) -> ActionResult:
parentOperationId = parameters.get('parentOperationId') parentOperationId = parameters.get('parentOperationId')
try: try:
# Convert documentList to DocumentReferenceList if needed from modules.datamodels.datamodelDocref import coerceDocumentReferenceList
docRefList = None
if documentList:
from modules.datamodels.datamodelDocref import DocumentReferenceList
if isinstance(documentList, DocumentReferenceList): raw_dl = parameters.get("documentList")
docRefList = documentList if raw_dl is None or raw_dl == "":
elif isinstance(documentList, str): docRefList = None
docRefList = DocumentReferenceList.from_string_list([documentList]) else:
elif isinstance(documentList, list): docRefList = coerceDocumentReferenceList(raw_dl)
docRefList = DocumentReferenceList.from_string_list(documentList) if not docRefList.references:
else: docRefList = None
docRefList = DocumentReferenceList(references=[])
# Prepare title # Prepare title
title = "Generated Code" title = "Generated Code"

View file

@ -14,14 +14,18 @@ from modules.serviceCenter.services.serviceBilling.mainServiceBilling import Bil
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
async def generateDocument(self, parameters: Dict[str, Any]) -> ActionResult: async def generateDocument(self, parameters: Dict[str, Any]) -> ActionResult:
prompt = parameters.get("prompt") from modules.workflows.methods.methodAi._common import serialize_context
if not prompt: base_prompt = (parameters.get("prompt") or "").strip()
context_val = serialize_context(parameters.get("context"))
prompt = f"Kontext:\n{context_val}\n\n{base_prompt}" if context_val else base_prompt
if not prompt.strip():
return ActionResult.isFailure(error="prompt is required") return ActionResult.isFailure(error="prompt is required")
documentList = parameters.get("documentList", [])
documentType = parameters.get("documentType") documentType = parameters.get("documentType")
# Optional: if omitted, formats determined from prompt by AI # Prefer explicit outputFormat (flow UI); resultType remains for legacy / API callers.
resultType = parameters.get("resultType") resultType = parameters.get("outputFormat") or parameters.get("resultType")
if isinstance(resultType, str):
resultType = resultType.strip().lstrip(".").lower() or None
if not resultType: if not resultType:
logger.debug("resultType not provided - formats will be determined from prompt by AI") logger.debug("resultType not provided - formats will be determined from prompt by AI")
@ -32,22 +36,23 @@ async def generateDocument(self, parameters: Dict[str, Any]) -> ActionResult:
parentOperationId = parameters.get('parentOperationId') parentOperationId = parameters.get('parentOperationId')
try: try:
# Convert documentList to DocumentReferenceList if needed # Convert documentList to DocumentReferenceList (handles dict {"documents": [...]}, list of ids, str, etc.)
docRefList = None from modules.datamodels.datamodelDocref import coerceDocumentReferenceList
if documentList:
from modules.datamodels.datamodelDocref import DocumentReferenceList
if isinstance(documentList, DocumentReferenceList): raw_dl = parameters.get("documentList")
docRefList = documentList if raw_dl is None or raw_dl == "":
elif isinstance(documentList, str): docRefList = None
docRefList = DocumentReferenceList.from_string_list([documentList]) else:
elif isinstance(documentList, list): docRefList = coerceDocumentReferenceList(raw_dl)
docRefList = DocumentReferenceList.from_string_list(documentList) if not docRefList.references:
else: docRefList = None
docRefList = DocumentReferenceList(references=[])
# Prepare title title_raw = parameters.get("title")
title = parameters.get("documentType") or "Generated Document" title = (title_raw.strip() if isinstance(title_raw, str) else "") or None
if not title and isinstance(documentType, str) and documentType.strip():
title = documentType.strip()
if not title:
title = "Generated Document"
# Call AI service for document generation # Call AI service for document generation
# callAiContent handles documentList internally via Phases 5A-5E # callAiContent handles documentList internally via Phases 5A-5E
@ -95,6 +100,8 @@ async def generateDocument(self, parameters: Dict[str, Any]) -> ActionResult:
"actionType": "ai.generateDocument", "actionType": "ai.generateDocument",
"documentType": documentType, "documentType": documentType,
"resultType": resultType, "resultType": resultType,
"outputFormat": resultType,
"title": title,
} }
)) ))
@ -116,14 +123,15 @@ async def generateDocument(self, parameters: Dict[str, Any]) -> ActionResult:
        docName = sanitized

        # Determine mime type
+        rt = resultTypeFallback
        mimeType = "text/plain"
-        if resultType == "html":
+        if rt == "html":
            mimeType = "text/html"
-        elif resultType == "json":
+        elif rt == "json":
            mimeType = "application/json"
-        elif resultType == "pdf":
+        elif rt == "pdf":
            mimeType = "application/pdf"
-        elif resultType == "md":
+        elif rt == "md":
            mimeType = "text/markdown"

        documents.append(ActionDocument(
@ -134,6 +142,8 @@ async def generateDocument(self, parameters: Dict[str, Any]) -> ActionResult:
"actionType": "ai.generateDocument", "actionType": "ai.generateDocument",
"documentType": documentType, "documentType": documentType,
"resultType": resultType, "resultType": resultType,
"outputFormat": resultType,
"title": title,
} }
)) ))
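Reviewer note: the outputFormat / resultType precedence and the mime fallback above are easy to misread, so here is a minimal standalone sketch of the intended normalization rule. resolve_result_type and _MIME_BY_TYPE are illustrative names, not part of this codebase.

# Minimal sketch of the outputFormat/resultType precedence used above.
from typing import Any, Dict, Optional

_MIME_BY_TYPE = {
    "txt": "text/plain",
    "html": "text/html",
    "json": "application/json",
    "pdf": "application/pdf",
    "md": "text/markdown",
}

def resolve_result_type(parameters: Dict[str, Any]) -> Optional[str]:
    # outputFormat (flow UI) wins; resultType stays for legacy/API callers.
    raw = parameters.get("outputFormat") or parameters.get("resultType")
    if isinstance(raw, str):
        # ".PDF " -> "pdf"; an empty string collapses to None
        return raw.strip().lstrip(".").lower() or None
    return None

if __name__ == "__main__":
    assert resolve_result_type({"outputFormat": ".PDF "}) == "pdf"
    assert resolve_result_type({"resultType": "md"}) == "md"
    assert resolve_result_type({"outputFormat": "", "resultType": "html"}) == "html"
    # "docx" has no entry in the map above, so the mime fallback applies:
    print(_MIME_BY_TYPE.get(resolve_result_type({"outputFormat": "docx"}), "text/plain"))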

View file

@ -75,8 +75,10 @@ def _action_docs_to_content_parts(services, docs: List[Any]) -> List[ContentPart
def _resolve_file_refs_to_content_parts(services, fileIdRefs) -> List[ContentPart]:
    """Fetch files by ID from the file store and extract content.

-    Used for automation2 workflows where documents are file-store references,
-    not chat message attachments."""
+    Used ONLY for automation2 workflows where documents are file-store
+    references, not chat message attachments. In the agent/chat context,
+    ``DocumentItemReference`` holds ChatDocument IDs that must be resolved
+    via ``getChatDocumentsFromDocumentList`` instead."""
    from modules.datamodels.datamodelExtraction import ExtractionOptions, MergeStrategy

    mgmt = getattr(services, 'interfaceDbComponent', None)
@ -171,16 +173,24 @@ async def process(self, parameters: Dict[str, Any]) -> ActionResult:
f"to DocumentReferenceList with {len(documentList.references)} references" f"to DocumentReferenceList with {len(documentList.references)} references"
) )
# Resolve DocumentItemReferences (file-ID refs from automation2) directly # DocumentItemReferences carry either file-store IDs (automation2)
# from the file store. These cannot be resolved via chat messages. # or ChatDocument IDs (agent context with docItem: refs).
# Route based on context: if a chat workflow with messages exists,
# let getChatDocumentsFromDocumentList handle them (it resolves
# docItem:uuid via workflow.messages). Otherwise fall through to
# the file-store path for automation2.
from modules.datamodels.datamodelDocref import DocumentItemReference from modules.datamodels.datamodelDocref import DocumentItemReference
fileIdRefs = [r for r in documentList.references if isinstance(r, DocumentItemReference)] fileIdRefs = [r for r in documentList.references if isinstance(r, DocumentItemReference)]
if fileIdRefs: if fileIdRefs:
extractedParts = _resolve_file_refs_to_content_parts(self.services, fileIdRefs) chatService = getattr(self.services, 'chat', None)
if extractedParts: workflow = getattr(chatService, '_workflow', None) if chatService else None
inline_content_parts = (inline_content_parts or []) + extractedParts hasChatContext = workflow and getattr(workflow, 'messages', None)
remaining = [r for r in documentList.references if not isinstance(r, DocumentItemReference)] if not hasChatContext:
documentList = DocumentReferenceList(references=remaining) extractedParts = _resolve_file_refs_to_content_parts(self.services, fileIdRefs)
if extractedParts:
inline_content_parts = (inline_content_parts or []) + extractedParts
remaining = [r for r in documentList.references if not isinstance(r, DocumentItemReference)]
documentList = DocumentReferenceList(references=remaining)
# Optional: if omitted, formats determined from prompt. Default "txt" is validation fallback only. # Optional: if omitted, formats determined from prompt. Default "txt" is validation fallback only.
resultType = parameters.get("resultType") resultType = parameters.get("resultType")
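Reviewer note: the chat-context probe above is just a null-safe getattr chain. A self-contained sketch of the two routing outcomes, using stand-in objects only (the real services/workflow classes are not part of this diff):

# Illustrative-only sketch of the chat-context routing guard.
from types import SimpleNamespace

def has_chat_context(services) -> bool:
    chat_service = getattr(services, "chat", None)
    workflow = getattr(chat_service, "_workflow", None) if chat_service else None
    # Truthy only when a workflow exists AND it carries messages.
    return bool(workflow and getattr(workflow, "messages", None))

agent_services = SimpleNamespace(chat=SimpleNamespace(_workflow=SimpleNamespace(messages=["hi"])))
automation_services = SimpleNamespace(chat=None)

assert has_chat_context(agent_services) is True        # chat resolution handles docItem refs
assert has_chat_context(automation_services) is False  # fall through to the file store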
@ -210,17 +220,12 @@ async def process(self, parameters: Dict[str, Any]) -> ActionResult:
        mimeMap = {"txt": "text/plain", "json": "application/json", "html": "text/html", "md": "text/markdown", "csv": "text/csv", "xml": "application/xml"}
        output_mime_type = mimeMap.get(normalized_result_type, "text/plain") if normalized_result_type else "text/plain"

-        # Normalize context: workflow refs may resolve to dict/list instead of str
-        paramContext = parameters.get("context")
-        if paramContext is not None and not isinstance(paramContext, str):
-            try:
-                paramContext = json.dumps(paramContext, ensure_ascii=False, default=str)
-                parameters["context"] = paramContext
-                logger.info(f"ai.process: Serialized non-string context ({type(parameters.get('context')).__name__}) to JSON ({len(paramContext)} chars)")
-            except Exception as e:
-                logger.warning(f"ai.process: Failed to serialize context: {e}")
-                paramContext = str(paramContext)
-                parameters["context"] = paramContext
+        # Normalize context: serialize any non-string value (dict/list/int/…) to text
+        from modules.workflows.methods.methodAi._common import serialize_context
+        paramContext = serialize_context(parameters.get("context"))
+        parameters["context"] = paramContext
+        if paramContext:
+            logger.info(f"ai.process: context serialized ({len(paramContext)} chars)")

        # Phase 7.3: Pass documentList and/or contentParts to AI service
        contentParts: Optional[List[ContentPart]] = inline_content_parts
@ -247,7 +252,7 @@ async def process(self, parameters: Dict[str, Any]) -> ActionResult:
            self.services.chat.progressLogUpdate(operationId, 0.6, "Calling AI (simple mode)")

            context_parts = []
-            paramContext = parameters.get("context")
+            paramContext = parameters.get("context")  # already serialized above
            if paramContext and isinstance(paramContext, str) and paramContext.strip():
                context_parts.append(paramContext.strip())
            if documentList and len(documentList.references) > 0:
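Reviewer note: several hunks in this compare import serialize_context from methodAi._common, but its body is not part of the diff. The sketch below is an assumed reconstruction from the call sites (None -> "", str passthrough, dict/list -> JSON with a str fallback); the real helper may well differ.

# Assumed behavior of serialize_context, reconstructed from its call sites.
import json
from typing import Any

def serialize_context(value: Any) -> str:
    if value is None:
        return ""
    if isinstance(value, str):
        return value.strip()
    try:
        # Workflow refs may resolve to dict/list/int; render them as JSON text.
        return json.dumps(value, ensure_ascii=False, default=str)
    except Exception:
        return str(value)

assert serialize_context(None) == ""
assert serialize_context("  hello ") == "hello"
assert serialize_context({"k": 1}) == '{"k": 1}'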

View file

@ -13,10 +13,42 @@ from modules.serviceCenter.services.serviceBilling.mainServiceBilling import Bil
logger = logging.getLogger(__name__)

+def _build_research_prompt(parameters: Dict[str, Any]) -> str:
+    """Assemble the final research prompt from prompt + optional context/documentList."""
+    from modules.workflows.methods.methodAi._common import serialize_context
+    base_prompt = (parameters.get("prompt") or "").strip()
+    context_val = serialize_context(parameters.get("context"))
+    doc_list = parameters.get("documentList")
+    parts: list[str] = []
+    if context_val:
+        parts.append(f"Kontext:\n{context_val}")
+    # Extract text from documentList items if provided
+    if doc_list:
+        docs: list = []
+        if isinstance(doc_list, dict):
+            docs = doc_list.get("documents", []) or doc_list.get("items", [])
+        elif isinstance(doc_list, list):
+            docs = doc_list
+        doc_texts = []
+        for d in docs:
+            if isinstance(d, dict):
+                text = d.get("documentData") or d.get("text") or d.get("content") or ""
+                if text and isinstance(text, str):
+                    doc_texts.append(text.strip())
+        if doc_texts:
+            parts.append("Dokumente:\n" + "\n---\n".join(doc_texts))
+    parts.append(base_prompt)
+    return "\n\n".join(p for p in parts if p)

async def webResearch(self, parameters: Dict[str, Any]) -> ActionResult:
    operationId = None
    try:
-        prompt = parameters.get("prompt")
+        prompt = _build_research_prompt(parameters)
        if not prompt:
            return ActionResult.isFailure(error="Research prompt is required")
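Reviewer note: a worked input/output pair for _build_research_prompt as defined above, assuming serialize_context JSON-dumps dicts (see the assumed sketch earlier in this compare); the values are made up for illustration.

# Example input and the prompt it assembles, given the assumptions above.
parameters = {
    "prompt": "Summarize the market data.",
    "context": {"region": "EU"},
    "documentList": {"documents": [{"documentData": "Q3 revenue grew 12%."}]},
}
expected = (
    'Kontext:\n{"region": "EU"}\n\n'
    "Dokumente:\nQ3 revenue grew 12%.\n\n"
    "Summarize the market data."
)
# _build_research_prompt(parameters) == expected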

View file

@ -289,6 +289,30 @@ class MethodAi(MethodBase):
                required=True,
                description="Description of the document to generate"
            ),
+            "outputFormat": WorkflowActionParameter(
+                name="outputFormat",
+                type="str",
+                frontendType=FrontendType.SELECT,
+                frontendOptions=["docx", "pdf", "txt", "html", "md"],
+                required=False,
+                default="docx",
+                description="Rendered output format (same choices as file.create). If omitted alongside resultType, the model may infer format from the prompt."
+            ),
+            "title": WorkflowActionParameter(
+                name="title",
+                type="str",
+                frontendType=FrontendType.TEXT,
+                required=False,
+                description="Document title / metadata (optional); used as generation title and for file naming hints."
+            ),
+            "context": WorkflowActionParameter(
+                name="context",
+                type="Any",
+                frontendType=FrontendType.TEXTAREA,
+                required=False,
+                default="",
+                description="Additional structured or text context from upstream steps; serialized into the prompt."
+            ),
            "documentList": WorkflowActionParameter(
                name="documentList",
                type="DocumentList",
@ -302,16 +326,15 @@ class MethodAi(MethodBase):
                frontendType=FrontendType.SELECT,
                frontendOptions=["letter", "memo", "proposal", "contract", "report", "email"],
                required=False,
-                description="Type of document"
+                description="Type of document (content hint for the model); used as title fallback when title is empty."
            ),
            "resultType": WorkflowActionParameter(
                name="resultType",
                type="str",
                frontendType=FrontendType.TEXT,
                required=False,
-                default="txt",
-                description="Output format (e.g., txt, html, pdf, docx, md, json, csv, xlsx, pptx, png, jpg). Optional: if omitted, formats are determined from prompt by AI. Default \"txt\" is validation fallback only. With per-document format determination, AI can determine different formats for different documents based on prompt."
+                description="Legacy/API output format extension (e.g. txt, docx). Ignored when outputFormat is set."
            ),
        },
        execute=generateDocument.__get__(self, self.__class__)
    ),
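Reviewer note: a hypothetical flow-step payload exercising the new schema; the exact keys a flow passes are assumed from the WorkflowActionParameter names declared above, and the values are made up.

# Illustrative ai.generateDocument step parameters under the new schema.
step_parameters = {
    "prompt": "Draft a two-page project status report.",
    "outputFormat": "pdf",           # wins over resultType
    "resultType": "docx",            # legacy field, ignored here
    "title": "Status Report KW 12",
    "context": {"project": "PowerOn", "phase": "pilot"},  # serialized into the prompt
    "documentType": "report",        # content hint; title fallback when title is empty
}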

View file

@ -31,8 +31,30 @@ async def list_tasks(self, parameters: Dict[str, Any]) -> ActionResult:
    page = int(parameters.get("page") or 0)
    include_closed = bool(parameters.get("includeClosed", False))

+    dateFilters = {}
+    for key in ("dateCreatedGt", "dateCreatedLt", "dateUpdatedGt", "dateUpdatedLt"):
+        val = parameters.get(key)
+        if val is not None and str(val).strip():
+            try:
+                dateFilters[key] = int(val)
+            except (ValueError, TypeError):
+                pass
+
+    rawCustomFields = parameters.get("customFields")
+    customFields = None
+    if rawCustomFields:
+        if isinstance(rawCustomFields, str):
+            try:
+                customFields = json.loads(rawCustomFields)
+            except json.JSONDecodeError:
+                return ActionResult.isFailure(error="customFields must be valid JSON array")
+        elif isinstance(rawCustomFields, list):
+            customFields = rawCustomFields
+
    data = await self.services.clickup.getTasksInList(
-        list_id, page=page, include_closed=include_closed, subtasks=True
+        list_id, page=page, include_closed=include_closed, subtasks=True,
+        **dateFilters, customFields=customFields,
    )
    if isinstance(data, dict) and data.get("error"):
        return ActionResult.isFailure(error=str(data.get("error")) + (data.get("body") or ""))
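Reviewer note: an illustrative parameters dict for the extended list_tasks. The listId key and all id values are made up; the customFields payload follows the field_id/operator/value shape quoted in the parameter description below.

# Example list_tasks parameters using the new date and custom-field filters.
import json

parameters = {
    "listId": "901100123456",        # illustrative ClickUp list id
    "includeClosed": False,
    "dateUpdatedGt": 1717200000000,  # Unix ms: only tasks updated after this
    "customFields": json.dumps([
        {"field_id": "abc-123", "operator": "=", "value": "approved"},
    ]),
}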

View file

@ -66,6 +66,41 @@ class MethodClickup(MethodBase):
            default=False,
            description="Include closed tasks",
        ),
+        "dateCreatedGt": WorkflowActionParameter(
+            name="dateCreatedGt",
+            type="int",
+            frontendType=FrontendType.NUMBER,
+            required=False,
+            description="Filter: created after this Unix ms timestamp",
+        ),
+        "dateCreatedLt": WorkflowActionParameter(
+            name="dateCreatedLt",
+            type="int",
+            frontendType=FrontendType.NUMBER,
+            required=False,
+            description="Filter: created before this Unix ms timestamp",
+        ),
+        "dateUpdatedGt": WorkflowActionParameter(
+            name="dateUpdatedGt",
+            type="int",
+            frontendType=FrontendType.NUMBER,
+            required=False,
+            description="Filter: updated after this Unix ms timestamp",
+        ),
+        "dateUpdatedLt": WorkflowActionParameter(
+            name="dateUpdatedLt",
+            type="int",
+            frontendType=FrontendType.NUMBER,
+            required=False,
+            description="Filter: updated before this Unix ms timestamp",
+        ),
+        "customFields": WorkflowActionParameter(
+            name="customFields",
+            type="str",
+            frontendType=FrontendType.TEXTAREA,
+            required=False,
+            description='JSON array of custom field filters per ClickUp API, e.g. [{"field_id":"abc","operator":"=","value":"123"}]',
+        ),
    },
    execute=list_tasks.__get__(self, self.__class__),
),

View file

@ -35,6 +35,12 @@ def _persistDocumentsToUserFiles(
        return
    if not mgmt:
        return
+    logger.info(
+        "file.create persist: mgmt=%s id(mgmt)=%s has_createFileData=%s",
+        type(mgmt).__name__,
+        id(mgmt),
+        hasattr(mgmt, "createFileData"),
+    )
    for doc in action_documents:
        try:
            doc_data = doc.documentData if hasattr(doc, "documentData") else doc.get("documentData")
@ -54,8 +60,15 @@ def _persistDocumentsToUserFiles(
or doc.get("mimeType") or doc.get("mimeType")
or "application/octet-stream" or "application/octet-stream"
) )
logger.info(
"file.create persist: calling createFile name=%s bytes=%s",
doc_name,
len(content),
)
file_item = mgmt.createFile(doc_name, mime, content) file_item = mgmt.createFile(doc_name, mime, content)
mgmt.createFileData(file_item.id, content) logger.info("file.create persist: createFile returned id=%s", file_item.id)
ok = mgmt.createFileData(file_item.id, content)
logger.info("file.create persist: createFileData returned %s for id=%s", ok, file_item.id)
meta = getattr(doc, "validationMetadata", None) or doc.get("validationMetadata") or {} meta = getattr(doc, "validationMetadata", None) or doc.get("validationMetadata") or {}
if isinstance(meta, dict): if isinstance(meta, dict):
meta["fileId"] = file_item.id meta["fileId"] = file_item.id
@ -74,12 +87,15 @@ async def create(self, parameters: Dict[str, Any]) -> ActionResult:
    Create a file from context (text/markdown from upstream AI node).
    Uses GenerationService.renderReport to produce docx, pdf, txt, md, html, xlsx, etc.
    """
-    context = parameters.get("context", "") or parameters.get("text", "") or ""
-    if not isinstance(context, str):
-        context = str(context) if context else ""
-    context = context.strip()
+    from modules.workflows.methods.methodAi._common import serialize_context
+    raw_context = parameters.get("context", "") or parameters.get("text", "") or ""
+    context = serialize_context(raw_context)
    if not context:
+        logger.warning(
+            "file.create: context empty after resolve — check DataRefs (e.g. Antworttext / "
+            "documents[0].documentData from the AI step)."
+        )
        return ActionResult.isFailure(error="context is required (connect an AI node or provide text)")

    outputFormat = (parameters.get("outputFormat") or "docx").strip().lower().lstrip(".")
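Reviewer note: the persist path above is a two-step contract: createFile registers the FileItem, createFileData writes the bytes, then the new fileId is backfilled into validationMetadata. A self-contained mock run of that contract, with FakeMgmt standing in for the management interface (which is not shown in this diff):

# Self-contained mock of the two-step persist contract used above.
import uuid
from types import SimpleNamespace

class FakeMgmt:
    def __init__(self):
        self.files = {}
    def createFile(self, name, mime, content):
        # Registers the FileItem and returns it (no bytes written yet).
        item = SimpleNamespace(id=str(uuid.uuid4()), fileName=name, mimeType=mime)
        self.files[item.id] = None
        return item
    def createFileData(self, file_id, content):
        # Second step: actually store the bytes.
        self.files[file_id] = content
        return True

mgmt = FakeMgmt()
doc = SimpleNamespace(documentData=b"hello", validationMetadata={})
file_item = mgmt.createFile("report.txt", "text/plain", doc.documentData)
ok = mgmt.createFileData(file_item.id, doc.documentData)
doc.validationMetadata["fileId"] = file_item.id   # backfill, as in the hunk above
assert ok and mgmt.files[file_item.id] == b"hello"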

View file

@ -14,7 +14,8 @@ async def composeAndDraftEmailWithContext(self, parameters: Dict[str, Any]) -> A
    try:
        connectionReference = parameters.get("connectionReference")
        to = parameters.get("to") or []  # Optional for drafts - can save draft without recipients
-        context = parameters.get("context")
+        from modules.workflows.methods.methodAi._common import serialize_context
+        context = serialize_context(parameters.get("context")) or None
        documentList = parameters.get("documentList") or []
        replySourceDocuments = parameters.get("replySourceDocuments") or []  # Original email(s) for reply attachment
        # ``attachments`` (added in 2026-04 for the PWG pilot) is a list of

View file

@ -0,0 +1,58 @@
"""Stage 0: verify FileFolder table + FileItem.folderId column in management DB.
Run from the gateway directory (same as uvicorn):
python -m scripts.stage0_filefolder_schema_check
"""
from modules.connectors.connectorDbPostgre import getCachedConnector
from modules.shared.configuration import APP_CONFIG
managementDatabase = "poweron_management"
dbHost = APP_CONFIG.get("DB_HOST", "_no_config_default_data")
dbUser = APP_CONFIG.get("DB_USER")
dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET")
dbPort = int(APP_CONFIG.get("DB_PORT", 5432))
c = getCachedConnector(
dbHost=dbHost,
dbDatabase=managementDatabase,
dbUser=dbUser,
dbPassword=dbPassword,
dbPort=dbPort,
userId=None,
)
if not c or not c.connection:
print("STAGE0: DB_CONNECTION=none (check config.ini / .env)")
raise SystemExit(2)
cur = c.connection.cursor()
def _scalar(cur):
row = cur.fetchone()
if row is None:
return None
if isinstance(row, dict):
return next(iter(row.values()))
return row[0]
cur.execute(
"""
SELECT EXISTS (
SELECT 1 FROM information_schema.tables
WHERE table_name = 'FileFolder'
) AS ok
"""
)
print("STAGE0: FileFolder_table=", _scalar(cur))
cur.execute(
"""
SELECT EXISTS (
SELECT 1 FROM information_schema.columns
WHERE table_name = 'FileItem' AND column_name = 'folderId'
) AS ok
"""
)
print("STAGE0: FileItem_folderId_column=", _scalar(cur))
cur.close()
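Reviewer note: on a healthy schema the script prints "STAGE0: FileFolder_table= True" and "STAGE0: FileItem_folderId_column= True"; when the connector cannot be built it prints the DB_CONNECTION=none line and exits with code 2, so it can double as a deploy-gate check.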

View file

@ -0,0 +1,327 @@
# Copyright (c) 2026 Patrick Motsch
# All rights reserved.
"""Unit tests for folder RBAC two-user matrix (ownership & scope visibility)."""
import uuid

import pytest
from unittest.mock import Mock, patch, MagicMock
from typing import Dict, Any, List, Optional

from modules.datamodels.datamodelFiles import FileFolder, FileItem
from modules.datamodels.datamodelUam import User, UserPermissions, AccessLevel
from modules.interfaces.interfaceDbManagement import ComponentObjects, FileNotFoundError

_MANDATE_ID = "mandate-test-1"
_FEATURE_INSTANCE_ID = "fi-test-1"
_USER_A = "user-a-id"
_USER_B = "user-b-id"

# ── Fakes & helpers ──────────────────────────────────────────────────────────

class _FakeDb:
    """In-memory database mock."""

    def __init__(self):
        self._tables: Dict[str, Dict[str, Dict[str, Any]]] = {}
        self.connection = MagicMock()

    def getRecordset(self, modelClass, recordFilter=None):
        tableName = modelClass.__name__
        records = list(self._tables.get(tableName, {}).values())
        if not recordFilter:
            return records
        return [
            r for r in records
            if all(r.get(k) == v for k, v in recordFilter.items())
        ]

    def recordCreate(self, modelClass, data):
        tableName = modelClass.__name__
        self._tables.setdefault(tableName, {})
        rec = data.model_dump() if hasattr(data, "model_dump") else dict(data)
        rec.setdefault("id", str(uuid.uuid4()))
        self._tables[tableName][rec["id"]] = rec
        return rec

    def recordModify(self, modelClass, recordId, updates):
        tbl = self._tables.get(modelClass.__name__, {})
        if recordId in tbl:
            tbl[recordId].update(updates)
            return True
        return False

    def recordDelete(self, modelClass, recordId):
        tbl = self._tables.get(modelClass.__name__, {})
        if recordId in tbl:
            del tbl[recordId]
            return True
        return False

    def updateContext(self, userId):
        pass

    def _ensure_connection(self):
        pass

    def _ensureTableExists(self, modelClass):
        return True

    def seed(self, modelClass, record: Dict[str, Any]):
        tableName = modelClass.__name__
        self._tables.setdefault(tableName, {})
        self._tables[tableName][record["id"]] = dict(record)

def _makeUser(userId, username="testuser"):
    return User(id=userId, username=username, language="en")

def _makeRbac(
    createLevel=AccessLevel.ALL,
    readLevel=AccessLevel.ALL,
    updateLevel=AccessLevel.MY,
    deleteLevel=AccessLevel.MY,
):
    """Default: regular user can read all, but write only own records."""
    rbac = Mock()
    perms = UserPermissions(
        view=True,
        read=readLevel,
        create=createLevel,
        update=updateLevel,
        delete=deleteLevel,
    )
    rbac.getUserPermissions.return_value = perms
    return rbac

def _buildComponent(userId, fakeDb, rbac=None):
    with patch.object(ComponentObjects, "__init__", lambda self: None):
        comp = ComponentObjects()
        comp.db = fakeDb
        comp.currentUser = _makeUser(userId)
        comp.userId = userId
        comp.mandateId = _MANDATE_ID
        comp.featureInstanceId = _FEATURE_INSTANCE_ID
        comp.rbac = rbac or _makeRbac()
        comp.userLanguage = "en"
        return comp

def _makeFolder(
    folderId=None, name="Folder", parentId=None,
    userId=_USER_A, scope="personal", neutralize=False,
):
    return {
        "id": folderId or str(uuid.uuid4()),
        "name": name,
        "parentId": parentId,
        "mandateId": _MANDATE_ID,
        "featureInstanceId": _FEATURE_INSTANCE_ID,
        "scope": scope,
        "neutralize": neutralize,
        "sysCreatedBy": userId,
        "sysCreatedAt": 1700000000.0,
        "sysModifiedAt": 1700000000.0,
        "sysModifiedBy": None,
    }

def _makeFile(fileId=None, folderId=None, userId=_USER_A, scope="personal"):
    return {
        "id": fileId or str(uuid.uuid4()),
        "fileName": "test.txt",
        "mimeType": "text/plain",
        "fileHash": "abc123",
        "fileSize": 100,
        "folderId": folderId,
        "mandateId": _MANDATE_ID,
        "featureInstanceId": _FEATURE_INSTANCE_ID,
        "scope": scope,
        "neutralize": False,
        "sysCreatedBy": userId,
        "sysCreatedAt": 1700000000.0,
        "sysModifiedAt": 1700000000.0,
        "sysModifiedBy": None,
        "tags": None,
        "description": None,
        "status": None,
    }

def _scopeAwareMock(fakeDb):
    """Side-effect for getRecordsetWithRBAC that simulates scope-based visibility.

    Visibility rules:
      - Owner (sysCreatedBy == currentUser.id) always sees the record
      - scope='global' -> visible to everyone
      - scope='mandate' -> visible when mandateId matches
      - scope='featureInstance' -> visible when featureInstanceId matches
      - scope='personal' -> owner only (already covered above)
    """
    def _fn(connector, modelClass, currentUser, recordFilter=None, **kwargs):
        requestMandateId = kwargs.get("mandateId", _MANDATE_ID)
        requestFiId = kwargs.get("featureInstanceId", _FEATURE_INSTANCE_ID)
        allRecords = fakeDb.getRecordset(modelClass, recordFilter=recordFilter)
        visible = []
        for rec in allRecords:
            if rec.get("sysCreatedBy") == currentUser.id:
                visible.append(rec)
                continue
            scope = rec.get("scope", "personal")
            if scope == "global":
                visible.append(rec)
            elif scope == "mandate" and rec.get("mandateId") == requestMandateId:
                visible.append(rec)
            elif scope == "featureInstance" and rec.get("featureInstanceId") == requestFiId:
                visible.append(rec)
        return visible
    return _fn

# ── Test class ───────────────────────────────────────────────────────────────

@patch("modules.interfaces.interfaceDbManagement.getRecordsetWithRBAC")
class TestFolderRbac:
    """Two-user matrix: ownership, scope visibility, and write-access guards."""

    # ── 1. Ownership visibility ───────────────────────────────────────────

    def testUserAFolderInOwnTreeNotInUserBOwnTree(self, mockRbacGet):
        """User A's personal folder appears in A's own tree, not in B's."""
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="fa-1", name="A-Folder", userId=_USER_A))
        mockRbacGet.side_effect = _scopeAwareMock(fakeDb)
        compA = _buildComponent(_USER_A, fakeDb)
        ownA = compA.getOwnFolderTree()
        assert any(f["id"] == "fa-1" for f in ownA)
        compB = _buildComponent(_USER_B, fakeDb)
        ownB = compB.getOwnFolderTree()
        assert not any(f["id"] == "fa-1" for f in ownB)

    # ── 2. Scope change -> shared visibility ──────────────────────────────

    def testScopeChangeToMandateMakesVisibleToUserB(self, mockRbacGet):
        """Changing scope from personal to mandate makes the folder appear
        in User B's shared tree."""
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="fa-1", scope="personal", userId=_USER_A))
        mockRbacGet.side_effect = _scopeAwareMock(fakeDb)
        compB = _buildComponent(_USER_B, fakeDb)
        sharedBefore = compB.getSharedFolderTree()
        assert not any(f["id"] == "fa-1" for f in sharedBefore)
        fakeDb.recordModify(FileFolder, "fa-1", {"scope": "mandate"})
        sharedAfter = compB.getSharedFolderTree()
        assert any(f["id"] == "fa-1" for f in sharedAfter)

    # ── 3-7. Non-owner cannot mutate ──────────────────────────────────────

    def testUserBCannotRenameFolderOfUserA(self, mockRbacGet):
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="fa-1", scope="mandate", userId=_USER_A))
        mockRbacGet.side_effect = _scopeAwareMock(fakeDb)
        compB = _buildComponent(_USER_B, fakeDb)
        with pytest.raises(PermissionError):
            compB.renameFolder("fa-1", "Hijacked")

    def testUserBCannotMoveFolderOfUserA(self, mockRbacGet):
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="fa-1", scope="mandate", userId=_USER_A))
        fakeDb.seed(FileFolder, _makeFolder(folderId="fb-1", scope="mandate", userId=_USER_B))
        mockRbacGet.side_effect = _scopeAwareMock(fakeDb)
        compB = _buildComponent(_USER_B, fakeDb)
        with pytest.raises(PermissionError):
            compB.moveFolder("fa-1", "fb-1")

    def testUserBCannotDeleteFolderOfUserA(self, mockRbacGet):
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="fa-1", scope="mandate", userId=_USER_A))
        mockRbacGet.side_effect = _scopeAwareMock(fakeDb)
        compB = _buildComponent(_USER_B, fakeDb)
        with pytest.raises(PermissionError):
            compB.deleteFolderCascade("fa-1")

    def testUserBCannotPatchScopeOnFolderOfUserA(self, mockRbacGet):
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="fa-1", scope="mandate", userId=_USER_A))
        mockRbacGet.side_effect = _scopeAwareMock(fakeDb)
        compB = _buildComponent(_USER_B, fakeDb)
        with pytest.raises(PermissionError):
            compB.patchFolderScope("fa-1", "personal")

    def testUserBCannotPatchNeutralizeOnFolderOfUserA(self, mockRbacGet):
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="fa-1", scope="mandate", userId=_USER_A))
        mockRbacGet.side_effect = _scopeAwareMock(fakeDb)
        compB = _buildComponent(_USER_B, fakeDb)
        with pytest.raises(PermissionError):
            compB.patchFolderNeutralize("fa-1", True)

    # ── 8. contextOrphan ──────────────────────────────────────────────────

    def testContextOrphanWhenParentFolderNotShared(self, mockRbacGet):
        """User A's parent folder is personal, child folder is mandate.
        User B sees only the child, flagged as contextOrphan."""
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(
            folderId="parent-f", name="Private Parent", userId=_USER_A, scope="personal",
        ))
        fakeDb.seed(FileFolder, _makeFolder(
            folderId="child-f", name="Shared Child", userId=_USER_A,
            parentId="parent-f", scope="mandate",
        ))
        mockRbacGet.side_effect = _scopeAwareMock(fakeDb)
        compB = _buildComponent(_USER_B, fakeDb)
        shared = compB.getSharedFolderTree()
        assert len(shared) == 1
        assert shared[0]["id"] == "child-f"
        assert shared[0]["contextOrphan"] is True

    # ── 9. Shared folder children visible ─────────────────────────────────

    def testSharedFolderMakesChildrenVisible(self, mockRbacGet):
        """When User A shares a folder tree (scope=mandate), all child folders
        become visible in User B's shared tree."""
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(
            folderId="root-f", name="Root", userId=_USER_A, scope="mandate",
        ))
        fakeDb.seed(FileFolder, _makeFolder(
            folderId="child1-f", name="Child 1", userId=_USER_A,
            parentId="root-f", scope="mandate",
        ))
        fakeDb.seed(FileFolder, _makeFolder(
            folderId="child2-f", name="Child 2", userId=_USER_A,
            parentId="root-f", scope="mandate",
        ))
        fakeDb.seed(FileFolder, _makeFolder(
            folderId="grandchild-f", name="Grandchild", userId=_USER_A,
            parentId="child1-f", scope="mandate",
        ))
        mockRbacGet.side_effect = _scopeAwareMock(fakeDb)
        compB = _buildComponent(_USER_B, fakeDb)
        shared = compB.getSharedFolderTree()
        sharedIds = {f["id"] for f in shared}
        assert sharedIds == {"root-f", "child1-f", "child2-f", "grandchild-f"}
        byId = {f["id"]: f for f in shared}
        assert byId["root-f"]["contextOrphan"] is False
        assert byId["child1-f"]["contextOrphan"] is False
        assert byId["child2-f"]["contextOrphan"] is False
        assert byId["grandchild-f"]["contextOrphan"] is False

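Reviewer note: the visibility rules encoded in _scopeAwareMock, restated as a plain standalone predicate for quick reference (illustrative names, not production code):

# Standalone restatement of the scope-visibility rules from _scopeAwareMock.
def is_visible(record, user_id, mandate_id, feature_instance_id) -> bool:
    if record.get("sysCreatedBy") == user_id:  # owner always sees own records
        return True
    scope = record.get("scope", "personal")
    if scope == "global":
        return True
    if scope == "mandate":
        return record.get("mandateId") == mandate_id
    if scope == "featureInstance":
        return record.get("featureInstanceId") == feature_instance_id
    return False  # personal: owner only

rec = {"sysCreatedBy": "user-a-id", "scope": "personal", "mandateId": "mandate-test-1"}
assert is_visible(rec, "user-a-id", "mandate-test-1", "fi-test-1")
assert not is_visible(rec, "user-b-id", "mandate-test-1", "fi-test-1")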
View file

@ -0,0 +1,392 @@
# Copyright (c) 2026 Patrick Motsch
# All rights reserved.
"""Unit tests for folder CRUD operations in ComponentObjects."""
import uuid

import pytest
from unittest.mock import Mock, patch, MagicMock
from typing import Dict, Any, List, Optional

from modules.datamodels.datamodelFiles import FileFolder, FileItem
from modules.datamodels.datamodelUam import User, UserPermissions, AccessLevel
from modules.interfaces.interfaceDbManagement import ComponentObjects, FileNotFoundError

_MANDATE_ID = "mandate-test-1"
_FEATURE_INSTANCE_ID = "fi-test-1"
_USER_ID = "user-a-id"

# ── Fakes & helpers ──────────────────────────────────────────────────────────

class _FakeDb:
    """In-memory database mock that mimics DatabaseConnector for unit tests."""

    def __init__(self):
        self._tables: Dict[str, Dict[str, Dict[str, Any]]] = {}
        self.connection = MagicMock()

    def getRecordset(self, modelClass, recordFilter=None):
        tableName = modelClass.__name__
        records = list(self._tables.get(tableName, {}).values())
        if not recordFilter:
            return records
        return [
            r for r in records
            if all(r.get(k) == v for k, v in recordFilter.items())
        ]

    def recordCreate(self, modelClass, data):
        tableName = modelClass.__name__
        self._tables.setdefault(tableName, {})
        rec = data.model_dump() if hasattr(data, "model_dump") else dict(data)
        rec.setdefault("id", str(uuid.uuid4()))
        self._tables[tableName][rec["id"]] = rec
        return rec

    def recordModify(self, modelClass, recordId, updates):
        tableName = modelClass.__name__
        tbl = self._tables.get(tableName, {})
        if recordId in tbl:
            tbl[recordId].update(updates)
            return True
        return False

    def recordDelete(self, modelClass, recordId):
        tableName = modelClass.__name__
        tbl = self._tables.get(tableName, {})
        if recordId in tbl:
            del tbl[recordId]
            return True
        return False

    def updateContext(self, userId):
        pass

    def _ensure_connection(self):
        pass

    def _ensureTableExists(self, modelClass):
        return True

    def seed(self, modelClass, record: Dict[str, Any]):
        tableName = modelClass.__name__
        self._tables.setdefault(tableName, {})
        self._tables[tableName][record["id"]] = dict(record)

def _makeUser(userId=_USER_ID, username="testuser"):
    return User(id=userId, username=username, language="en")

def _makeRbac(
    createLevel=AccessLevel.ALL,
    readLevel=AccessLevel.ALL,
    updateLevel=AccessLevel.ALL,
    deleteLevel=AccessLevel.ALL,
):
    rbac = Mock()
    perms = UserPermissions(
        view=True,
        read=readLevel,
        create=createLevel,
        update=updateLevel,
        delete=deleteLevel,
    )
    rbac.getUserPermissions.return_value = perms
    return rbac

def _buildComponent(
    userId=_USER_ID,
    fakeDb=None,
    rbac=None,
    mandateId=_MANDATE_ID,
    featureInstanceId=_FEATURE_INSTANCE_ID,
):
    """Construct a ComponentObjects with mocked internals (no real DB)."""
    with patch.object(ComponentObjects, "__init__", lambda self: None):
        comp = ComponentObjects()
        comp.db = fakeDb or _FakeDb()
        comp.currentUser = _makeUser(userId)
        comp.userId = userId
        comp.mandateId = mandateId
        comp.featureInstanceId = featureInstanceId
        comp.rbac = rbac or _makeRbac()
        comp.userLanguage = "en"
        return comp

def _rbacFromFakeDb(fakeDb):
    """Side-effect for getRecordsetWithRBAC that delegates to _FakeDb."""
    def _fn(connector, modelClass, currentUser, recordFilter=None, **kwargs):
        return fakeDb.getRecordset(modelClass, recordFilter=recordFilter)
    return _fn

def _makeFolder(
    folderId=None, name="Folder", parentId=None,
    userId=_USER_ID, scope="personal", neutralize=False,
):
    return {
        "id": folderId or str(uuid.uuid4()),
        "name": name,
        "parentId": parentId,
        "mandateId": _MANDATE_ID,
        "featureInstanceId": _FEATURE_INSTANCE_ID,
        "scope": scope,
        "neutralize": neutralize,
        "sysCreatedBy": userId,
        "sysCreatedAt": 1700000000.0,
        "sysModifiedAt": 1700000000.0,
        "sysModifiedBy": None,
    }

def _makeFile(fileId=None, folderId=None, userId=_USER_ID, scope="personal"):
    return {
        "id": fileId or str(uuid.uuid4()),
        "fileName": "test.txt",
        "mimeType": "text/plain",
        "fileHash": "abc123",
        "fileSize": 100,
        "folderId": folderId,
        "mandateId": _MANDATE_ID,
        "featureInstanceId": _FEATURE_INSTANCE_ID,
        "scope": scope,
        "neutralize": False,
        "sysCreatedBy": userId,
        "sysCreatedAt": 1700000000.0,
        "sysModifiedAt": 1700000000.0,
        "sysModifiedBy": None,
        "tags": None,
        "description": None,
        "status": None,
    }

# ── Test class ───────────────────────────────────────────────────────────────

@patch("modules.interfaces.interfaceDbManagement.getRecordsetWithRBAC")
class TestFolderCrud:
    """Tests for folder create / rename / move / delete / patch operations."""

    # ── Create ────────────────────────────────────────────────────────────

    def testCreateFolderHappyPath(self, mockRbacGet):
        fakeDb = _FakeDb()
        comp = _buildComponent(fakeDb=fakeDb)
        mockRbacGet.side_effect = _rbacFromFakeDb(fakeDb)
        result = comp.createFolder("Test Folder")
        assert result["name"] == "Test Folder"
        assert result["scope"] == "personal"
        assert result["parentId"] is None
        assert result["mandateId"] == _MANDATE_ID

    def testCreateFolderWithParent(self, mockRbacGet):
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="parent-1", name="Parent"))
        comp = _buildComponent(fakeDb=fakeDb)
        mockRbacGet.side_effect = _rbacFromFakeDb(fakeDb)
        result = comp.createFolder("Child Folder", parentId="parent-1")
        assert result["name"] == "Child Folder"
        assert result["parentId"] == "parent-1"

    def testCreateFolderMissingNameNoInterfaceValidation(self, mockRbacGet):
        """Interface does not validate empty name; the route layer returns 400."""
        fakeDb = _FakeDb()
        comp = _buildComponent(fakeDb=fakeDb)
        mockRbacGet.side_effect = _rbacFromFakeDb(fakeDb)
        result = comp.createFolder("")
        assert result["name"] == ""

    # ── Rename ────────────────────────────────────────────────────────────

    def testRenameFolderHappyPath(self, mockRbacGet):
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="f-1", name="Old Name"))
        comp = _buildComponent(fakeDb=fakeDb)
        mockRbacGet.side_effect = _rbacFromFakeDb(fakeDb)
        result = comp.renameFolder("f-1", "New Name")
        assert result["name"] == "New Name"
        assert fakeDb.getRecordset(FileFolder, {"id": "f-1"})[0]["name"] == "New Name"

    def testRenameFolderNotFound(self, mockRbacGet):
        fakeDb = _FakeDb()
        comp = _buildComponent(fakeDb=fakeDb)
        mockRbacGet.side_effect = _rbacFromFakeDb(fakeDb)
        with pytest.raises(FileNotFoundError):
            comp.renameFolder("nonexistent", "New Name")

    # ── Move ──────────────────────────────────────────────────────────────

    def testMoveFolderHappyPath(self, mockRbacGet):
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="f-1", name="Movable"))
        fakeDb.seed(FileFolder, _makeFolder(folderId="t-1", name="Target"))
        comp = _buildComponent(fakeDb=fakeDb)
        mockRbacGet.side_effect = _rbacFromFakeDb(fakeDb)
        result = comp.moveFolder("f-1", "t-1")
        assert result["parentId"] == "t-1"
        assert fakeDb.getRecordset(FileFolder, {"id": "f-1"})[0]["parentId"] == "t-1"

    def testMoveFolderToRoot(self, mockRbacGet):
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="f-1", name="Nested", parentId="old"))
        comp = _buildComponent(fakeDb=fakeDb)
        mockRbacGet.side_effect = _rbacFromFakeDb(fakeDb)
        result = comp.moveFolder("f-1", None)
        assert result["parentId"] is None

    def testMoveFolderCircularReference(self, mockRbacGet):
        """A -> B -> C: moving A under C creates a cycle."""
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="a", name="A", parentId=None))
        fakeDb.seed(FileFolder, _makeFolder(folderId="b", name="B", parentId="a"))
        fakeDb.seed(FileFolder, _makeFolder(folderId="c", name="C", parentId="b"))
        comp = _buildComponent(fakeDb=fakeDb)
        mockRbacGet.side_effect = _rbacFromFakeDb(fakeDb)
        with pytest.raises(ValueError, match="circular reference"):
            comp.moveFolder("a", "c")

    # ── Delete cascade ────────────────────────────────────────────────────

    def testDeleteFolderCascade(self, mockRbacGet):
        """Deleting root folder removes root + child + their files."""
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="root", name="Root"))
        fakeDb.seed(FileFolder, _makeFolder(folderId="child", name="Child", parentId="root"))
        fakeDb.seed(FileItem, _makeFile(fileId="file-1", folderId="root"))
        fakeDb.seed(FileItem, _makeFile(fileId="file-2", folderId="child"))
        comp = _buildComponent(fakeDb=fakeDb)
        mockRbacGet.side_effect = _rbacFromFakeDb(fakeDb)
        result = comp.deleteFolderCascade("root")
        assert result["deletedFolders"] == 2
        assert result["deletedFiles"] == 2

    def testDeleteFolderNotFound(self, mockRbacGet):
        fakeDb = _FakeDb()
        comp = _buildComponent(fakeDb=fakeDb)
        mockRbacGet.side_effect = _rbacFromFakeDb(fakeDb)
        with pytest.raises(FileNotFoundError):
            comp.deleteFolderCascade("nonexistent")

    # ── Patch scope ───────────────────────────────────────────────────────

    def testPatchScopeNoCascade(self, mockRbacGet):
        """Change folder scope without cascading to files."""
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="f-1", scope="personal"))
        fakeDb.seed(FileItem, _makeFile(fileId="file-1", folderId="f-1"))
        comp = _buildComponent(fakeDb=fakeDb)
        mockRbacGet.side_effect = _rbacFromFakeDb(fakeDb)
        result = comp.patchFolderScope("f-1", "mandate", cascadeToFiles=False)
        assert result["scope"] == "mandate"
        assert result["filesUpdated"] == 0
        assert fakeDb.getRecordset(FileFolder, {"id": "f-1"})[0]["scope"] == "mandate"
        assert fakeDb.getRecordset(FileItem, {"id": "file-1"})[0]["scope"] == "personal"

    def testPatchScopeWithCascade(self, mockRbacGet):
        """cascadeToFiles=True updates only owned files in the folder."""
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="f-1", scope="personal"))
        fakeDb.seed(FileItem, _makeFile(fileId="own-file", folderId="f-1"))
        fakeDb.seed(FileItem, _makeFile(fileId="other-file", folderId="f-1", userId="user-b"))
        comp = _buildComponent(fakeDb=fakeDb)
        mockRbacGet.side_effect = _rbacFromFakeDb(fakeDb)
        result = comp.patchFolderScope("f-1", "mandate", cascadeToFiles=True)
        assert result["filesUpdated"] == 1
        assert fakeDb.getRecordset(FileItem, {"id": "own-file"})[0]["scope"] == "mandate"
        assert fakeDb.getRecordset(FileItem, {"id": "other-file"})[0]["scope"] == "personal"

    def testPatchScopeInvalid(self, mockRbacGet):
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="f-1"))
        comp = _buildComponent(fakeDb=fakeDb)
        mockRbacGet.side_effect = _rbacFromFakeDb(fakeDb)
        with pytest.raises(ValueError, match="Invalid scope"):
            comp.patchFolderScope("f-1", "invalid_scope")

    # ── Patch neutralize ──────────────────────────────────────────────────

    def testPatchNeutralizeToggle(self, mockRbacGet):
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="f-1", neutralize=False))
        fakeDb.seed(FileItem, _makeFile(fileId="file-1", folderId="f-1"))
        comp = _buildComponent(fakeDb=fakeDb)
        mockRbacGet.side_effect = _rbacFromFakeDb(fakeDb)
        resultOn = comp.patchFolderNeutralize("f-1", True)
        assert resultOn["neutralize"] is True
        assert resultOn["filesUpdated"] == 1
        assert fakeDb.getRecordset(FileFolder, {"id": "f-1"})[0]["neutralize"] is True
        assert fakeDb.getRecordset(FileItem, {"id": "file-1"})[0]["neutralize"] is True
        resultOff = comp.patchFolderNeutralize("f-1", False)
        assert resultOff["neutralize"] is False
        assert fakeDb.getRecordset(FileItem, {"id": "file-1"})[0]["neutralize"] is False

    # ── Tree queries ──────────────────────────────────────────────────────

    def testGetOwnFolderTree(self, mockRbacGet):
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="own-1", name="Mine"))
        fakeDb.seed(FileFolder, _makeFolder(folderId="other-1", name="Theirs", userId="user-b"))
        comp = _buildComponent(fakeDb=fakeDb)
        mockRbacGet.side_effect = _rbacFromFakeDb(fakeDb)
        result = comp.getOwnFolderTree()
        assert len(result) == 1
        assert result[0]["id"] == "own-1"

    def testGetSharedFolderTreeWithContextOrphan(self, mockRbacGet):
        fakeDb = _FakeDb()
        fakeDb.seed(FileFolder, _makeFolder(folderId="own", name="Own"))
        fakeDb.seed(FileFolder, _makeFolder(
            folderId="shared-root", name="Shared Root", userId="user-b", scope="mandate",
        ))
        fakeDb.seed(FileFolder, _makeFolder(
            folderId="shared-child", name="Shared Child", userId="user-b",
            parentId="shared-root", scope="mandate",
        ))
        fakeDb.seed(FileFolder, _makeFolder(
            folderId="orphan", name="Orphan", userId="user-b",
            parentId="invisible-parent", scope="mandate",
        ))
        comp = _buildComponent(fakeDb=fakeDb)
        mockRbacGet.side_effect = _rbacFromFakeDb(fakeDb)
        result = comp.getSharedFolderTree()
        ids = {r["id"] for r in result}
        assert "own" not in ids
        assert "shared-root" in ids
        assert "shared-child" in ids
        assert "orphan" in ids
        byId = {r["id"]: r for r in result}
        assert byId["shared-root"]["contextOrphan"] is False
        assert byId["shared-child"]["contextOrphan"] is False
        assert byId["orphan"]["contextOrphan"] is True