File tree replaced by grouping in the form generator
commit 06d9910ecd
parent 72d3175f49

14 changed files with 795 additions and 1224 deletions
@@ -1,82 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""FileFolder: hierarchical folder structure for file organization."""
-
-from typing import Optional
-from pydantic import BaseModel, Field
-from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.i18nRegistry import i18nModel
-import uuid
-
-
-@i18nModel("Dateiordner")
-class FileFolder(PowerOnModel):
-    """Hierarchischer Ordner fuer die Dateiverwaltung."""
-    id: str = Field(
-        default_factory=lambda: str(uuid.uuid4()),
-        description="Primary key",
-        json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
-    )
-    name: str = Field(
-        description="Folder name",
-        json_schema_extra={"label": "Name", "frontend_type": "text", "frontend_readonly": False, "frontend_required": True},
-    )
-    parentId: Optional[str] = Field(
-        default=None,
-        description="Parent folder ID (null = root)",
-        json_schema_extra={
-            "label": "Uebergeordneter Ordner",
-            "frontend_type": "text",
-            "frontend_readonly": False,
-            "frontend_required": False,
-            "fk_target": {"db": "poweron_management", "table": "FileFolder", "labelField": "name"},
-        },
-    )
-    mandateId: Optional[str] = Field(
-        default=None,
-        description="Mandate context",
-        json_schema_extra={
-            "label": "Mandanten-ID",
-            "frontend_type": "text",
-            "frontend_readonly": True,
-            "frontend_required": False,
-            "fk_target": {"db": "poweron_app", "table": "Mandate", "labelField": "label"},
-        },
-    )
-    featureInstanceId: Optional[str] = Field(
-        default=None,
-        description="Feature instance context",
-        json_schema_extra={
-            "label": "Feature-Instanz-ID",
-            "frontend_type": "text",
-            "frontend_readonly": True,
-            "frontend_required": False,
-            "fk_target": {"db": "poweron_app", "table": "FeatureInstance", "labelField": "label"},
-        },
-    )
-    scope: str = Field(
-        default="personal",
-        description="Data visibility scope: personal, featureInstance, mandate, global. Inherited by files in this folder.",
-        json_schema_extra={
-            "label": "Sichtbarkeit",
-            "frontend_type": "select",
-            "frontend_readonly": False,
-            "frontend_required": False,
-            "frontend_options": [
-                {"value": "personal", "label": "Persönlich"},
-                {"value": "featureInstance", "label": "Feature-Instanz"},
-                {"value": "mandate", "label": "Mandant"},
-                {"value": "global", "label": "Global"},
-            ],
-        },
-    )
-    neutralize: bool = Field(
-        default=False,
-        description="Whether files in this folder should be neutralized before AI processing. Inherited by new/moved files.",
-        json_schema_extra={
-            "label": "Neutralisieren",
-            "frontend_type": "checkbox",
-            "frontend_readonly": False,
-            "frontend_required": False,
-        },
-    )
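For orientation (not part of the diff): the hierarchy this deleted model encoded — a name plus parentId nesting — reappears later in this commit as nested group-node dicts. A minimal sketch of the mapping the migration script further below performs; the ids are made up:

    # One FileFolder row ...
    folder_row = {"id": "f-1", "name": "Reports", "parentId": None}
    # ... becomes one TableGroupNode-compatible dict:
    group_node = {
        "id": "<new uuid>",
        "name": folder_row["name"],
        "itemIds": [],        # filled from FileItem.folderId assignments
        "subGroups": [],      # built from folders whose parentId == "f-1"
        "meta": {"migratedFromFolderId": folder_row["id"]},
    }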
@@ -68,17 +68,6 @@ class FileItem(PowerOnModel):
         description="Tags for categorization and search",
         json_schema_extra={"label": "Tags", "frontend_type": "tags", "frontend_readonly": False, "frontend_required": False},
     )
-    folderId: Optional[str] = Field(
-        default=None,
-        description="ID of the parent folder",
-        json_schema_extra={
-            "label": "Ordner-ID",
-            "frontend_type": "text",
-            "frontend_readonly": False,
-            "frontend_required": False,
-            "fk_target": {"db": "poweron_management", "table": "FileFolder", "labelField": "name"},
-        },
-    )
     description: Optional[str] = Field(
         default=None,
         description="User-provided description of the file",
@@ -83,7 +83,7 @@ PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
         PortField(name="listId", type="str", description="ClickUp-Listen-ID"),
         PortField(name="name", type="str", required=False, description="Listenname"),
         PortField(name="spaceId", type="str", required=False, description="Space-ID"),
-        PortField(name="folderId", type="str", required=False, description="Ordner-ID"),
+        PortField(name="groupId", type="str", required=False, description="Gruppen-ID für die Gruppierungszuordnung"),
         PortField(name="connection", type="ConnectionRef", required=False,
                   description="ClickUp-Verbindung"),
     ]),
@@ -1202,7 +1202,7 @@ async def patchWorkspaceWorkflowAttachments(


 # ---------------------------------------------------------------------------
-# File and folder list endpoints
+# File endpoints
 # ---------------------------------------------------------------------------

 @router.get("/{instanceId}/files")
@@ -1210,7 +1210,6 @@ async def patchWorkspaceWorkflowAttachments(
 async def listWorkspaceFiles(
     request: Request,
     instanceId: str = Path(...),
-    folderId: Optional[str] = Query(None),
     tags: Optional[str] = Query(None),
     search: Optional[str] = Query(None),
     context: RequestContext = Depends(getRequestContext),
@@ -1265,30 +1264,6 @@ async def getFileContent(
     return Response(content=content, media_type=mimeType)


-@router.get("/{instanceId}/folders")
-@limiter.limit("300/minute")
-async def listWorkspaceFolders(
-    request: Request,
-    instanceId: str = Path(...),
-    parentId: Optional[str] = Query(None),
-    context: RequestContext = Depends(getRequestContext),
-):
-    _mandateId, _ = _validateInstanceAccess(instanceId, context)
-    try:
-        from modules.serviceCenter import getService
-        from modules.serviceCenter.context import ServiceCenterContext
-        ctx = ServiceCenterContext(
-            user=context.user,
-            mandate_id=_mandateId or "",
-            feature_instance_id=instanceId,
-        )
-        chatService = getService("chat", ctx)
-        folders = chatService.listFolders(parentId=parentId)
-        return JSONResponse({"folders": folders or []})
-    except Exception:
-        return JSONResponse({"folders": []})
-
-
 @router.get("/{instanceId}/datasources")
 @limiter.limit("300/minute")
 async def listWorkspaceDataSources(
@@ -20,7 +20,6 @@ from modules.security.rbac import RbacClass
 from modules.datamodels.datamodelRbac import AccessRuleContext
 from modules.datamodels.datamodelUam import AccessLevel
 from modules.datamodels.datamodelFiles import FilePreview, FileItem, FileData
-from modules.datamodels.datamodelFileFolder import FileFolder
 from modules.datamodels.datamodelUtils import Prompt
 from modules.datamodels.datamodelMessaging import (
     MessagingSubscription,
@@ -1103,15 +1102,12 @@ class ComponentObjects:
                 return newfileName
             counter += 1

-    def createFile(self, name: str, mimeType: str, content: bytes, folderId: Optional[str] = None) -> FileItem:
+    def createFile(self, name: str, mimeType: str, content: bytes) -> FileItem:
         """Creates a new file entry if user has permission. Computes fileHash and fileSize from content.

         Duplicate check: if a file with the same user + fileHash + fileName already exists,
         the existing file is returned instead of creating a new one.
         Same hash with different name is allowed (intentional copy by user).
-
-        Args:
-            folderId: Optional parent folder ID. None/empty means the root folder.
         """
         if not self.checkRbacPermission(FileItem, "create"):
             raise PermissionError("No permission to create files")
@@ -1139,11 +1135,6 @@ class ComponentObjects:
         else:
             scope = "personal"

-        # Normalize folderId: treat empty string as "no folder" (= root) – NULL in DB
-        normalizedFolderId: Optional[str] = folderId
-        if isinstance(normalizedFolderId, str) and not normalizedFolderId.strip():
-            normalizedFolderId = None
-
         fileItem = FileItem(
             mandateId=mandateId,
             featureInstanceId=featureInstanceId,
@@ -1152,7 +1143,6 @@ class ComponentObjects:
             mimeType=mimeType,
             fileSize=fileSize,
             fileHash=fileHash,
-            folderId=normalizedFolderId,
         )

         # Store in database
@@ -1277,382 +1267,47 @@ class ComponentObjects:
             self.db.connection.rollback()
             raise FileDeletionError(f"Error deleting files in batch: {str(e)}")

-    # ---- Folder methods ----
-
-    _RESERVED_FOLDER_NAMES = {"(Global)"}
-
-    def _validateFolderName(self, name: str, parentId: Optional[str], excludeFolderId: Optional[str] = None):
-        """Ensures folder name is not reserved and is unique within parent."""
-        if name in self._RESERVED_FOLDER_NAMES:
-            raise ValueError(f"Folder name '{name}' is reserved")
-        if not name or not name.strip():
-            raise ValueError("Folder name cannot be empty")
-        existingFolders = self.db.getRecordset(FileFolder, recordFilter={"parentId": parentId or ""})
-        for f in existingFolders:
-            if f.get("name") == name and f.get("id") != excludeFolderId:
-                raise ValueError(f"Folder '{name}' already exists in this directory")
-
-    def _isDescendantOf(self, folderId: str, ancestorId: str) -> bool:
-        """Checks if folderId is a descendant of ancestorId (circular reference check)."""
-        visited = set()
-        currentId = folderId
-        while currentId:
-            if currentId == ancestorId:
-                return True
-            if currentId in visited:
-                break
-            visited.add(currentId)
-            folders = self.db.getRecordset(FileFolder, recordFilter={"id": currentId})
-            if not folders:
-                break
-            currentId = folders[0].get("parentId")
-        return False
-
-    def _ensureFeatureInstanceFolder(self, featureInstanceId: str, mandateId: str = "") -> Optional[str]:
-        """Return the folder ID for a feature instance, creating it on first use.
-        The folder is named after the feature instance label."""
-        existing = self.db.getRecordset(
-            FileFolder,
-            recordFilter={
-                "featureInstanceId": featureInstanceId,
-                "sysCreatedBy": self.userId or "",
-            },
-        )
-        if existing:
-            return existing[0].get("id")
-
-        # Resolve the instance label for the folder name
-        folderName = featureInstanceId[:8]
+    def _ensureFeatureInstanceGroup(self, featureInstanceId: str, contextKey: str = "files/list") -> Optional[str]:
+        """Return the groupId of the default group for a feature instance.
+        Creates the group if it doesn't exist yet."""
         try:
-            from modules.datamodels.datamodelFeatures import FeatureInstance
-            from modules.security.rootAccess import getRootDbAppConnector
-            dbApp = getRootDbAppConnector()
-            instances = dbApp.getRecordset(FeatureInstance, recordFilter={"id": featureInstanceId})
-            if instances:
-                folderName = instances[0].get("label") or folderName
+            import modules.interfaces.interfaceDbApp as _appIface
+            appInterface = _appIface.getInterface(self._currentUser)
+            existing = appInterface.getTableGrouping(contextKey)
+            nodes = [n.model_dump() if hasattr(n, 'model_dump') else (n if isinstance(n, dict) else vars(n)) for n in (existing.rootGroups if existing else [])]
+            # Look for group with name matching featureInstanceId
+            def _find(nds):
+                for nd in nds:
+                    nid = nd.get("id") if isinstance(nd, dict) else getattr(nd, "id", None)
+                    nmeta = nd.get("meta", {}) if isinstance(nd, dict) else getattr(nd, "meta", {})
+                    if (nmeta or {}).get("featureInstanceId") == featureInstanceId:
+                        return nid
+                    subs = nd.get("subGroups", []) if isinstance(nd, dict) else getattr(nd, "subGroups", [])
+                    result = _find(subs)
+                    if result:
+                        return result
+                return None
+            found = _find(nodes)
+            if found:
+                return found
+            # Create new group
+            import uuid
+            newId = str(uuid.uuid4())
+            newGroup = {
+                "id": newId,
+                "name": featureInstanceId,
+                "itemIds": [],
+                "subGroups": [],
+                "meta": {"featureInstanceId": featureInstanceId},
+            }
+            nodes.append(newGroup)
+            appInterface.upsertTableGrouping(contextKey, nodes)
+            return newId
         except Exception as e:
-            logger.warning(f"Could not resolve feature instance label: {e}")
+            logger.error(f"_ensureFeatureInstanceGroup failed: {e}")
+            return None

-        folder = FileFolder(
-            name=folderName,
-            parentId=None,
-            mandateId=mandateId,
-            featureInstanceId=featureInstanceId,
-        )
-        created = self.db.recordCreate(FileFolder, folder)
-        return created.get("id") if isinstance(created, dict) else getattr(created, "id", None)
-
-    def getFolder(self, folderId: str) -> Optional[Dict[str, Any]]:
-        """Returns a folder by ID if it belongs to the current user."""
-        folders = self.db.getRecordset(FileFolder, recordFilter={"id": folderId, "sysCreatedBy": self.userId or ""})
-        return folders[0] if folders else None
-
-    def listFolders(self, parentId: Optional[str] = None) -> List[Dict[str, Any]]:
-        """List folders visible to the current user.
-        Own folders are always returned. Other users' folders are only
-        returned when they contain files visible to the current user.
-        Each folder is enriched with ``fileCount``."""
-        recordFilter = {}
-        if parentId is not None:
-            recordFilter["parentId"] = parentId
-        folders = self.db.getRecordset(FileFolder, recordFilter=recordFilter if recordFilter else None)
-
-        if not folders:
-            return folders
-
-        folderIds = [f["id"] for f in folders if f.get("id")]
-        fileCounts: Dict[str, int] = {}
-        try:
-            from modules.interfaces.interfaceRbac import buildFilesScopeWhereClause
-            scopeClause = buildFilesScopeWhereClause(
-                self.currentUser, "FileItem", self.db,
-                self.mandateId, self.featureInstanceId,
-                [], [],
-            )
-
-            self.db._ensure_connection()
-            with self.db.connection.cursor() as cursor:
-                baseQuery = (
-                    'SELECT "folderId", COUNT(*) AS cnt '
-                    'FROM "FileItem" '
-                    'WHERE "folderId" = ANY(%s)'
-                )
-                queryValues: list = [folderIds]
-
-                if scopeClause:
-                    baseQuery += ' AND (' + scopeClause["condition"] + ')'
-                    queryValues.extend(scopeClause["values"])
-
-                baseQuery += ' GROUP BY "folderId"'
-                cursor.execute(baseQuery, queryValues)
-                for row in cursor.fetchall():
-                    fileCounts[row["folderId"]] = row["cnt"]
-        except Exception as e:
-            logger.warning(f"Could not count files per folder: {e}")
-
-        userId = self.userId or ""
-        result = []
-        for folder in folders:
-            fc = fileCounts.get(folder.get("id", ""), 0)
-            folder["fileCount"] = fc
-            isOwn = folder.get("sysCreatedBy") == userId
-            if isOwn or fc > 0:
-                result.append(folder)
-
-        return result
-
-    def createFolder(self, name: str, parentId: Optional[str] = None) -> Dict[str, Any]:
-        """Create a new folder with unique name validation."""
-        self._validateFolderName(name, parentId)
-        folder = FileFolder(
-            name=name,
-            parentId=parentId,
-            mandateId=self.mandateId or "",
-            featureInstanceId=self.featureInstanceId or "",
-        )
-        return self.db.recordCreate(FileFolder, folder)
-
-    def renameFolder(self, folderId: str, newName: str) -> bool:
-        """Rename a folder with unique name validation."""
-        folder = self.getFolder(folderId)
-        if not folder:
-            raise FileNotFoundError(f"Folder {folderId} not found")
-        self._validateFolderName(newName, folder.get("parentId"), excludeFolderId=folderId)
-        return self.db.recordModify(FileFolder, folderId, {"name": newName})
-
-    def updateFolder(self, folderId: str, updateData: Dict[str, Any]) -> bool:
-        """
-        Update folder metadata (e.g. ``scope``, ``neutralize``). Owner-only,
-        same access model as renameFolder/moveFolder. Use ``renameFolder`` for
-        ``name`` changes (uniqueness validation) and ``moveFolder`` for
-        ``parentId`` changes (cycle/uniqueness validation).
-        """
-        if not updateData:
-            return True
-        folder = self.getFolder(folderId)
-        if not folder:
-            raise FileNotFoundError(f"Folder {folderId} not found")
-        forbiddenKeys = {"id", "sysCreatedBy", "sysCreatedAt", "sysUpdatedAt"}
-        cleaned: Dict[str, Any] = {k: v for k, v in updateData.items() if k not in forbiddenKeys}
-        if "name" in cleaned:
-            self._validateFolderName(cleaned["name"], folder.get("parentId"), excludeFolderId=folderId)
-        return self.db.recordModify(FileFolder, folderId, cleaned)
-
-    def moveFolder(self, folderId: str, targetParentId: Optional[str] = None) -> bool:
-        """Move a folder to a new parent, with circular reference and unique name checks."""
-        folder = self.getFolder(folderId)
-        if not folder:
-            raise FileNotFoundError(f"Folder {folderId} not found")
-        if targetParentId and self._isDescendantOf(targetParentId, folderId):
-            raise ValueError("Cannot move folder into its own subtree")
-        self._validateFolderName(folder.get("name", ""), targetParentId, excludeFolderId=folderId)
-        return self.db.recordModify(FileFolder, folderId, {"parentId": targetParentId})
-
-    def moveFilesBatch(self, fileIds: List[str], targetFolderId: Optional[str] = None) -> Dict[str, Any]:
-        """Move multiple files with one SQL update.
-        Owner can always move; non-owners need RBAC ALL level."""
-        uniqueIds = [str(fid) for fid in dict.fromkeys(fileIds or []) if fid]
-        if not uniqueIds:
-            return {"movedFiles": 0}
-
-        if targetFolderId:
-            targetFolder = self.getFolder(targetFolderId)
-            if not targetFolder:
-                raise FileNotFoundError(f"Target folder {targetFolderId} not found")
-
-        try:
-            self.db._ensure_connection()
-            with self.db.connection.cursor() as cursor:
-                cursor.execute(
-                    'SELECT "id", "sysCreatedBy" FROM "FileItem" WHERE "id" = ANY(%s)',
-                    (uniqueIds,),
-                )
-                rows = cursor.fetchall()
-                foundIds = {row["id"] for row in rows}
-                missing = sorted(set(uniqueIds) - foundIds)
-                if missing:
-                    raise FileNotFoundError(f"Files not found: {missing}")
-
-                for row in rows:
-                    self._requireFileWriteAccess(row, row["id"], "update")
-
-                accessibleIds = [row["id"] for row in rows]
-                cursor.execute(
-                    'UPDATE "FileItem" SET "folderId" = %s, "sysModifiedAt" = %s, "sysModifiedBy" = %s '
-                    'WHERE "id" = ANY(%s)',
-                    (targetFolderId, getUtcTimestamp(), self.userId or "", accessibleIds),
-                )
-                movedFiles = cursor.rowcount
-
-            self.db.connection.commit()
-            return {"movedFiles": movedFiles}
-        except Exception as e:
-            logger.error(f"Error moving files in batch: {e}")
-            self.db.connection.rollback()
-            raise FileError(f"Error moving files in batch: {str(e)}")
-
-    def moveFoldersBatch(self, folderIds: List[str], targetParentId: Optional[str] = None) -> Dict[str, Any]:
-        """Move multiple folders with one SQL update after validation."""
-        uniqueIds = [str(fid) for fid in dict.fromkeys(folderIds or []) if fid]
-        if not uniqueIds:
-            return {"movedFolders": 0}
-
-        foldersToMove: List[Dict[str, Any]] = []
-        for folderId in uniqueIds:
-            folder = self.getFolder(folderId)
-            if not folder:
-                raise FileNotFoundError(f"Folder {folderId} not found")
-            if targetParentId and self._isDescendantOf(targetParentId, folderId):
-                raise ValueError("Cannot move folder into its own subtree")
-            foldersToMove.append(folder)
-
-        existingInTarget = self.db.getRecordset(
-            FileFolder,
-            recordFilter={"parentId": targetParentId or "", "sysCreatedBy": self.userId or ""},
-        )
-        existingNames = {f.get("name"): f.get("id") for f in existingInTarget}
-        movingNames: Dict[str, str] = {}
-        movingIds = set(uniqueIds)
-
-        for folder in foldersToMove:
-            name = folder.get("name", "")
-            folderId = folder.get("id")
-            if name in movingNames and movingNames[name] != folderId:
-                raise ValueError(f"Folder '{name}' already exists in this move batch")
-            movingNames[name] = folderId
-
-            existingId = existingNames.get(name)
-            if existingId and existingId not in movingIds:
-                raise ValueError(f"Folder '{name}' already exists in target directory")
-
-        try:
-            self.db._ensure_connection()
-            with self.db.connection.cursor() as cursor:
-                cursor.execute(
-                    'UPDATE "FileFolder" SET "parentId" = %s, "sysModifiedAt" = %s, "sysModifiedBy" = %s '
-                    'WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s',
-                    (targetParentId, getUtcTimestamp(), self.userId or "", uniqueIds, self.userId or ""),
-                )
-                movedFolders = cursor.rowcount
-
-            self.db.connection.commit()
-            return {"movedFolders": movedFolders}
-        except Exception as e:
-            logger.error(f"Error moving folders in batch: {e}")
-            self.db.connection.rollback()
-            raise FileError(f"Error moving folders in batch: {str(e)}")
-
-    def deleteFolder(self, folderId: str, recursive: bool = False) -> Dict[str, Any]:
-        """Delete a folder. If recursive, deletes all contents. Returns summary of deletions."""
-        folder = self.getFolder(folderId)
-        if not folder:
-            raise FileNotFoundError(f"Folder {folderId} not found")
-
-        childFolders = self.db.getRecordset(FileFolder, recordFilter={"parentId": folderId, "sysCreatedBy": self.userId or ""})
-        childFiles = self._getFilesByCurrentUser(recordFilter={"folderId": folderId})
-
-        if not recursive and (childFolders or childFiles):
-            raise ValueError(
-                f"Folder '{folder.get('name')}' is not empty "
-                f"({len(childFiles)} files, {len(childFolders)} subfolders). "
-                f"Use recursive=true to delete contents."
-            )
-
-        deletedFiles = 0
-        deletedFolders = 0
-
-        if recursive:
-            for subFolder in childFolders:
-                subResult = self.deleteFolder(subFolder["id"], recursive=True)
-                deletedFiles += subResult.get("deletedFiles", 0)
-                deletedFolders += subResult.get("deletedFolders", 0)
-            for childFile in childFiles:
-                try:
-                    self.deleteFile(childFile["id"])
-                    deletedFiles += 1
-                except Exception as e:
-                    logger.warning(f"Failed to delete file {childFile['id']} during folder deletion: {e}")
-
-        self.db.recordDelete(FileFolder, folderId)
-        deletedFolders += 1
-
-        return {"deletedFiles": deletedFiles, "deletedFolders": deletedFolders}
-
-    def deleteFoldersBatch(self, folderIds: List[str], recursive: bool = True) -> Dict[str, Any]:
-        """Delete multiple folders and their content in batched SQL calls."""
-        uniqueIds = [str(fid) for fid in dict.fromkeys(folderIds or []) if fid]
-        if not uniqueIds:
-            return {"deletedFiles": 0, "deletedFolders": 0}
-
-        if not recursive:
-            deletedFiles = 0
-            deletedFolders = 0
-            for folderId in uniqueIds:
-                result = self.deleteFolder(folderId, recursive=False)
-                deletedFiles += result.get("deletedFiles", 0)
-                deletedFolders += result.get("deletedFolders", 0)
-            return {"deletedFiles": deletedFiles, "deletedFolders": deletedFolders}
-
-        try:
-            self.db._ensure_connection()
-            with self.db.connection.cursor() as cursor:
-                cursor.execute(
-                    'SELECT "id" FROM "FileFolder" WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s',
-                    (uniqueIds, self.userId or ""),
-                )
-                rootAccessibleIds = [row["id"] for row in cursor.fetchall()]
-                if len(rootAccessibleIds) != len(uniqueIds):
-                    missingIds = sorted(set(uniqueIds) - set(rootAccessibleIds))
-                    raise FileNotFoundError(f"Folders not found or not accessible: {missingIds}")
-
-                cursor.execute(
-                    """
-                    WITH RECURSIVE folder_tree AS (
-                        SELECT "id"
-                        FROM "FileFolder"
-                        WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s
-                        UNION ALL
-                        SELECT child."id"
-                        FROM "FileFolder" child
-                        INNER JOIN folder_tree ft ON child."parentId" = ft."id"
-                        WHERE child."sysCreatedBy" = %s
-                    )
-                    SELECT DISTINCT "id" FROM folder_tree
-                    """,
-                    (rootAccessibleIds, self.userId or "", self.userId or ""),
-                )
-                allFolderIds = [row["id"] for row in cursor.fetchall()]
-
-                cursor.execute(
-                    'SELECT "id" FROM "FileItem" WHERE "folderId" = ANY(%s) AND "sysCreatedBy" = %s',
-                    (allFolderIds, self.userId or ""),
-                )
-                allFileIds = [row["id"] for row in cursor.fetchall()]
-
-                if allFileIds:
-                    cursor.execute('DELETE FROM "FileData" WHERE "id" = ANY(%s)', (allFileIds,))
-                    cursor.execute(
-                        'DELETE FROM "FileItem" WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s',
-                        (allFileIds, self.userId or ""),
-                    )
-                    deletedFiles = cursor.rowcount
-                else:
-                    deletedFiles = 0
-
-                cursor.execute(
-                    'DELETE FROM "FileFolder" WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s',
-                    (allFolderIds, self.userId or ""),
-                )
-                deletedFolders = cursor.rowcount
-
-            self.db.connection.commit()
-            return {"deletedFiles": deletedFiles, "deletedFolders": deletedFolders}
-        except Exception as e:
-            logger.error(f"Error deleting folders in batch: {e}")
-            self.db.connection.rollback()
-            raise FileDeletionError(f"Error deleting folders in batch: {str(e)}")
-
-    def copyFile(self, sourceFileId: str, targetFolderId: Optional[str] = None, newFileName: Optional[str] = None) -> FileItem:
+    def copyFile(self, sourceFileId: str, newFileName: Optional[str] = None) -> FileItem:
         """Create a full duplicate of a file (FileItem + FileData)."""
         sourceFile = self.getFile(sourceFileId)
         if not sourceFile:
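For context, a minimal sketch (not part of the commit) of the node shape that `_find()` in `_ensureFeatureInstanceGroup` above walks — plain dicts as produced by `model_dump()`; the ids and names here are made up:

    rootGroups = [
        {
            "id": "3f2c…",                        # groupId that _ensureFeatureInstanceGroup returns
            "name": "<featureInstanceId>",
            "itemIds": ["fileId-1", "fileId-2"],  # FileItem ids attached to this group
            "subGroups": [],                      # nested nodes, searched recursively
            "meta": {"featureInstanceId": "<featureInstanceId>"},
        },
    ]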
@@ -1665,11 +1320,6 @@ class ComponentObjects:
         fileName = newFileName or sourceFile.fileName
         copiedFile = self.createFile(fileName, sourceFile.mimeType, sourceData)

-        if targetFolderId:
-            self.updateFile(copiedFile.id, {"folderId": targetFolderId})
-        elif sourceFile.folderId:
-            self.updateFile(copiedFile.id, {"folderId": sourceFile.folderId})
-
         self.createFileData(copiedFile.id, sourceData)
         return copiedFile
@@ -1884,18 +1534,14 @@ class ComponentObjects:
             logger.error(f"Error getting file content: {str(e)}")
             return None

-    def saveUploadedFile(self, fileContent: bytes, fileName: str, folderId: Optional[str] = None) -> tuple[FileItem, str]:
-        """Saves an uploaded file if user has permission.
-
-        Args:
-            folderId: Optional parent folder ID. None means root folder.
-        """
+    def saveUploadedFile(self, fileContent: bytes, fileName: str) -> tuple[FileItem, str]:
+        """Saves an uploaded file if user has permission."""
         try:
             # Check file creation permission
             if not self.checkRbacPermission(FileItem, "create"):
                 raise PermissionError("No permission to upload files")

-            logger.debug(f"Starting upload process for file: {fileName} (folderId={folderId!r})")
+            logger.debug(f"Starting upload process for file: {fileName}")

             if not isinstance(fileContent, bytes):
                 logger.error(f"Invalid fileContent type: {type(fileContent)}")
@@ -1921,7 +1567,6 @@ class ComponentObjects:
                 name=fileName,
                 mimeType=mimeType,
                 content=fileContent,
-                folderId=folderId,
             )

             # Save binary data
@@ -204,7 +204,6 @@ TABLE_NAMESPACE = {
     # Files - benutzer-eigen
     "FileItem": "files",
     "FileData": "files",
-    "FileFolder": "files",
     # Automation - benutzer-eigen
     "AutomationDefinition": "automation",
     "AutomationTemplate": "automation",
@@ -529,8 +528,7 @@ def getRecordsetPaginatedWithRBAC(
         if val is None:
             # val=None in pagination.filters means "match empty/null"
             # (same convention as connectorDbPostgre._buildPaginationClauses).
-            # Covers both historical empty-string values and true NULLs
-            # e.g. root-folder files where folderId may be "" or NULL.
+            # Covers both historical empty-string values and true NULLs.
             whereConditions.append(f'("{key}" IS NULL OR "{key}"::TEXT = \'\')')
             continue
         if isinstance(val, dict):
@@ -689,8 +687,7 @@ def getDistinctColumnValuesWithRBAC(
         if val is None:
             # val=None in pagination.filters means "match empty/null"
             # (same convention as connectorDbPostgre._buildPaginationClauses).
-            # Covers both historical empty-string values and true NULLs
-            # e.g. root-folder files where folderId may be "" or NULL.
+            # Covers both historical empty-string values and true NULLs.
             whereConditions.append(f'("{key}" IS NULL OR "{key}"::TEXT = \'\')')
             continue
         if isinstance(val, dict):
0	modules/migrations/__init__.py	Normal file
240	modules/migrations/migrate_folders_to_groups.py	Normal file

@@ -0,0 +1,240 @@
+"""
+One-time migration: Convert FileFolder tree + FileItem.folderId → table_groupings.
+
+Run this BEFORE dropping the physical FileFolder table and FileItem.folderId column
+from the database (those are separate Alembic/SQL steps).
+
+Usage:
+    python -m modules.migrations.migrate_folders_to_groups [--dry-run] [--verbose]
+
+Steps:
+    1. For each distinct (userId, mandateId) combination that has FileFolder records:
+       a. Build the full folder tree (recursive)
+       b. Write it as a TableGroupNode tree into table_groupings (contextKey='files/list')
+          – merges with any existing groups rather than overwriting
+       c. For each FileItem with a folderId that maps into this tree,
+          add its id to the matching group's itemIds
+    2. Print a summary (rows migrated, groups created, files assigned)
+    3. If not --dry-run: commits the inserts/updates
+
+NOTE: Schema changes (ALTER TABLE DROP COLUMN, DROP TABLE) are intentionally
+NOT performed by this script. Run the corresponding Alembic migration
+(migrations/versions/xxxx_drop_folder_columns.py) afterwards.
+"""
+
+import argparse
+import json
+import logging
+import uuid
+from typing import Optional
+
+logger = logging.getLogger(__name__)
+
+
+# ── Helpers ──────────────────────────────────────────────────────────────────
+
+def _build_tree(folders: list, parent_id: Optional[str]) -> list:
+    """Recursively build TableGroupNode-compatible dicts from a flat folder list."""
+    children = [f for f in folders if f.get("parentId") == parent_id]
+    result = []
+    for folder in children:
+        node = {
+            "id": str(uuid.uuid4()),
+            "name": folder["name"],
+            "itemIds": [],
+            "subGroups": _build_tree(folders, folder["id"]),
+            "meta": {"migratedFromFolderId": folder["id"]},
+        }
+        result.append(node)
+    return result
+
+
+def _assign_files_to_nodes(nodes: list, files_by_folder: dict) -> list:
+    """Recursively assign file IDs to group nodes based on folder mapping."""
+    for node in nodes:
+        folder_id = (node.get("meta") or {}).get("migratedFromFolderId")
+        if folder_id and folder_id in files_by_folder:
+            node["itemIds"] = list(files_by_folder[folder_id])
+        node["subGroups"] = _assign_files_to_nodes(node.get("subGroups", []), files_by_folder)
+    return nodes
+
+
+def _count_items(nodes: list) -> int:
+    total = 0
+    for node in nodes:
+        total += len(node.get("itemIds", []))
+        total += _count_items(node.get("subGroups", []))
+    return total
+
+
+def _now_ts() -> str:
+    from modules.shared.timeUtils import getUtcTimestamp
+    return getUtcTimestamp()
+
+
+# ── Main migration ────────────────────────────────────────────────────────────
+
+def run_migration(dry_run: bool = True, verbose: bool = False):
+    """Main migration entry point."""
+    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
+    logger.info(f"Starting folder→group migration (dry_run={dry_run})")
+
+    from modules.connectors.connectorDbPostgre import getCachedConnector
+
+    connector = getCachedConnector()
+    if not connector or not connector.connection:
+        logger.error("Could not obtain a DB connection. Aborting.")
+        return
+
+    conn = connector.connection
+    cur = conn.cursor()
+
+    # ── 1. Check that the source tables still exist ───────────────────────────
+    cur.execute("""
+        SELECT EXISTS (
+            SELECT 1 FROM information_schema.tables
+            WHERE table_name = 'FileFolder'
+        )
+    """)
+    folder_table_exists = cur.fetchone()[0]
+
+    cur.execute("""
+        SELECT EXISTS (
+            SELECT 1 FROM information_schema.columns
+            WHERE table_name = 'FileItem' AND column_name = 'folderId'
+        )
+    """)
+    folder_column_exists = cur.fetchone()[0]
+
+    if not folder_table_exists and not folder_column_exists:
+        logger.info("FileFolder table and FileItem.folderId column not found — migration already applied or not needed.")
+        return
+
+    if not folder_table_exists:
+        logger.warning("FileFolder table missing but FileItem.folderId column still present. Only file assignments will be migrated.")
+    if not folder_column_exists:
+        logger.warning("FileItem.folderId column missing but FileFolder table still present. Only group tree structure will be migrated.")
+
+    # ── 2. Load all folders ───────────────────────────────────────────────────
+    folders_by_user: dict = {}
+    if folder_table_exists:
+        cur.execute('SELECT "id", "name", "parentId", "sysCreatedBy", "mandateId" FROM "FileFolder"')
+        for row in cur.fetchall():
+            fid, fname, parent_id, user_id, mandate_id = row
+            key = (str(user_id), str(mandate_id) if mandate_id else "")
+            folders_by_user.setdefault(key, []).append({
+                "id": fid, "name": fname, "parentId": parent_id,
+            })
+    logger.info(f"Loaded folders for {len(folders_by_user)} (user, mandate) combinations")
+
+    # ── 3. Load file→folder assignments ──────────────────────────────────────
+    files_by_key: dict = {}
+    if folder_column_exists:
+        cur.execute(
+            'SELECT "id", "folderId", "sysCreatedBy", "mandateId" FROM "FileItem" WHERE "folderId" IS NOT NULL AND "folderId" != \'\''
+        )
+        for row in cur.fetchall():
+            file_id, folder_id, user_id, mandate_id = row
+            key = (str(user_id), str(mandate_id) if mandate_id else "")
+            files_by_key.setdefault(key, {}).setdefault(folder_id, []).append(file_id)
+    total_files = sum(
+        sum(len(v) for v in d.values()) for d in files_by_key.values()
+    )
+    logger.info(f"Found {total_files} file→folder assignments across {len(files_by_key)} (user, mandate) combos")
+
+    # ── 4. Combine and upsert groupings ──────────────────────────────────────
+    all_keys = set(folders_by_user.keys()) | set(files_by_key.keys())
+    stats = {"groups_created": 0, "groupings_upserted": 0, "files_assigned": 0}
+
+    for key in all_keys:
+        user_id, mandate_id = key
+        folders = folders_by_user.get(key, [])
+        files_by_folder = files_by_key.get(key, {})
+
+        # Build tree
+        roots = _build_tree(folders, None)
+        roots = _assign_files_to_nodes(roots, files_by_folder)
+
+        # Handle files in unknown folders (folder no longer in tree)
+        known_folder_ids = {f["id"] for f in folders}
+        for folder_id, file_ids in files_by_folder.items():
+            if folder_id not in known_folder_ids:
+                # Orphaned files: put them in an "Orphaned" group
+                roots.append({
+                    "id": str(uuid.uuid4()),
+                    "name": f"Orphaned (folder {folder_id[:8]}…)",
+                    "itemIds": file_ids,
+                    "subGroups": [],
+                    "meta": {"migratedFromFolderId": folder_id, "orphaned": True},
+                })
+
+        if not roots:
+            continue
+
+        n_items = _count_items(roots)
+        stats["groups_created"] += len(roots)
+        stats["files_assigned"] += n_items
+
+        context_key = "files/list"
+        if verbose:
+            logger.debug(f"  user={user_id} mandate={mandate_id}: {len(roots)} root groups, {n_items} files")
+
+        if not dry_run:
+            # Check for existing grouping
+            cur.execute(
+                'SELECT "id", "rootGroups" FROM "TableGrouping" WHERE "userId" = %s AND "contextKey" = %s',
+                (user_id, context_key),
+            )
+            existing_row = cur.fetchone()
+
+            if existing_row:
+                existing_id, existing_raw = existing_row
+                existing_roots = json.loads(existing_raw) if isinstance(existing_raw, str) else (existing_raw or [])
+                # Merge: append migrated groups (avoid duplicates by migratedFromFolderId)
+                existing_meta_ids = {
+                    (n.get("meta") or {}).get("migratedFromFolderId")
+                    for n in existing_roots
+                    if (n.get("meta") or {}).get("migratedFromFolderId")
+                }
+                new_roots = existing_roots + [
+                    r for r in roots
+                    if (r.get("meta") or {}).get("migratedFromFolderId") not in existing_meta_ids
+                ]
+                cur.execute(
+                    'UPDATE "TableGrouping" SET "rootGroups" = %s, "updatedAt" = %s WHERE "id" = %s',
+                    (json.dumps(new_roots), _now_ts(), existing_id),
+                )
+            else:
+                new_id = str(uuid.uuid4())
+                cur.execute(
+                    'INSERT INTO "TableGrouping" ("id", "userId", "contextKey", "rootGroups", "updatedAt") VALUES (%s, %s, %s, %s, %s)',
+                    (new_id, user_id, context_key, json.dumps(roots), _now_ts()),
+                )
+            stats["groupings_upserted"] += 1
+
+    # ── 5. Summary ────────────────────────────────────────────────────────────
+    if not dry_run:
+        conn.commit()
+        logger.info("Migration committed.")
+    else:
+        logger.info("DRY RUN — no changes written.")
+
+    logger.info(
+        f"Summary: groupings_upserted={stats['groupings_upserted']}, "
+        f"groups_created={stats['groups_created']}, "
+        f"files_assigned={stats['files_assigned']}"
+    )
+    logger.info(
+        "Next steps (run after verifying data):\n"
+        "  1. Run Alembic migration to DROP COLUMN FileItem.folderId\n"
+        "  2. Run Alembic migration to DROP TABLE FileFolder"
+    )
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Migrate FileFolder tree to table_groupings")
+    parser.add_argument("--dry-run", action="store_true", default=True, help="Preview only, no DB writes (default)")
+    parser.add_argument("--execute", action="store_true", help="Actually write to DB (disables dry-run)")
+    parser.add_argument("--verbose", action="store_true", help="Show per-user details")
+    args = parser.parse_args()
+    dry_run = not args.execute
+    run_migration(dry_run=dry_run, verbose=args.verbose)
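For reference, the migration above can also be driven from Python rather than the CLI; a sketch using its own entry point (dry run first, then a real write):

    from modules.migrations.migrate_folders_to_groups import run_migration

    run_migration(dry_run=True, verbose=True)   # preview: logs groups/files, writes nothing
    run_migration(dry_run=False)                # upserts TableGrouping rows and commits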
@@ -12,7 +12,6 @@ from modules.auth import limiter, getCurrentUser, getRequestContext, RequestCont
 # Import interfaces
 import modules.interfaces.interfaceDbManagement as interfaceDbManagement
 from modules.datamodels.datamodelFiles import FileItem, FilePreview
-from modules.datamodels.datamodelFileFolder import FileFolder
 from modules.shared.attributeUtils import getModelAttributeDefinitions
 from modules.datamodels.datamodelUam import User
 from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict
@@ -319,16 +318,7 @@ def get_files(
         recordFilter = {"sysCreatedBy": managementInterface.userId}
         return handleIdsMode(managementInterface.db, FileItem, pagination, recordFilter)

-    recordFilter = None
-    if paginationParams and paginationParams.filters and "folderId" in paginationParams.filters:
-        fVal = paginationParams.filters.get("folderId")
-        if fVal is None or (isinstance(fVal, str) and fVal.strip() == ""):
-            paginationParams.filters["folderId"] = None
-        else:
-            paginationParams.filters.pop("folderId")
-            recordFilter = {"folderId": fVal}
-
-    result = managementInterface.getAllFiles(pagination=paginationParams, recordFilter=recordFilter)
+    result = managementInterface.getAllFiles(pagination=paginationParams)

     if paginationParams:
         enriched = applyGroupScopeFilter(enrichRowsWithFkLabels(_filesToDicts(result.items), FileItem), groupCtx.itemIds)
@@ -358,6 +348,36 @@ def get_files(
     )


+def _addFileToGroup(appInterface, fileId: str, groupId: str, contextKey: str = "files/list"):
+    """Add a file to a group in the persisted groupTree (upsert)."""
+    from modules.routes.routeHelpers import _collectItemIds
+    try:
+        existing = appInterface.getTableGrouping(contextKey)
+        if not existing:
+            return
+        nodes = [n.model_dump() if hasattr(n, 'model_dump') else n for n in existing.rootGroups]
+        def _add(nds):
+            for nd in nds:
+                nid = nd.get("id") if isinstance(nd, dict) else getattr(nd, "id", None)
+                if nid == groupId:
+                    itemIds = list(nd.get("itemIds", []) if isinstance(nd, dict) else getattr(nd, "itemIds", []))
+                    if fileId not in itemIds:
+                        itemIds.append(fileId)
+                    if isinstance(nd, dict):
+                        nd["itemIds"] = itemIds
+                    else:
+                        nd.itemIds = itemIds
+                    return True
+                subs = nd.get("subGroups", []) if isinstance(nd, dict) else getattr(nd, "subGroups", [])
+                if _add(subs):
+                    return True
+            return False
+        _add(nodes)
+        appInterface.upsertTableGrouping(contextKey, nodes)
+    except Exception as e:
+        logger.warning(f"_addFileToGroup failed: {e}")
+
+
 @router.post("/upload", status_code=status.HTTP_201_CREATED)
 @limiter.limit("10/minute")
 async def upload_file(
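Usage sketch for the helper above, mirroring the call added to upload_file below; `currentUser` and the interface factory come from the surrounding route code, and the second call's contextKey is a hypothetical example:

    import modules.interfaces.interfaceDbApp as _appIface

    appInterface = _appIface.getInterface(currentUser)
    _addFileToGroup(appInterface, fileItem.id, groupId)                    # default contextKey="files/list"
    _addFileToGroup(appInterface, fileItem.id, groupId, "files/archive")  # hypothetical other context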
@@ -365,7 +385,7 @@ async def upload_file(
     file: UploadFile = File(...),
     workflowId: Optional[str] = Form(None),
     featureInstanceId: Optional[str] = Form(None),
-    folderId: Optional[str] = Form(None),
+    groupId: Optional[str] = Form(None),
     currentUser: User = Depends(getCurrentUser),
     context: RequestContext = Depends(getRequestContext),
 ) -> JSONResponse:
@@ -389,31 +409,22 @@ async def upload_file(
                 status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
                 detail=f"File too large. Maximum size: {interfaceDbManagement.APP_CONFIG.get('File_Management_MAX_UPLOAD_SIZE_MB')}MB"
             )

-        # Normalize folderId: empty string / "null" / "root" → None (root folder)
-        normalizedFolderId: Optional[str] = folderId
-        if isinstance(normalizedFolderId, str):
-            trimmed = normalizedFolderId.strip()
-            if not trimmed or trimmed.lower() in {"null", "none", "root"}:
-                normalizedFolderId = None
-            else:
-                normalizedFolderId = trimmed
-
         # Save file via LucyDOM interface in the database
         fileItem, duplicateType = managementInterface.saveUploadedFile(
-            fileContent, file.filename, folderId=normalizedFolderId
+            fileContent, file.filename
         )

         if featureInstanceId and not fileItem.featureInstanceId:
             managementInterface.updateFile(fileItem.id, {"featureInstanceId": featureInstanceId})
             fileItem.featureInstanceId = featureInstanceId

-        # For exact duplicates we keep the existing record, but move it into the
-        # target folder so the user actually sees their upload land where they expect.
-        if duplicateType == "exact_duplicate" and normalizedFolderId != getattr(fileItem, "folderId", None):
-            managementInterface.updateFile(fileItem.id, {"folderId": normalizedFolderId})
-            fileItem.folderId = normalizedFolderId
+        # Add to group if groupId was provided
+        if groupId:
+            import modules.interfaces.interfaceDbApp as _appIface
+            appInterface = _appIface.getInterface(currentUser)
+            _addFileToGroup(appInterface, fileItem.id, groupId)

         # Determine response message based on duplicate type
         if duplicateType == "exact_duplicate":
             message = f"File '{file.filename}' already exists with identical content. Reusing existing file."
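A hedged client-side sketch of the changed upload contract — the multipart form field is now groupId instead of folderId. The URL prefix, host, and ids here are assumptions, not from this commit:

    import requests  # assumption: a plain HTTP client is available

    with open("report.pdf", "rb") as fh:
        resp = requests.post(
            "https://host.example/files/upload",   # route prefix is an assumption
            files={"file": fh},
            data={"groupId": "3f2c…", "featureInstanceId": "fi-123"},  # made-up ids
        )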
@@ -478,347 +489,6 @@ async def upload_file(
             detail=f"Error during file upload: {str(e)}"
         )

-# ── Folder endpoints (MUST be before /{fileId} catch-all) ─────────────────────
-
-@router.get("/folders", response_model=List[Dict[str, Any]])
-@limiter.limit("30/minute")
-def list_folders(
-    request: Request,
-    parentId: Optional[str] = Query(None, description="Parent folder ID (omit for all folders)"),
-    currentUser: User = Depends(getCurrentUser),
-    context: RequestContext = Depends(getRequestContext)
-) -> List[Dict[str, Any]]:
-    """List folders for the current user."""
-    try:
-        mgmt = interfaceDbManagement.getInterface(
-            currentUser,
-            mandateId=str(context.mandateId) if context.mandateId else None,
-            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
-        )
-        if parentId is not None:
-            return mgmt.listFolders(parentId=parentId)
-        return mgmt.listFolders()
-    except Exception as e:
-        logger.error(f"Error listing folders: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
-
-
-@router.post("/folders", status_code=status.HTTP_201_CREATED)
-@limiter.limit("10/minute")
-def create_folder(
-    request: Request,
-    body: Dict[str, Any] = Body(...),
-    currentUser: User = Depends(getCurrentUser),
-    context: RequestContext = Depends(getRequestContext)
-) -> Dict[str, Any]:
-    """Create a new folder."""
-    name = body.get("name", "")
-    parentId = body.get("parentId")
-    if not name:
-        raise HTTPException(status_code=400, detail=routeApiMsg("name is required"))
-    try:
-        mgmt = interfaceDbManagement.getInterface(
-            currentUser,
-            mandateId=str(context.mandateId) if context.mandateId else None,
-            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
-        )
-        return mgmt.createFolder(name=name, parentId=parentId)
-    except ValueError as e:
-        raise HTTPException(status_code=400, detail=str(e))
-    except Exception as e:
-        logger.error(f"Error creating folder: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
-
-
-@router.put("/folders/{folderId}")
-@limiter.limit("10/minute")
-def rename_folder(
-    request: Request,
-    folderId: str = Path(...),
-    body: Dict[str, Any] = Body(...),
-    currentUser: User = Depends(getCurrentUser),
-    context: RequestContext = Depends(getRequestContext)
-) -> Dict[str, Any]:
-    """Rename a folder."""
-    newName = body.get("name", "")
-    if not newName:
-        raise HTTPException(status_code=400, detail=routeApiMsg("name is required"))
-    try:
-        mgmt = interfaceDbManagement.getInterface(
-            currentUser,
-            mandateId=str(context.mandateId) if context.mandateId else None,
-            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
-        )
-        mgmt.renameFolder(folderId, newName)
-        return {"success": True, "folderId": folderId, "name": newName}
-    except ValueError as e:
-        raise HTTPException(status_code=400, detail=str(e))
-    except Exception as e:
-        logger.error(f"Error renaming folder: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
-
-
-@router.delete("/folders/{folderId}")
-@limiter.limit("10/minute")
-def delete_folder(
-    request: Request,
-    folderId: str = Path(...),
-    recursive: bool = Query(False, description="Delete folder contents recursively"),
-    currentUser: User = Depends(getCurrentUser),
-    context: RequestContext = Depends(getRequestContext)
-) -> Dict[str, Any]:
-    """Delete a folder. Use recursive=true to delete non-empty folders."""
-    try:
-        mgmt = interfaceDbManagement.getInterface(
-            currentUser,
-            mandateId=str(context.mandateId) if context.mandateId else None,
-            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
-        )
-        return mgmt.deleteFolder(folderId, recursive=recursive)
-    except ValueError as e:
-        raise HTTPException(status_code=400, detail=str(e))
-    except Exception as e:
-        logger.error(f"Error deleting folder: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
-
-
-@router.post("/folders/{folderId}/move")
-@limiter.limit("10/minute")
-def move_folder(
-    request: Request,
-    folderId: str = Path(...),
-    body: Dict[str, Any] = Body(...),
-    currentUser: User = Depends(getCurrentUser),
-    context: RequestContext = Depends(getRequestContext)
-) -> Dict[str, Any]:
-    """Move a folder to a new parent."""
-    targetParentId = body.get("targetParentId")
-    try:
-        mgmt = interfaceDbManagement.getInterface(
-            currentUser,
-            mandateId=str(context.mandateId) if context.mandateId else None,
-            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
-        )
-        mgmt.moveFolder(folderId, targetParentId)
-        return {"success": True, "folderId": folderId, "parentId": targetParentId}
-    except ValueError as e:
-        raise HTTPException(status_code=400, detail=str(e))
-    except Exception as e:
-        logger.error(f"Error moving folder: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
-
-
-@router.patch("/folders/{folderId}/scope")
-@limiter.limit("10/minute")
-def _updateFolderScope(
-    request: Request,
-    folderId: str = Path(..., description="ID of the folder"),
-    scope: str = Body(..., embed=True),
-    context: RequestContext = Depends(getRequestContext),
-) -> Dict[str, Any]:
-    """Update the scope of a folder. Propagates to all files inside (recursively). Global scope requires sysAdmin."""
-    validScopes = {"personal", "featureInstance", "mandate", "global"}
-    if scope not in validScopes:
-        raise HTTPException(status_code=400, detail=f"Invalid scope: {scope}. Must be one of {validScopes}")
-    if scope == "global" and not context.isSysAdmin:
-        raise HTTPException(status_code=403, detail=routeApiMsg("Only sysadmins can set global scope"))
-    try:
-        mgmt = interfaceDbManagement.getInterface(
-            context.user,
-            mandateId=str(context.mandateId) if context.mandateId else None,
-            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
-        )
-        folder = mgmt.getFolder(folderId)
-        if not folder:
-            raise HTTPException(status_code=404, detail=routeApiMsg("Folder not found"))
-        mgmt.updateFolder(folderId, {"scope": scope})
-        fileIds = _collectFolderFileIds(mgmt, folderId)
-        for fid in fileIds:
-            try:
-                mgmt.updateFile(fid, {"scope": scope})
-            except Exception as e:
-                logger.error("Folder scope propagation: failed to update file %s: %s", fid, e)
-        logger.info("Updated scope=%s for folder %s: %d files affected", scope, folderId, len(fileIds))
-        return {"folderId": folderId, "scope": scope, "filesUpdated": len(fileIds)}
-    except HTTPException:
-        raise
-    except Exception as e:
-        logger.error(f"Error updating folder scope: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
-
-
-@router.patch("/folders/{folderId}/neutralize")
-@limiter.limit("10/minute")
-def updateFolderNeutralize(
-    request: Request,
-    background_tasks: BackgroundTasks,
-    folderId: str = Path(..., description="ID of the folder"),
-    neutralize: bool = Body(..., embed=True),
-    context: RequestContext = Depends(getRequestContext),
-) -> Dict[str, Any]:
-    """Toggle neutralization on a folder. Propagates to all files inside (recursively).
-
-    When turning ON: all files in the folder get ``neutralize=True``, their
-    knowledge indexes are purged synchronously, and background re-indexing
-    is triggered.
-    When turning OFF: files revert to ``neutralize=False`` unless they were
-    individually marked (not implemented yet -- all are reverted).
-    """
-    try:
-        mgmt = interfaceDbManagement.getInterface(
-            context.user,
-            mandateId=str(context.mandateId) if context.mandateId else None,
-            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
-        )
-
-        folder = mgmt.getFolder(folderId)
-        if not folder:
-            raise HTTPException(status_code=404, detail=routeApiMsg("Folder not found"))
-
-        mgmt.updateFolder(folderId, {"neutralize": neutralize})
-
-        fileIds = _collectFolderFileIds(mgmt, folderId)
-        logger.info("Folder neutralize toggle %s for folder %s: %d files affected", neutralize, folderId, len(fileIds))
-
-        from modules.interfaces.interfaceDbKnowledge import getInterface as getKnowledgeInterface
-        knowledgeDb = getKnowledgeInterface()
-
-        for fid in fileIds:
-            try:
-                mgmt.updateFile(fid, {"neutralize": neutralize})
-                if neutralize:
-                    try:
-                        knowledgeDb.deleteFileContentIndex(fid)
-                    except Exception as e:
-                        logger.warning("Folder neutralize: failed to purge index for file %s: %s", fid, e)
-                else:
-                    try:
-                        from modules.datamodels.datamodelKnowledge import FileContentIndex
-                        indices = knowledgeDb.db.getRecordset(FileContentIndex, recordFilter={"id": fid})
-                        for idx in indices:
-                            idxId = idx.get("id") if isinstance(idx, dict) else getattr(idx, "id", None)
-                            if idxId:
-                                knowledgeDb.db.recordModify(FileContentIndex, idxId, {
-                                    "neutralizationStatus": "original",
-                                    "isNeutralized": False,
-                                })
-                    except Exception as e:
-                        logger.warning("Folder neutralize OFF: metadata update failed for %s: %s", fid, e)
-            except Exception as e:
-                logger.error("Folder neutralize: failed to update file %s: %s", fid, e)
-
-        for fid in fileIds:
-            fileMeta = mgmt.getFile(fid)
-            if fileMeta:
-                fn = fileMeta.fileName if hasattr(fileMeta, "fileName") else fileMeta.get("fileName", "")
-                mt = fileMeta.mimeType if hasattr(fileMeta, "mimeType") else fileMeta.get("mimeType", "")
-
-                async def _reindex(fileId=fid, fileName=fn, mimeType=mt):
-                    try:
-                        await _autoIndexFile(fileId=fileId, fileName=fileName, mimeType=mimeType, user=context.user)
-                    except Exception as ex:
-                        logger.error("Folder neutralize re-index failed for %s: %s", fileId, ex)
-
-                background_tasks.add_task(_reindex)
-
-        return {"folderId": folderId, "neutralize": neutralize, "filesUpdated": len(fileIds)}
-    except HTTPException:
-        raise
-    except Exception as e:
-        logger.error(f"Error updating folder neutralize flag: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
-
-
-def _collectFolderFileIds(mgmt, folderId: str) -> List[str]:
-    """Recursively collect all file IDs in a folder and its sub-folders."""
-    fileIds = []
-    try:
-        files = mgmt.listFiles(folderId=folderId)
-        if isinstance(files, dict):
-            files = files.get("files", [])
-        for f in (files or []):
-            fid = f.get("id") if isinstance(f, dict) else getattr(f, "id", None)
-            if fid:
-                fileIds.append(fid)
-    except Exception as e:
-        logger.warning("_collectFolderFileIds: listFiles failed for folder %s: %s", folderId, e)
-
-    try:
-        subFolders = mgmt.listFolders(parentId=folderId)
-        for sf in (subFolders or []):
-            sfId = sf.get("id") if isinstance(sf, dict) else getattr(sf, "id", None)
-            if sfId:
-                fileIds.extend(_collectFolderFileIds(mgmt, sfId))
-    except Exception as e:
-        logger.warning("_collectFolderFileIds: listFolders failed for folder %s: %s", folderId, e)

-    return fileIds
-
-
-@router.get("/folders/{folderId}/download")
-@limiter.limit("10/minute")
-def download_folder(
-    request: Request,
-    folderId: str = Path(..., description="ID of the folder to download as ZIP"),
-    currentUser: User = Depends(getCurrentUser),
-    context: RequestContext = Depends(getRequestContext)
-) -> Response:
-    """Download a folder (including subfolders) as a ZIP archive."""
-    import io
-    import zipfile
-    import urllib.parse
-
-    try:
-        mgmt = interfaceDbManagement.getInterface(
-            currentUser,
-            mandateId=str(context.mandateId) if context.mandateId else None,
-            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
-        )
-
-        folder = mgmt.getFolder(folderId)
-        if not folder:
-            raise HTTPException(status_code=404, detail=f"Folder {folderId} not found")
-
-        folderName = folder.get("name", "download")
-
-        def _collectFiles(parentId: str, pathPrefix: str):
-            """Recursively collect (zipPath, fileId) tuples."""
-            entries = []
-            for f in mgmt._getFilesByCurrentUser(recordFilter={"folderId": parentId}):
-                fname = f.get("fileName") or f.get("name") or f.get("id", "file")
-                entries.append((f"{pathPrefix}{fname}", f["id"]))
-            for sub in mgmt.listFolders(parentId=parentId):
-                subName = sub.get("name", sub["id"])
-                entries.extend(_collectFiles(sub["id"], f"{pathPrefix}{subName}/"))
-            return entries
-
-        fileEntries = _collectFiles(folderId, "")
-        if not fileEntries:
-            raise HTTPException(status_code=404, detail=routeApiMsg("Folder is empty"))
-
-        buf = io.BytesIO()
-        with zipfile.ZipFile(buf, "w", zipfile.ZIP_DEFLATED) as zf:
-            for zipPath, fileId in fileEntries:
-                data = mgmt.getFileData(fileId)
-                if data:
-                    zf.writestr(zipPath, data)
-
-        buf.seek(0)
-        zipBytes = buf.getvalue()
-        encodedName = urllib.parse.quote(f"{folderName}.zip")
-
-        return Response(
-            content=zipBytes,
-            media_type="application/zip",
-            headers={
-                "Content-Disposition": f"attachment; filename*=UTF-8''{encodedName}"
-            }
-        )
-    except HTTPException:
-        raise
-    except Exception as e:
-        logger.error(f"Error downloading folder as ZIP: {e}")
-        raise HTTPException(status_code=500, detail=f"Error downloading folder: {str(e)}")


 @router.post("/batch-delete")

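For API consumers, the folder CRUD removed above is superseded by the group endpoints added later in this diff. A minimal client-side sketch, assuming a local deployment and a `/files` router prefix (both assumptions, not confirmed by this commit):

```python
import requests

BASE = "http://localhost:8000/files"            # hypothetical base URL
HEADERS = {"Authorization": "Bearer <token>"}   # hypothetical auth header

# Folder scope propagation becomes group scope propagation:
#   PATCH /folders/{folderId}/scope  ->  PATCH /groups/{groupId}/scope
resp = requests.patch(f"{BASE}/groups/abc-123/scope",
                      json={"scope": "mandate"}, headers=HEADERS)
print(resp.json())  # {"groupId": "abc-123", "scope": "mandate", "filesUpdated": ...}

# Folder ZIP download becomes group ZIP download:
#   GET /folders/{folderId}/download  ->  GET /groups/{groupId}/download
zip_bytes = requests.get(f"{BASE}/groups/abc-123/download", headers=HEADERS).content
```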
@@ -829,13 +499,11 @@ def batch_delete_items(
     currentUser: User = Depends(getCurrentUser),
     context: RequestContext = Depends(getRequestContext)
 ) -> Dict[str, Any]:
-    """Batch delete files/folders with a single SQL-backed operation per type."""
+    """Batch delete files."""
     fileIds = body.get("fileIds") or []
-    folderIds = body.get("folderIds") or []
-    recursiveFolders = bool(body.get("recursiveFolders", True))

-    if not isinstance(fileIds, list) or not isinstance(folderIds, list):
-        raise HTTPException(status_code=400, detail=routeApiMsg("fileIds and folderIds must be arrays"))
+    if not isinstance(fileIds, list):
+        raise HTTPException(status_code=400, detail=routeApiMsg("fileIds must be an array"))

     try:
         mgmt = interfaceDbManagement.getInterface(

@@ -844,17 +512,12 @@ def batch_delete_items(
             featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
         )

-        result = {"deletedFiles": 0, "deletedFolders": 0}
+        result = {"deletedFiles": 0}

         if fileIds:
             fileResult = mgmt.deleteFilesBatch(fileIds)
             result["deletedFiles"] += fileResult.get("deletedFiles", 0)

-        if folderIds:
-            folderResult = mgmt.deleteFoldersBatch(folderIds, recursive=recursiveFolders)
-            result["deletedFiles"] += folderResult.get("deletedFiles", 0)
-            result["deletedFolders"] += folderResult.get("deletedFolders", 0)

         return result
     except ValueError as e:
         raise HTTPException(status_code=400, detail=str(e))
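The trimmed contract in practice, as a sketch against the same hypothetical base URL as above:

```python
import requests

# Only fileIds survives the simplification; folderIds/recursiveFolders are gone.
payload = {"fileIds": ["file-1", "file-2"]}
resp = requests.post("http://localhost:8000/files/batch-delete",  # hypothetical URL
                     json=payload, headers={"Authorization": "Bearer <token>"})
assert set(resp.json()) == {"deletedFiles"}  # the deletedFolders counter is gone
```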
@@ -863,45 +526,189 @@ def batch_delete_items(
         raise HTTPException(status_code=500, detail=str(e))


-@router.post("/batch-move")
-@limiter.limit("10/minute")
-def batch_move_items(
-    request: Request,
-    body: Dict[str, Any] = Body(...),
-    currentUser: User = Depends(getCurrentUser),
-    context: RequestContext = Depends(getRequestContext)
-) -> Dict[str, Any]:
-    """Batch move files/folders with a single SQL-backed operation per type."""
-    fileIds = body.get("fileIds") or []
-    folderIds = body.get("folderIds") or []
-    targetFolderId = body.get("targetFolderId")
-    targetParentId = body.get("targetParentId")
-
-    if not isinstance(fileIds, list) or not isinstance(folderIds, list):
-        raise HTTPException(status_code=400, detail=routeApiMsg("fileIds and folderIds must be arrays"))
-
+# ── Group bulk endpoints ──────────────────────────────────────────────────────
+def _get_group_item_ids(contextKey: str, groupId: str, appInterface) -> set:
+    """Collect all file IDs in a group and its sub-groups from the stored groupTree."""
+    from modules.routes.routeHelpers import _collectItemIds
     try:
-        mgmt = interfaceDbManagement.getInterface(
+        existing = appInterface.getTableGrouping(contextKey)
+        if not existing:
+            return set()
+        nodes = [n.model_dump() if hasattr(n, 'model_dump') else n for n in existing.rootGroups]
+        result = _collectItemIds(nodes, groupId)
+        return result or set()
+    except Exception as e:
+        logger.error(f"_get_group_item_ids failed for groupId={groupId}: {e}")
+        return set()
+
+
+@router.patch("/groups/{groupId}/scope")
+@limiter.limit("60/minute")
+def patch_group_scope(
+    request: Request,
+    groupId: str = Path(..., description="Group ID"),
+    body: dict = Body(...),
+    currentUser: User = Depends(getCurrentUser),
+    context: RequestContext = Depends(getRequestContext),
+):
+    """Set scope for all files in a group (recursive)."""
+    scope = body.get("scope")
+    if not scope:
+        raise HTTPException(status_code=400, detail="scope is required")
+    try:
+        import modules.interfaces.interfaceDbApp as _appIface
+        managementInterface = interfaceDbManagement.getInterface(
             currentUser,
             mandateId=str(context.mandateId) if context.mandateId else None,
             featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
         )
-        result = {"movedFiles": 0, "movedFolders": 0}
-        if fileIds:
-            fileResult = mgmt.moveFilesBatch(fileIds, targetFolderId=targetFolderId)
-            result["movedFiles"] += fileResult.get("movedFiles", 0)
-        if folderIds:
-            folderResult = mgmt.moveFoldersBatch(folderIds, targetParentId=targetParentId)
-            result["movedFolders"] += folderResult.get("movedFolders", 0)
-        return result
-    except ValueError as e:
-        raise HTTPException(status_code=400, detail=str(e))
+        appInterface = _appIface.getInterface(currentUser)
+        fileIds = _get_group_item_ids("files/list", groupId, appInterface)
+        updated = 0
+        for fid in fileIds:
+            try:
+                managementInterface.updateFile(fid, {"scope": scope})
+                updated += 1
+            except Exception as e:
+                logger.error(f"patch_group_scope: failed to update file {fid}: {e}")
+        return {"groupId": groupId, "scope": scope, "filesUpdated": updated}
+    except HTTPException:
+        raise
     except Exception as e:
-        logger.error(f"Error in batch move: {e}")
+        logger.error(f"patch_group_scope error: {e}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.patch("/groups/{groupId}/neutralize")
+@limiter.limit("60/minute")
+def patch_group_neutralize(
+    request: Request,
+    groupId: str = Path(..., description="Group ID"),
+    body: dict = Body(...),
+    currentUser: User = Depends(getCurrentUser),
+    context: RequestContext = Depends(getRequestContext),
+):
+    """Toggle neutralize for all files in a group (recursive, incl. knowledge purge/reindex)."""
+    neutralize = body.get("neutralize")
+    if neutralize is None:
+        raise HTTPException(status_code=400, detail="neutralize is required")
+    try:
+        import modules.interfaces.interfaceDbApp as _appIface
+        managementInterface = interfaceDbManagement.getInterface(
+            currentUser,
+            mandateId=str(context.mandateId) if context.mandateId else None,
+            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
+        )
+        appInterface = _appIface.getInterface(currentUser)
+        fileIds = _get_group_item_ids("files/list", groupId, appInterface)
+        updated = 0
+        for fid in fileIds:
+            try:
+                managementInterface.updateFile(fid, {"neutralize": neutralize})
+                if not neutralize:
+                    try:
+                        from modules.interfaces import interfaceDbKnowledge
+                        kIface = interfaceDbKnowledge.getInterface(currentUser)
+                        kIface.purgeFileKnowledge(fid)
+                    except Exception as ke:
+                        logger.warning(f"patch_group_neutralize: knowledge purge failed for {fid}: {ke}")
+                updated += 1
+            except Exception as e:
+                logger.error(f"patch_group_neutralize: failed for file {fid}: {e}")
+        return {"groupId": groupId, "neutralize": neutralize, "filesUpdated": updated}
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"patch_group_neutralize error: {e}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.get("/groups/{groupId}/download")
+@limiter.limit("20/minute")
+async def download_group_zip(
+    request: Request,
+    groupId: str = Path(..., description="Group ID"),
+    currentUser: User = Depends(getCurrentUser),
+    context: RequestContext = Depends(getRequestContext),
+):
+    """Download all files in a group as a ZIP archive."""
+    import io, zipfile
+    try:
+        import modules.interfaces.interfaceDbApp as _appIface
+        managementInterface = interfaceDbManagement.getInterface(
+            currentUser,
+            mandateId=str(context.mandateId) if context.mandateId else None,
+            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
+        )
+        appInterface = _appIface.getInterface(currentUser)
+        fileIds = _get_group_item_ids("files/list", groupId, appInterface)
+        if not fileIds:
+            raise HTTPException(status_code=404, detail="Group not found or empty")
+        buf = io.BytesIO()
+        with zipfile.ZipFile(buf, "w", zipfile.ZIP_DEFLATED) as zf:
+            for fid in fileIds:
+                try:
+                    fileMeta = managementInterface.getFile(fid)
+                    fileData = managementInterface.getFileData(fid)
+                    if fileMeta and fileData:
+                        name = (fileMeta.get("fileName") if isinstance(fileMeta, dict) else getattr(fileMeta, "fileName", fid)) or fid
+                        zf.writestr(name, fileData)
+                except Exception as fe:
+                    logger.warning(f"download_group_zip: skipping file {fid}: {fe}")
+        buf.seek(0)
+        from fastapi.responses import StreamingResponse
+        return StreamingResponse(
+            buf,
+            media_type="application/zip",
+            headers={"Content-Disposition": f'attachment; filename="group-{groupId}.zip"'},
+        )
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"download_group_zip error: {e}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.delete("/groups/{groupId}")
+@limiter.limit("30/minute")
+def delete_group(
+    request: Request,
+    groupId: str = Path(..., description="Group ID"),
+    deleteItems: bool = Query(False, description="If true, also delete all files in the group"),
+    currentUser: User = Depends(getCurrentUser),
+    context: RequestContext = Depends(getRequestContext),
+):
+    """Remove a group from the groupTree. Optionally delete all its files."""
+    try:
+        import modules.interfaces.interfaceDbApp as _appIface
+        appInterface = _appIface.getInterface(currentUser)
+        fileIds = _get_group_item_ids("files/list", groupId, appInterface)
+        # Remove group from tree
+        existing = appInterface.getTableGrouping("files/list")
+        if existing:
+            from modules.routes.routeHelpers import _removeGroupFromTree
+            newRoots = _removeGroupFromTree([n.model_dump() if hasattr(n, 'model_dump') else n for n in existing.rootGroups], groupId)
+            appInterface.upsertTableGrouping("files/list", newRoots)
+        # Optionally delete files
+        deletedFiles = 0
+        if deleteItems:
+            managementInterface = interfaceDbManagement.getInterface(
+                currentUser,
+                mandateId=str(context.mandateId) if context.mandateId else None,
+                featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
+            )
+            for fid in fileIds:
+                try:
+                    managementInterface.deleteFile(fid)
+                    deletedFiles += 1
+                except Exception as e:
+                    logger.error(f"delete_group: failed to delete file {fid}: {e}")
+        return {"groupId": groupId, "deletedFiles": deletedFiles}
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"delete_group error: {e}")
         raise HTTPException(status_code=500, detail=str(e))

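`_get_group_item_ids` relies on `_collectItemIds` from routeHelpers, which this diff does not include. Under the node shape that `_getOrCreateInstanceGroup` creates later in this commit, an equivalent walk would look like this (a sketch, not the actual helper):

```python
def collect_item_ids(nodes: list, group_id: str, inside: bool = False) -> set:
    """Union of itemIds of the matching group and all of its sub-groups."""
    ids: set = set()
    for node in nodes:
        hit = inside or node.get("id") == group_id
        if hit:
            ids.update(node.get("itemIds", []))
        # Recurse either to find the group or to gather descendant items.
        ids |= collect_item_ids(node.get("subGroups", []), group_id, inside=hit)
    return ids

tree = [{"id": "g1", "itemIds": ["a"],
         "subGroups": [{"id": "g2", "itemIds": ["b"], "subGroups": []}]}]
assert collect_item_ids(tree, "g1") == {"a", "b"}
assert collect_item_ids(tree, "g2") == {"b"}
```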
@@ -1102,7 +909,7 @@ def update_file(
 ) -> FileItem:
     """Update file info"""
     try:
-        _EDITABLE_FIELDS = {"fileName", "scope", "tags", "description", "folderId", "neutralize"}
+        _EDITABLE_FIELDS = {"fileName", "scope", "tags", "description", "neutralize"}
         safeData = {k: v for k, v in file_info.items() if k in _EDITABLE_FIELDS}
         if not safeData:
             raise HTTPException(status_code=400, detail=routeApiMsg("No editable fields provided"))
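With `folderId` gone from the whitelist, a move can no longer be smuggled through this endpoint; only whitelisted keys survive the filter:

```python
# Behaviour of the new whitelist, reproduced verbatim from the hunk above.
file_info = {"fileName": "report-v2.pdf", "neutralize": True, "folderId": "f-1"}
_EDITABLE_FIELDS = {"fileName", "scope", "tags", "description", "neutralize"}
safeData = {k: v for k, v in file_info.items() if k in _EDITABLE_FIELDS}
assert safeData == {"fileName": "report-v2.pdf", "neutralize": True}  # folderId is silently dropped
```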
@@ -1257,37 +1064,3 @@ def preview_file(
         )


-@router.post("/{fileId}/move")
-@limiter.limit("10/minute")
-def move_file(
-    request: Request,
-    fileId: str = Path(...),
-    body: Dict[str, Any] = Body(...),
-    currentUser: User = Depends(getCurrentUser),
-    context: RequestContext = Depends(getRequestContext)
-) -> Dict[str, Any]:
-    """Move a file to a different folder."""
-    targetFolderId = body.get("targetFolderId")
-    try:
-        mgmt = interfaceDbManagement.getInterface(
-            currentUser,
-            mandateId=str(context.mandateId) if context.mandateId else None,
-            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
-        )
-        mgmt.updateFile(fileId, {"folderId": targetFolderId})
-
-        if targetFolderId:
-            try:
-                targetFolder = mgmt.getFolder(targetFolderId)
-                folderNeut = (targetFolder.get("neutralize") if isinstance(targetFolder, dict)
-                              else getattr(targetFolder, "neutralize", False)) if targetFolder else False
-                if folderNeut:
-                    mgmt.updateFile(fileId, {"neutralize": True})
-                    logger.info("File %s moved to neutralized folder %s — inherited neutralize=True", fileId, targetFolderId)
-            except Exception as e:
-                logger.warning("File move: folder neutralize inheritance check failed for %s: %s", fileId, e)
-
-        return {"success": True, "fileId": fileId, "folderId": targetFolderId}
-    except Exception as e:
-        logger.error(f"Error moving file: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
@@ -750,6 +750,21 @@ def _collectAllIds(node, ids: set) -> None:
         _collectAllIds(child, ids)


+def _removeGroupFromTree(nodes: list, groupId: str) -> list:
+    """Remove a group node (and all descendants) from the tree by id."""
+    result = []
+    for node in nodes:
+        nodeId = node.get("id") if isinstance(node, dict) else getattr(node, "id", None)
+        if nodeId == groupId:
+            continue  # skip this node (remove it)
+        subGroups = node.get("subGroups", []) if isinstance(node, dict) else getattr(node, "subGroups", [])
+        filtered_sub = _removeGroupFromTree(subGroups, groupId)
+        if isinstance(node, dict):
+            node = {**node, "subGroups": filtered_sub}
+        result.append(node)
+    return result
+
+
 def handleGroupingInRequest(
     paginationParams: Optional[PaginationParams],
     interface,
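The hunk above belongs to the grouping helpers module (modules/routes/routeHelpers.py, inferred from the import path used by the new group endpoints). `_removeGroupFromTree` rebuilds the tree without the target node; a quick behavioural check:

```python
tree = [{"id": "root", "subGroups": [
    {"id": "child", "subGroups": []},
    {"id": "keep", "subGroups": []},
]}]
pruned = _removeGroupFromTree(tree, "child")
assert [g["id"] for g in pruned[0]["subGroups"]] == ["keep"]
# Copy-on-write: dict nodes are rebuilt via {**node, ...}, so the input
# tree itself is left unmodified.
```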
@@ -1,6 +1,6 @@
 # Copyright (c) 2025 Patrick Motsch
 # All rights reserved.
-"""Shared helpers for core agent tools (file scope, binary detection, temp folder)."""
+"""Shared helpers for core agent tools (file scope, binary detection, group helpers)."""

 import logging
 import uuid
@@ -46,39 +46,60 @@ def _looksLikeBinary(data: bytes, sampleSize: int = 1024) -> bool:
     return nonPrintable / len(sample) > 0.10


-def _getOrCreateInstanceFolder(chatService, featureInstanceId: str, mandateId: str = "") -> Optional[str]:
-    """Return the folder ID for a feature instance, creating it on first use.
-
-    Delegates to interfaceDbManagement._ensureFeatureInstanceFolder.
-    AI tools call this when saving a file without an explicit folderId
-    so that instance-produced files land in a named folder automatically.
-    """
-    try:
-        dbMgmt = chatService.interfaceDbComponent
-        return dbMgmt._ensureFeatureInstanceFolder(featureInstanceId, mandateId)
-    except Exception as e:
-        logger.warning(f"Could not get/create instance folder for {featureInstanceId}: {e}")
-        return None
-
-
 def _getOrCreateTempFolder(chatService) -> Optional[str]:
-    """Return the ID of the root-level 'Temp' folder, creating it if it doesn't exist."""
+    """Deprecated stub: folder-based organisation has been replaced by grouping.
+
+    Returns None unconditionally so callers skip the (now removed) folderId
+    assignment. Remove callers incrementally and delete this stub afterwards.
+    """
+    logger.debug("_getOrCreateTempFolder called – folder support removed, returning None")
+    return None
+
+
+async def _getOrCreateInstanceGroup(
+    appInterface,
+    featureInstanceId: str,
+    contextKey: str = "files/list",
+) -> Optional[str]:
+    """Return groupId of the default group for a feature instance; create if needed."""
     try:
-        allFolders = chatService.interfaceDbComponent.listFolders()
-        tempFolder = next(
-            (f for f in allFolders
-             if f.get("name") == "Temp" and not f.get("parentId")),
-            None,
-        )
-        if tempFolder:
-            return tempFolder.get("id")
-        newFolder = chatService.interfaceDbComponent.createFolder("Temp", parentId=None)
-        return newFolder.get("id") if newFolder else None
+        existing = appInterface.getTableGrouping(contextKey)
+        nodes = [
+            n.model_dump() if hasattr(n, "model_dump") else (n if isinstance(n, dict) else vars(n))
+            for n in (existing.rootGroups if existing else [])
+        ]
+
+        def _find(nds):
+            for nd in nds:
+                meta = nd.get("meta", {}) if isinstance(nd, dict) else getattr(nd, "meta", {})
+                if (meta or {}).get("featureInstanceId") == featureInstanceId:
+                    return nd.get("id") if isinstance(nd, dict) else getattr(nd, "id", None)
+                found = _find(nd.get("subGroups", []) if isinstance(nd, dict) else getattr(nd, "subGroups", []))
+                if found:
+                    return found
+            return None
+
+        found = _find(nodes)
+        if found:
+            return found
+        newId = str(uuid.uuid4())
+        nodes.append({"id": newId, "name": featureInstanceId, "itemIds": [], "subGroups": [], "meta": {"featureInstanceId": featureInstanceId}})
+        appInterface.upsertTableGrouping(contextKey, nodes)
+        return newId
     except Exception as e:
-        logger.warning(f"Could not get/create Temp folder: {e}")
+        logger.error(f"_getOrCreateInstanceGroup: {e}")
         return None


+async def _getOrCreateTempGroup(
+    appInterface,
+    sessionId: str,
+    contextKey: str = "files/list",
+) -> Optional[str]:
+    """Return groupId of a temporary group for a session; create if needed."""
+    return await _getOrCreateInstanceGroup(appInterface, f"_temp_{sessionId}", contextKey)
+
+
 def _attachFileAsChatDocument(
     services: Any,
     fileItem: Any,
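`_getOrCreateTempGroup` piggybacks on the instance-group machinery by synthesising a pseudo feature-instance id, so a session's temp group is discoverable through the same `meta.featureInstanceId` key. A sketch of that lookup, using the node shape `_getOrCreateInstanceGroup` appends:

```python
session_id = "sess-42"
pseudo_id = f"_temp_{session_id}"  # naming convention from _getOrCreateTempGroup

def find_group_by_meta(nodes: list, feature_instance_id: str):
    """Depth-first search for a group whose meta carries the given instance id."""
    for nd in nodes:
        if (nd.get("meta") or {}).get("featureInstanceId") == feature_instance_id:
            return nd["id"]
        found = find_group_by_meta(nd.get("subGroups", []), feature_instance_id)
        if found:
            return found
    return None

# A freshly created temp-group node, as appended by _getOrCreateInstanceGroup:
node = {"id": "example-uuid", "name": pseudo_id, "itemIds": [], "subGroups": [],
        "meta": {"featureInstanceId": pseudo_id}}
assert find_group_by_meta([node], pseudo_id) == "example-uuid"
```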
@@ -11,8 +11,8 @@ from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistr
 from modules.serviceCenter.services.serviceAgent.coreTools._helpers import (
     _attachFileAsChatDocument,
     _formatToolFileResult,
-    _getOrCreateInstanceFolder,
-    _getOrCreateTempFolder,
+    _getOrCreateInstanceGroup,
+    _getOrCreateTempGroup,
     _looksLikeBinary,
     _MAX_TOOL_RESULT_CHARS,
 )
@@ -169,7 +169,6 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
         try:
             chatService = services.chat
             files = chatService.listFiles(
-                folderId=args.get("folderId"),
                 tags=args.get("tags"),
                 search=args.get("search"),
             )
@@ -222,18 +221,6 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
         except Exception as e:
             return ToolResult(toolCallId="", toolName="searchInFileContent", success=False, error=str(e))

-    async def _listFolders(args: Dict[str, Any], context: Dict[str, Any]):
-        try:
-            chatService = services.chat
-            folders = chatService.listFolders(parentId=args.get("parentId"))
-            folderList = "\n".join(
-                f"- {f.get('name', 'unnamed')} (id: {f.get('id', '?')})"
-                for f in folders
-            ) if folders else "No folders found."
-            return ToolResult(toolCallId="", toolName="listFolders", success=True, data=folderList)
-        except Exception as e:
-            return ToolResult(toolCallId="", toolName="listFolders", success=False, error=str(e))
-
     async def _webSearch(args: Dict[str, Any], context: Dict[str, Any]):
         query = args.get("query", "")
         if not query:
|
@ -271,35 +258,6 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
return ToolResult(toolCallId="", toolName="tagFile", success=False, error=str(e))
|
return ToolResult(toolCallId="", toolName="tagFile", success=False, error=str(e))
|
||||||
|
|
||||||
async def _moveFile(args: Dict[str, Any], context: Dict[str, Any]):
|
|
||||||
fileId = args.get("fileId", "")
|
|
||||||
targetFolderId = args.get("targetFolderId")
|
|
||||||
if not fileId:
|
|
||||||
return ToolResult(toolCallId="", toolName="moveFile", success=False, error="fileId is required")
|
|
||||||
try:
|
|
||||||
chatService = services.chat
|
|
||||||
chatService.interfaceDbComponent.updateFile(fileId, {"folderId": targetFolderId})
|
|
||||||
return ToolResult(
|
|
||||||
toolCallId="", toolName="moveFile", success=True,
|
|
||||||
data=f"File {fileId} moved to folder {targetFolderId or 'root'}"
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
return ToolResult(toolCallId="", toolName="moveFile", success=False, error=str(e))
|
|
||||||
|
|
||||||
async def _createFolder(args: Dict[str, Any], context: Dict[str, Any]):
|
|
||||||
name = args.get("name", "")
|
|
||||||
if not name:
|
|
||||||
return ToolResult(toolCallId="", toolName="createFolder", success=False, error="name is required")
|
|
||||||
try:
|
|
||||||
chatService = services.chat
|
|
||||||
folder = chatService.createFolder(name=name, parentId=args.get("parentId"))
|
|
||||||
return ToolResult(
|
|
||||||
toolCallId="", toolName="createFolder", success=True,
|
|
||||||
data=f"Folder '{name}' created (id: {folder.get('id', '?')})"
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
return ToolResult(toolCallId="", toolName="createFolder", success=False, error=str(e))
|
|
||||||
|
|
||||||
async def _writeFile(args: Dict[str, Any], context: Dict[str, Any]):
|
async def _writeFile(args: Dict[str, Any], context: Dict[str, Any]):
|
||||||
content = args.get("content", "")
|
content = args.get("content", "")
|
||||||
mode = args.get("mode", "create")
|
mode = args.get("mode", "create")
|
||||||
|
|
@@ -354,12 +312,52 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
             fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
             if fiId:
                 dbMgmt.updateFile(fileItem.id, {"featureInstanceId": fiId})
-            if args.get("folderId"):
-                dbMgmt.updateFile(fileItem.id, {"folderId": args["folderId"]})
+            if args.get("groupId"):
+                try:
+                    appIface = chatService.interfaceDbApp
+                    existing = appIface.getTableGrouping("files/list")
+                    nodes = [n.model_dump() if hasattr(n, "model_dump") else (n if isinstance(n, dict) else vars(n)) for n in (existing.rootGroups if existing else [])]
+                    def _addToGroup(nds, gid, fid):
+                        for nd in nds:
+                            nid = nd.get("id") if isinstance(nd, dict) else getattr(nd, "id", None)
+                            if nid == gid:
+                                ids = list(nd.get("itemIds", []) if isinstance(nd, dict) else getattr(nd, "itemIds", []))
+                                if fid not in ids:
+                                    ids.append(fid)
+                                if isinstance(nd, dict):
+                                    nd["itemIds"] = ids
+                                return True
+                            if _addToGroup(nd.get("subGroups", []) if isinstance(nd, dict) else getattr(nd, "subGroups", []), gid, fid):
+                                return True
+                        return False
+                    _addToGroup(nodes, args["groupId"], fileItem.id)
+                    appIface.upsertTableGrouping("files/list", nodes)
+                except Exception as _ge:
+                    logger.warning(f"writeFile: failed to add file to group {args['groupId']}: {_ge}")
             elif fiId:
-                instanceFolderId = _getOrCreateInstanceFolder(chatService, fiId, context.get("mandateId", ""))
-                if instanceFolderId:
-                    dbMgmt.updateFile(fileItem.id, {"folderId": instanceFolderId})
+                try:
+                    appIface = chatService.interfaceDbApp
+                    instanceGroupId = await _getOrCreateInstanceGroup(appIface, fiId)
+                    if instanceGroupId:
+                        existing = appIface.getTableGrouping("files/list")
+                        nodes = [n.model_dump() if hasattr(n, "model_dump") else (n if isinstance(n, dict) else vars(n)) for n in (existing.rootGroups if existing else [])]
+                        def _addToGroup2(nds, gid, fid):
+                            for nd in nds:
+                                nid = nd.get("id") if isinstance(nd, dict) else getattr(nd, "id", None)
+                                if nid == gid:
+                                    ids = list(nd.get("itemIds", []) if isinstance(nd, dict) else getattr(nd, "itemIds", []))
+                                    if fid not in ids:
+                                        ids.append(fid)
+                                    if isinstance(nd, dict):
+                                        nd["itemIds"] = ids
+                                    return True
+                                if _addToGroup2(nd.get("subGroups", []) if isinstance(nd, dict) else getattr(nd, "subGroups", []), gid, fid):
+                                    return True
+                            return False
+                        _addToGroup2(nodes, instanceGroupId, fileItem.id)
+                        appIface.upsertTableGrouping("files/list", nodes)
+                except Exception as _ge:
+                    logger.warning(f"writeFile: failed to add file to instance group for {fiId}: {_ge}")
             if args.get("tags"):
                 dbMgmt.updateFile(fileItem.id, {"tags": args["tags"]})
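`_addToGroup` and `_addToGroup2` in this hunk are the same tree walk, duplicated across the two branches. If the duplication is ever hoisted, one module-level helper would cover both call sites; a possible shape (a suggestion, not part of this commit):

```python
def _appendItemToGroup(nodes: list, groupId: str, itemId: str) -> bool:
    """Depth-first search for groupId; append itemId once if found."""
    for nd in nodes:
        nid = nd.get("id") if isinstance(nd, dict) else getattr(nd, "id", None)
        if nid == groupId:
            ids = list(nd.get("itemIds", []) if isinstance(nd, dict) else getattr(nd, "itemIds", []))
            if itemId not in ids:
                ids.append(itemId)
            if isinstance(nd, dict):
                nd["itemIds"] = ids
            return True
        sub = nd.get("subGroups", []) if isinstance(nd, dict) else getattr(nd, "subGroups", [])
        if _appendItemToGroup(sub, groupId, itemId):
            return True
    return False
```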
@@ -412,13 +410,13 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
     registry.register(
         "listFiles", _listFiles,
         description=(
-            "List files in the local workspace. Filter by folder, tags, or search term. "
+            "List files in the local workspace. Filter by tags or search term. "
+            "To filter by group, use listItemsInGroup. "
             "For external data sources, use browseDataSource instead."
         ),
         parameters={
             "type": "object",
             "properties": {
-                "folderId": {"type": "string", "description": "Filter by folder ID"},
                 "tags": {"type": "array", "items": {"type": "string"}, "description": "Filter by tags (any match)"},
                 "search": {"type": "string", "description": "Search in file names and descriptions"},
             }
@@ -445,18 +443,6 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
         readOnly=True
     )

-    registry.register(
-        "listFolders", _listFolders,
-        description="List folders in the local workspace. For external data sources, use browseDataSource instead.",
-        parameters={
-            "type": "object",
-            "properties": {
-                "parentId": {"type": "string", "description": "Parent folder ID (omit for root)"},
-            }
-        },
-        readOnly=True
-    )
-
     registry.register(
         "webSearch", _webSearch,
         description="Search the web for general information. Use readUrl to fetch content from a known URL instead.",
@@ -482,34 +468,6 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
         readOnly=False
     )

-    registry.register(
-        "moveFile", _moveFile,
-        description="Move a file to a different folder in the local workspace.",
-        parameters={
-            "type": "object",
-            "properties": {
-                "fileId": {"type": "string", "description": "The file ID to move"},
-                "targetFolderId": {"type": "string", "description": "Target folder ID (null for root)"},
-            },
-            "required": ["fileId"]
-        },
-        readOnly=False
-    )
-
-    registry.register(
-        "createFolder", _createFolder,
-        description="Create a new folder in the local workspace.",
-        parameters={
-            "type": "object",
-            "properties": {
-                "name": {"type": "string", "description": "Folder name"},
-                "parentId": {"type": "string", "description": "Parent folder ID (omit for root)"},
-            },
-            "required": ["name"]
-        },
-        readOnly=False
-    )
-
     registry.register(
         "writeFile", _writeFile,
         description=(
@@ -530,7 +488,7 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
                 "content": {"type": "string", "description": "Content to write/append"},
                 "mode": {"type": "string", "enum": ["create", "append", "overwrite"], "description": "Write mode (default: create)"},
                 "fileId": {"type": "string", "description": "File ID (required for mode=append/overwrite)"},
-                "folderId": {"type": "string", "description": "Target folder ID (mode=create only)"},
+                "groupId": {"type": "string", "description": "Group ID to place the file in (mode=create only). Omit to use the instance default group."},
                 "tags": {"type": "array", "items": {"type": "string"}, "description": "Tags (mode=create only)"},
             },
             "required": ["content"]
@@ -690,55 +648,7 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
         readOnly=True
     )

-    # ---- Phase 2: deleteFolder, renameFolder, moveFolder, copyFile, editFile ----
+    # ---- Phase 2: copyFile, editFile ----

-    async def _deleteFolder(args: Dict[str, Any], context: Dict[str, Any]):
-        folderId = args.get("folderId", "")
-        recursive = args.get("recursive", False)
-        if not folderId:
-            return ToolResult(toolCallId="", toolName="deleteFolder", success=False, error="folderId is required")
-        try:
-            chatService = services.chat
-            result = chatService.interfaceDbComponent.deleteFolder(folderId, recursive=recursive)
-            summary = f"Deleted {result.get('deletedFolders', 1)} folder(s) and {result.get('deletedFiles', 0)} file(s)"
-            return ToolResult(
-                toolCallId="", toolName="deleteFolder", success=True, data=summary,
-                sideEvents=[{"type": "folderDeleted", "data": {"folderId": folderId, **result}}],
-            )
-        except Exception as e:
-            return ToolResult(toolCallId="", toolName="deleteFolder", success=False, error=str(e))
-
-    async def _renameFolder(args: Dict[str, Any], context: Dict[str, Any]):
-        folderId = args.get("folderId", "")
-        newName = args.get("newName", "")
-        if not folderId or not newName:
-            return ToolResult(toolCallId="", toolName="renameFolder", success=False, error="folderId and newName are required")
-        try:
-            chatService = services.chat
-            chatService.interfaceDbComponent.renameFolder(folderId, newName)
-            return ToolResult(
-                toolCallId="", toolName="renameFolder", success=True,
-                data=f"Folder {folderId} renamed to '{newName}'",
-                sideEvents=[{"type": "folderUpdated", "data": {"folderId": folderId, "name": newName}}],
-            )
-        except Exception as e:
-            return ToolResult(toolCallId="", toolName="renameFolder", success=False, error=str(e))
-
-    async def _moveFolder(args: Dict[str, Any], context: Dict[str, Any]):
-        folderId = args.get("folderId", "")
-        targetParentId = args.get("targetParentId")
-        if not folderId:
-            return ToolResult(toolCallId="", toolName="moveFolder", success=False, error="folderId is required")
-        try:
-            chatService = services.chat
-            chatService.interfaceDbComponent.moveFolder(folderId, targetParentId)
-            return ToolResult(
-                toolCallId="", toolName="moveFolder", success=True,
-                data=f"Folder {folderId} moved to {targetParentId or 'root'}",
-                sideEvents=[{"type": "folderUpdated", "data": {"folderId": folderId, "parentId": targetParentId}}],
-            )
-        except Exception as e:
-            return ToolResult(toolCallId="", toolName="moveFolder", success=False, error=str(e))
-
     async def _copyFile(args: Dict[str, Any], context: Dict[str, Any]):
         fileId = args.get("fileId", "")
@@ -748,7 +658,6 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
             chatService = services.chat
             copiedFile = chatService.interfaceDbComponent.copyFile(
                 fileId,
-                targetFolderId=args.get("targetFolderId"),
                 newFileName=args.get("newFileName"),
             )
             return ToolResult(
@@ -823,48 +732,6 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
         except Exception as e:
             return ToolResult(toolCallId="", toolName="replaceInFile", success=False, error=str(e))

-    registry.register(
-        "deleteFolder", _deleteFolder,
-        description="Delete a folder from the local workspace. Set recursive=true to delete all contents.",
-        parameters={
-            "type": "object",
-            "properties": {
-                "folderId": {"type": "string", "description": "The folder ID to delete"},
-                "recursive": {"type": "boolean", "description": "If true, delete folder and all contents (files and subfolders). Default: false"},
-            },
-            "required": ["folderId"]
-        },
-        readOnly=False
-    )
-
-    registry.register(
-        "renameFolder", _renameFolder,
-        description="Rename a folder in the local workspace.",
-        parameters={
-            "type": "object",
-            "properties": {
-                "folderId": {"type": "string", "description": "The folder ID to rename"},
-                "newName": {"type": "string", "description": "New folder name"},
-            },
-            "required": ["folderId", "newName"]
-        },
-        readOnly=False
-    )
-
-    registry.register(
-        "moveFolder", _moveFolder,
-        description="Move a folder to a different parent in the local workspace.",
-        parameters={
-            "type": "object",
-            "properties": {
-                "folderId": {"type": "string", "description": "The folder ID to move"},
-                "targetParentId": {"type": "string", "description": "Target parent folder ID (null/omit for root)"},
-            },
-            "required": ["folderId"]
-        },
-        readOnly=False
-    )
-
     registry.register(
         "copyFile", _copyFile,
         description="Create an independent copy of a file in the local workspace.",
@@ -872,7 +739,6 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
             "type": "object",
             "properties": {
                 "fileId": {"type": "string", "description": "The file ID to copy"},
-                "targetFolderId": {"type": "string", "description": "Target folder for the copy (default: same folder)"},
                 "newFileName": {"type": "string", "description": "New file name (default: same name, auto-numbered if duplicate)"},
             },
             "required": ["fileId"]
@@ -880,6 +746,137 @@ def _registerWorkspaceTools(registry: ToolRegistry, services):
         readOnly=False
     )
 
+    # ---- Group tools (replaces folder-based tools) ----
+
+    async def _listGroups(args: Dict[str, Any], context: Dict[str, Any]):
+        contextKey = args.get("contextKey", "files/list")
+        try:
+            chatService = services.chat
+            appInterface = chatService.interfaceDbApp
+            existing = appInterface.getTableGrouping(contextKey)
+            if not existing:
+                return ToolResult(toolCallId="", toolName="listGroups", success=True, data="No groups found.")
+
+            def _flatten(nodes, depth=0):
+                result = []
+                for n in nodes:
+                    nd = n.model_dump() if hasattr(n, "model_dump") else (n if isinstance(n, dict) else vars(n))
+                    result.append({"id": nd.get("id"), "name": nd.get("name"), "depth": depth, "itemCount": len(nd.get("itemIds", []))})
+                    result.extend(_flatten(nd.get("subGroups", []), depth + 1))
+                return result
+
+            groups = _flatten(existing.rootGroups)
+            lines = "\n".join(
+                f"{' ' * g['depth']}- {g['name']} (id: {g['id']}, items: {g['itemCount']})"
+                for g in groups
+            ) if groups else "No groups found."
+            return ToolResult(toolCallId="", toolName="listGroups", success=True, data=lines)
+        except Exception as e:
+            return ToolResult(toolCallId="", toolName="listGroups", success=False, error=str(e))
+
+    async def _listItemsInGroup(args: Dict[str, Any], context: Dict[str, Any]):
+        groupId = args.get("groupId", "")
+        contextKey = args.get("contextKey", "files/list")
+        if not groupId:
+            return ToolResult(toolCallId="", toolName="listItemsInGroup", success=False, error="groupId is required")
+        try:
+            from modules.routes.routeHelpers import _collectItemIds
+            chatService = services.chat
+            appInterface = chatService.interfaceDbApp
+            existing = appInterface.getTableGrouping(contextKey)
+            if not existing:
+                return ToolResult(toolCallId="", toolName="listItemsInGroup", success=True, data="No groups found.")
+            nodes = [n.model_dump() if hasattr(n, "model_dump") else (n if isinstance(n, dict) else vars(n)) for n in existing.rootGroups]
+            ids = _collectItemIds(nodes, groupId)
+            itemList = list(ids) if ids else []
+            return ToolResult(
+                toolCallId="", toolName="listItemsInGroup", success=True,
+                data="\n".join(f"- {fid}" for fid in itemList) if itemList else "No items in group.",
+            )
+        except Exception as e:
+            return ToolResult(toolCallId="", toolName="listItemsInGroup", success=False, error=str(e))
+
+    async def _addItemsToGroup(args: Dict[str, Any], context: Dict[str, Any]):
+        groupId = args.get("groupId", "")
+        itemIds = args.get("itemIds", [])
+        contextKey = args.get("contextKey", "files/list")
+        if not groupId:
+            return ToolResult(toolCallId="", toolName="addItemsToGroup", success=False, error="groupId is required")
+        if not itemIds:
+            return ToolResult(toolCallId="", toolName="addItemsToGroup", success=False, error="itemIds is required")
+        try:
+            chatService = services.chat
+            appInterface = chatService.interfaceDbApp
+            existing = appInterface.getTableGrouping(contextKey)
+            nodes = [n.model_dump() if hasattr(n, "model_dump") else (n if isinstance(n, dict) else vars(n)) for n in (existing.rootGroups if existing else [])]
+
+            def _add(nds):
+                for nd in nds:
+                    nid = nd.get("id") if isinstance(nd, dict) else getattr(nd, "id", None)
+                    if nid == groupId:
+                        existing_ids = list(nd.get("itemIds", []) if isinstance(nd, dict) else getattr(nd, "itemIds", []))
+                        for fid in itemIds:
+                            if fid not in existing_ids:
+                                existing_ids.append(fid)
+                        if isinstance(nd, dict):
+                            nd["itemIds"] = existing_ids
+                        return True
+                    if _add(nd.get("subGroups", []) if isinstance(nd, dict) else getattr(nd, "subGroups", [])):
+                        return True
+                return False
+
+            found = _add(nodes)
+            if not found:
+                return ToolResult(toolCallId="", toolName="addItemsToGroup", success=False, error=f"Group {groupId} not found")
+            appInterface.upsertTableGrouping(contextKey, nodes)
+            return ToolResult(
+                toolCallId="", toolName="addItemsToGroup", success=True,
+                data=f"Added {len(itemIds)} item(s) to group {groupId}",
+            )
+        except Exception as e:
+            return ToolResult(toolCallId="", toolName="addItemsToGroup", success=False, error=str(e))
+
+    registry.register(
+        "listGroups", _listGroups,
+        description="List all groups in the file grouping tree. Groups replace folders for organising files.",
+        parameters={
+            "type": "object",
+            "properties": {
+                "contextKey": {"type": "string", "description": "Grouping context key (default: 'files/list')"},
+            }
+        },
+        readOnly=True
+    )
+
+    registry.register(
+        "listItemsInGroup", _listItemsInGroup,
+        description="List all file IDs assigned to a specific group (includes sub-groups recursively).",
+        parameters={
+            "type": "object",
+            "properties": {
+                "groupId": {"type": "string", "description": "The group ID to inspect"},
+                "contextKey": {"type": "string", "description": "Grouping context key (default: 'files/list')"},
+            },
+            "required": ["groupId"]
+        },
+        readOnly=True
+    )
+
+    registry.register(
+        "addItemsToGroup", _addItemsToGroup,
+        description="Add one or more file IDs to an existing group.",
+        parameters={
+            "type": "object",
+            "properties": {
+                "groupId": {"type": "string", "description": "The group ID to add files to"},
+                "itemIds": {"type": "array", "items": {"type": "string"}, "description": "List of file IDs to add"},
+                "contextKey": {"type": "string", "description": "Grouping context key (default: 'files/list')"},
+            },
+            "required": ["groupId", "itemIds"]
+        },
+        readOnly=False
+    )
+
     registry.register(
         "replaceInFile", _replaceInFile,
         description=(
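The grouping tree these tools walk is stored per contextKey (default "files/list") as a list of root group nodes, each carrying id, name, itemIds and nested subGroups; nodes may arrive as pydantic models or plain dicts, which is why the handlers probe model_dump before falling back to dict access. A minimal self-contained sketch of the _flatten traversal used by listGroups, run against invented plain-dict nodes:

# Invented sample data mirroring the rootGroups shape the tools assume.
rootGroups = [
    {"id": "g1", "name": "Reports", "itemIds": ["f1", "f2"], "subGroups": [
        {"id": "g2", "name": "2024", "itemIds": ["f3"], "subGroups": []},
    ]},
]

def _flatten(nodes, depth=0):
    # Depth-first walk: every group becomes one summary row tagged with its depth.
    result = []
    for n in nodes:
        result.append({"id": n["id"], "name": n["name"], "depth": depth,
                       "itemCount": len(n.get("itemIds", []))})
        result.extend(_flatten(n.get("subGroups", []), depth + 1))
    return result

for g in _flatten(rootGroups):
    print(f"{' ' * g['depth']}- {g['name']} (id: {g['id']}, items: {g['itemCount']})")
# - Reports (id: g1, items: 2)
#  - 2024 (id: g2, items: 1)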
@@ -268,24 +268,19 @@ class AgentService:
             info = chatService.getFileInfo(fid)
 
             if not info:
-                folderInfo = chatService.interfaceDbComponent.getFolder(fid)
-                if folderInfo:
-                    folderName = folderInfo.get("name", fid)
-                    folderFiles = chatService.listFiles(folderId=fid)
-                    desc = f"### Folder: {folderName}\n - id: {fid}\n - type: folder\n - contains: {len(folderFiles)} file(s)"
-                    if folderFiles:
-                        desc += "\n - files:"
-                        for ff in folderFiles[:30]:
-                            ffName = ff.get("fileName", "?")
-                            ffId = ff.get("id", "?")
-                            ffMime = ff.get("mimeType", "?")
-                            ffSize = ff.get("fileSize", ff.get("size", "?"))
-                            desc += f"\n * {ffName} (id: {ffId}, type: {ffMime}, size: {ffSize} bytes)"
-                        if len(folderFiles) > 30:
-                            desc += f"\n ... and {len(folderFiles) - 30} more files"
-                    desc += f'\nUse `listFiles(folderId="{fid}")` to get the full file list, then `readFile(fileId)` to read individual files.'
-                    fileDescriptions.append(desc)
-                    continue
+                # Check if fid is a group ID
+                try:
+                    groupFileIds = chatService.listFilesInGroup(fid)
+                    if groupFileIds:
+                        allGroups = chatService.listGroups()
+                        groupInfo = next((g for g in allGroups if g.get("id") == fid), None)
+                        groupName = groupInfo.get("name", fid) if groupInfo else fid
+                        desc = f"### Group: {groupName}\n - id: {fid}\n - type: group\n - contains: {len(groupFileIds)} file(s)"
+                        desc += f'\nUse `listItemsInGroup(groupId="{fid}")` to get file IDs, then `readFile(fileId)` to read each.'
+                        fileDescriptions.append(desc)
+                        continue
+                except Exception:
+                    pass
 
                 fileDescriptions.append(f"### File id: {fid}")
                 continue
 
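The replacement branch duck-types the attachment ID: when getFileInfo finds nothing, listFilesInGroup is tried, and a non-empty result means the ID is a group, which gets summarised rather than read. A self-contained sketch of the summary the agent receives, with the service calls replaced by invented data:

fid = "grp-1234"                                       # invented group ID
groupFileIds = ["file-a", "file-b", "file-c"]          # stand-in for chatService.listFilesInGroup(fid)
allGroups = [{"id": "grp-1234", "name": "Contracts"}]  # stand-in for chatService.listGroups()

groupInfo = next((g for g in allGroups if g.get("id") == fid), None)
groupName = groupInfo.get("name", fid) if groupInfo else fid
desc = f"### Group: {groupName}\n - id: {fid}\n - type: group\n - contains: {len(groupFileIds)} file(s)"
desc += f'\nUse `listItemsInGroup(groupId="{fid}")` to get file IDs, then `readFile(fileId)` to read each.'
print(desc)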
@@ -333,7 +328,7 @@ class AgentService:
             "These files/folders have been uploaded and processed through the extraction pipeline.\n"
             "Use `readFile(fileId)` to read text content, `readContentObjects(fileId)` for structured access, "
             "or `describeImage(fileId)` for image analysis.\n"
-            "For folders, use `listFiles(folderId)` to get the files inside, then `readFile(fileId)` for each.\n"
+            "For groups, use `listItemsInGroup(groupId)` to get the file IDs inside, then `readFile(fileId)` for each.\n"
             "For large PDFs/DOCX, avoid huge `renderDocument` tool JSON: build markdown with "
             "`writeFile` (create + append), then `renderDocument(sourceFileId=that file id, outputFormat=...)`.\n"
             "For small docs you may pass `content` inline. Embed images with `` in markdown.\n\n"
@@ -419,7 +419,7 @@ class ChatService:
         return None
 
     def getFileInfo(self, fileId: str) -> Dict[str, Any]:
-        """Get file information including new fields (tags, folderId, description, status)."""
+        """Get file information including new fields (tags, description, status)."""
         fileItem = self.interfaceDbComponent.getFile(fileId)
         if fileItem:
             return {
@@ -430,7 +430,6 @@ class ChatService:
                 "fileHash": fileItem.fileHash,
                 "creationDate": fileItem.sysCreatedAt,
                 "tags": getattr(fileItem, "tags", None),
-                "folderId": getattr(fileItem, "folderId", None),
                 "description": getattr(fileItem, "description", None),
                 "status": getattr(fileItem, "status", None),
             }
@@ -449,14 +448,12 @@ class ChatService:
 
     def listFiles(
         self,
-        folderId: str = None,
         tags: List[str] = None,
         search: str = None,
     ) -> List[Dict[str, Any]]:
         """List files for the current user with optional filters.
 
         Args:
-            folderId: Filter by folder (None = root / all).
             tags: Filter by tags (any match).
             search: Search in fileName and description.
 
@@ -469,10 +466,6 @@ class ChatService:
         allFiles = self.interfaceDbComponent.getAllFiles()
         results = []
         for fileItem in allFiles:
-            if folderId is not None:
-                if fileItem.get("folderId") != folderId:
-                    continue
-
             if tags:
                 itemTags = fileItem.get("tags") or []
                 if not any(t in itemTags for t in tags):
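With folder scoping gone, listFiles filters only by tags (keep the file if any requested tag matches) and a text search. The search implementation itself is not shown in these hunks, so the sketch below assumes a case-insensitive substring match over fileName and description, with a hypothetical matches helper and invented file dicts:

files = [
    {"fileName": "invoice.pdf", "tags": ["finance"], "description": "Q1 invoice"},
    {"fileName": "notes.md", "tags": [], "description": "meeting notes"},
]

def matches(f, tags=None, search=None):
    # tags: keep the file if ANY requested tag is present (mirrors the hunk above).
    if tags:
        itemTags = f.get("tags") or []
        if not any(t in itemTags for t in tags):
            return False
    # search: assumed case-insensitive substring over fileName and description.
    if search:
        hay = f"{f.get('fileName', '')} {f.get('description', '')}".lower()
        if search.lower() not in hay:
            return False
    return True

print([f["fileName"] for f in files if matches(f, tags=["finance"])])  # ['invoice.pdf']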
@@ -492,27 +485,40 @@ class ChatService:
                 "fileSize": fileItem.get("fileSize"),
                 "creationDate": fileItem.get("sysCreatedAt"),
                 "tags": fileItem.get("tags"),
-                "folderId": fileItem.get("folderId"),
                 "description": fileItem.get("description"),
                 "status": fileItem.get("status"),
             })
         return results
 
-    def listFolders(self, parentId: str = None) -> List[Dict[str, Any]]:
-        """List file folders for the current user.
-
-        Args:
-            parentId: Optional parent folder ID to filter by.
-                None = return ALL folders (for tree building).
-
-        Returns:
-            List of folder dicts.
-        """
-        return self.interfaceDbComponent.listFolders(parentId=parentId)
-
-    def createFolder(self, name: str, parentId: str = None) -> Dict[str, Any]:
-        """Create a new file folder with unique name validation."""
-        return self.interfaceDbComponent.createFolder(name=name, parentId=parentId)
+    def listGroups(self, contextKey: str = "files/list") -> list:
+        """List all groups in the groupTree for the current context."""
+        try:
+            existing = self.interfaceDbApp.getTableGrouping(contextKey)
+            if not existing:
+                return []
+
+            def _flatten(nodes, depth=0):
+                result = []
+                for n in nodes:
+                    nd = n.model_dump() if hasattr(n, "model_dump") else (n if isinstance(n, dict) else vars(n))
+                    result.append({"id": nd.get("id"), "name": nd.get("name"), "depth": depth, "itemCount": len(nd.get("itemIds", []))})
+                    result.extend(_flatten(nd.get("subGroups", []), depth + 1))
+                return result
+
+            return _flatten(existing.rootGroups)
+        except Exception as e:
+            return []
+
+    def listFilesInGroup(self, groupId: str, contextKey: str = "files/list") -> list:
+        """List file IDs in a specific group (recursive)."""
+        try:
+            from modules.routes.routeHelpers import _collectItemIds
+            existing = self.interfaceDbApp.getTableGrouping(contextKey)
+            if not existing:
+                return []
+            nodes = [n.model_dump() if hasattr(n, "model_dump") else (n if isinstance(n, dict) else vars(n)) for n in existing.rootGroups]
+            ids = _collectItemIds(nodes, groupId)
+            return list(ids) if ids else []
+        except Exception:
+            return []
 
     # ---- DataSource CRUD ----
 
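Both listItemsInGroup and listFilesInGroup lean on _collectItemIds from modules.routes.routeHelpers, which this commit does not touch. Judging from the call sites, it locates the group with the given ID anywhere in the tree and gathers itemIds from that group and all of its sub-groups; a hypothetical reconstruction under exactly those assumptions:

def _collectItemIds(nodes, groupId, withinTarget=False):
    # Hypothetical sketch: the real helper lives in modules.routes.routeHelpers
    # and is not shown in this diff. Searches the whole tree for groupId and
    # collects itemIds from that group plus every nested sub-group.
    ids = []
    for nd in nodes:
        matched = withinTarget or nd.get("id") == groupId
        if matched:
            ids.extend(nd.get("itemIds", []))
        ids.extend(_collectItemIds(nd.get("subGroups", []), groupId, matched))
    return ids

# Example with invented nodes:
nodes = [{"id": "g1", "itemIds": ["f1"], "subGroups": [{"id": "g2", "itemIds": ["f2"], "subGroups": []}]}]
print(_collectItemIds(nodes, "g1"))  # ['f1', 'f2']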