# Copyright (c) 2025 Patrick Motsch
# All rights reserved.

"""
RBAC helper functions for interfaces.

Provides RBAC filtering for database queries without connectors importing security.

Multi-Tenant Design:
- mandateId comes from the request context (X-Mandate-Id header)
- the GROUP filter uses an explicit mandateId parameter

Data Namespace Structure:
- data.uam.{Table} → User Access Management (cross-mandate)
- data.chat.{Table} → Chat/AI data (user-owned, no mandate context)
- data.files.{Table} → Files (user-owned)
- data.automation.{Table} → Automation (user-owned)
- data.feature.{code}.{Table} → Mandate-/feature-specific data (dynamic)

GROUP permission:
- data.uam.*: GROUP filters by mandate (via UserMandate)
- data.chat.*, data.automation.*: GROUP = MY (user-owned); when a
  featureInstanceId is set, additionally filtered by sysCreatedBy
- data.files.*: GROUP = own files + scope-based visibility (global, mandate, featureInstance)
- data.feature.*: GROUP filters by mandateId/featureInstanceId
"""

import logging
import json
import math
import re
from typing import List, Dict, Any, Optional, Type, Union

from pydantic import BaseModel

from modules.datamodels.datamodelRbac import AccessRuleContext
from modules.datamodels.datamodelUam import User, UserPermissions, AccessLevel
from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResult
from modules.security.rbac import RbacClass
from modules.security.rootAccess import getRootDbAppConnector

logger = logging.getLogger(__name__)

# Matches a bare ISO date ("YYYY-MM-DD"); used to detect date-valued "between" filters.
_ISO_DATE_RE = re.compile(r"^\d{4}-\d{2}-\d{2}$")


def _rbacAppendPaginationDictFilter(
    key: str,
    val: Dict[str, Any],
    colType: str,
    whereConditions: List[str],
    whereValues: List[Any],
) -> None:
    """Append SQL for one pagination ``filters`` dict entry (operator + value).

    Mirrors ``connectorDbPostgre._buildPaginationClauses`` semantics so numeric
    comparisons use ``::double precision`` instead of lexicographic ``::TEXT``.

    Mutates ``whereConditions`` / ``whereValues`` in place; SQL fragments use
    ``%s`` placeholders so values are always bound as parameters (no string
    interpolation of user input into SQL).

    Args:
        key: Column name (quoted verbatim into the SQL fragment; callers are
             expected to have validated it against the model's columns).
        val: Filter dict, e.g. ``{"operator": "gte", "value": 5}``. Missing
             operator defaults to "equals"; missing value defaults to "".
        colType: Column type string from getModelFields ("INTEGER",
             "DOUBLE PRECISION", "BOOLEAN", "TEXT", ...).
        whereConditions: List of SQL condition fragments to append to.
        whereValues: List of bind values to append to (kept in sync with
             the placeholders appended to whereConditions).
    """
    op = val.get("operator", "equals")
    v = val.get("value", "")
    isNumericCol = colType in ("INTEGER", "DOUBLE PRECISION")
    if op in ("equals", "eq"):
        if colType == "BOOLEAN":
            # COALESCE so NULL booleans compare as FALSE rather than NULL.
            whereConditions.append(f'COALESCE("{key}", FALSE) = %s')
            whereValues.append(str(v).lower() == "true")
        elif isNumericCol:
            try:
                whereConditions.append(f'"{key}"::double precision = %s')
                whereValues.append(float(v))
            except (ValueError, TypeError):
                # Non-numeric input on a numeric column: fall back to text equality.
                whereConditions.append(f'"{key}"::TEXT = %s')
                whereValues.append(str(v))
        else:
            whereConditions.append(f'"{key}"::TEXT = %s')
            whereValues.append(str(v))
        return
    if op == "contains":
        whereConditions.append(f'"{key}"::TEXT ILIKE %s')
        whereValues.append(f"%{v}%")
        return
    if op == "startsWith":
        whereConditions.append(f'"{key}"::TEXT ILIKE %s')
        whereValues.append(f"{v}%")
        return
    if op == "endsWith":
        whereConditions.append(f'"{key}"::TEXT ILIKE %s')
        whereValues.append(f"%{v}")
        return
    if op in ("gt", "gte", "lt", "lte"):
        sqlOp = {"gt": ">", "gte": ">=", "lt": "<", "lte": "<="}[op]
        if isNumericCol:
            try:
                whereConditions.append(f'"{key}"::double precision {sqlOp} %s')
                whereValues.append(float(v))
            except (ValueError, TypeError):
                whereConditions.append(f'"{key}"::TEXT {sqlOp} %s')
                whereValues.append(str(v))
        else:
            # NOTE: text comparison is lexicographic; correct for ISO dates,
            # potentially surprising for other formats.
            whereConditions.append(f'"{key}"::TEXT {sqlOp} %s')
            whereValues.append(str(v))
        return
    if op == "between" and isinstance(v, dict):
        fromVal = v.get("from", "")
        toVal = v.get("to", "")
        if not fromVal and not toVal:
            return
        # A date-valued "between" on a numeric column means the column stores
        # epoch timestamps - convert the ISO bounds to UTC timestamps.
        isDateVal = bool(fromVal and _ISO_DATE_RE.match(str(fromVal))) or bool(
            toVal and _ISO_DATE_RE.match(str(toVal))
        )
        if isNumericCol and isDateVal:
            from datetime import datetime as _dt, timezone as _tz
            if fromVal and toVal:
                fromTs = _dt.strptime(str(fromVal), "%Y-%m-%d").replace(tzinfo=_tz.utc).timestamp()
                # Upper bound is inclusive: end of the given day (23:59:59 UTC).
                toTs = _dt.strptime(str(toVal), "%Y-%m-%d").replace(
                    hour=23, minute=59, second=59, tzinfo=_tz.utc
                ).timestamp()
                whereConditions.append(f'"{key}" >= %s AND "{key}" <= %s')
                whereValues.extend([fromTs, toTs])
            elif fromVal:
                fromTs = _dt.strptime(str(fromVal), "%Y-%m-%d").replace(tzinfo=_tz.utc).timestamp()
                whereConditions.append(f'"{key}" >= %s')
                whereValues.append(fromTs)
            else:
                toTs = _dt.strptime(str(toVal), "%Y-%m-%d").replace(
                    hour=23, minute=59, second=59, tzinfo=_tz.utc
                ).timestamp()
                whereConditions.append(f'"{key}" <= %s')
                whereValues.append(toTs)
        elif isNumericCol:
            try:
                if fromVal and toVal:
                    whereConditions.append(
                        f'"{key}"::double precision >= %s AND "{key}"::double precision <= %s'
                    )
                    whereValues.extend([float(fromVal), float(toVal)])
                elif fromVal:
                    whereConditions.append(f'"{key}"::double precision >= %s')
                    whereValues.append(float(fromVal))
                elif toVal:
                    whereConditions.append(f'"{key}"::double precision <= %s')
                    whereValues.append(float(toVal))
            except (ValueError, TypeError):
                # Unparseable numeric bounds: silently skip this filter entry
                # (deliberate best-effort, matching the connector's behavior).
                pass
        else:
            if fromVal and toVal:
                whereConditions.append(f'"{key}"::TEXT >= %s AND "{key}"::TEXT <= %s')
                whereValues.extend([str(fromVal), str(toVal)])
            elif fromVal:
                whereConditions.append(f'"{key}"::TEXT >= %s')
                whereValues.append(str(fromVal))
            elif toVal:
                whereConditions.append(f'"{key}"::TEXT <= %s')
                whereValues.append(str(toVal))
        return
    if op == "in" and isinstance(v, list):
        if not v:
            # Empty IN list matches nothing.
            whereConditions.append("1 = 0")
        else:
            whereConditions.append(f'"{key}"::TEXT = ANY(%s)')
            whereValues.append([str(x) for x in v])
        return
    if op == "notIn" and isinstance(v, list):
        if v:
            whereConditions.append(f'NOT ("{key}"::TEXT = ANY(%s))')
            whereValues.append([str(x) for x in v])
        return
    # Unknown operator: fall back to a case-insensitive match on the raw value
    # (ILIKE without added wildcards ≈ case-insensitive equality; '%'/'_' in
    # the value still act as pattern characters).
    whereConditions.append(f'"{key}"::TEXT ILIKE %s')
    whereValues.append(str(v))


# =============================================================================
# Namespace mapping for static tables
# =============================================================================
# Defines which namespace is used for each table.
# Tables without an entry fall back to "system" (backward compatibility).
# =============================================================================
TABLE_NAMESPACE = {
    # UAM (User Access Management) - cross-mandate
    "UserInDB": "uam",
    "UserConnection": "uam",
    "AuthEvent": "uam",
    "Mandate": "uam",
    "UserMandate": "uam",
    "UserMandateRole": "uam",
    "Invitation": "uam",
    "Role": "uam",
    "AccessRule": "uam",
    "FeatureInstance": "uam",
    "FeatureAccess": "uam",
    "FeatureAccessRole": "uam",
    # Chat - user-owned, no mandate context
    "ChatWorkflow": "chat",
    "ChatMessage": "chat",
    "ChatLog": "chat",
    "ChatDocument": "chat",
    "Prompt": "chat",
    # Chatbot (poweron_chatbot) - per feature-instance isolation
    "ChatbotConversation": "chatbot",
    "ChatbotMessage": "chatbot",
    "ChatbotDocument": "chatbot",
    "ChatbotLog": "chatbot",
    # Files - user-owned
    "FileItem": "files",
    "FileData": "files",
    "FileFolder": "files",
    # Automation - user-owned
    "AutomationDefinition": "automation",
    "AutomationTemplate": "automation",
    # GraphicalEditor - Greenfield DB poweron_graphicaleditor (Auto-prefix models)
    "AutoWorkflow": "feature.graphicalEditor",
    "AutoVersion": "feature.graphicalEditor",
    "AutoRun": "feature.graphicalEditor",
    "AutoStepLog": "feature.graphicalEditor",
    "AutoTask": "feature.graphicalEditor",
    # Legacy aliases (backward compat)
    "Automation2Workflow": "feature.graphicalEditor",
    "Automation2WorkflowRun": "feature.graphicalEditor",
    "Automation2HumanTask": "feature.graphicalEditor",
    # Knowledge Store - user-owned
    "FileContentIndex": "knowledge",
    "ContentChunk": "knowledge",
    "WorkflowMemory": "knowledge",
    # Data Sources - user-owned
    "DataSource": "datasource",
}

# Namespaces without mandate context - GROUP is mapped to MY
# NOTE: "files" is NOT in this set – files use scope-based visibility for GROUP
USER_OWNED_NAMESPACES = {"chat", "chatbot", "automation", "knowledge", "datasource"}


def buildDataObjectKey(tableName: str, featureCode: Optional[str] = None) -> str:
    """
    Build the standardized objectKey for a DATA context item.

    Format:
    - UAM tables:        data.uam.{TableName}
    - Chat tables:       data.chat.{TableName}
    - File tables:       data.files.{TableName}
    - Automation tables: data.automation.{TableName}
    - Feature tables:    data.feature.{featureCode}.{TableName}

    Args:
        tableName: The database table name (e.g., "UserInDB", "ChatWorkflow")
        featureCode: Optional feature code (e.g., "trustee", "realestate").
                     If provided, uses data.feature.{featureCode}.{tableName}
                     and the static TABLE_NAMESPACE mapping is bypassed.

    Returns:
        Full objectKey string (e.g., "data.uam.UserInDB", "data.chat.ChatWorkflow",
        or "data.feature.trustee.TrusteePosition")
    """
    if featureCode:
        return f"data.feature.{featureCode}.{tableName}"
    namespace = TABLE_NAMESPACE.get(tableName, "system")  # Fallback for unknown tables
    return f"data.{namespace}.{tableName}"


def getRecordsetWithRBAC(
    connector,  # DatabaseConnector instance
    modelClass: Type[BaseModel],
    currentUser: User,
    recordFilter: Optional[Dict[str, Any]] = None,
    orderBy: Optional[str] = None,
    limit: Optional[int] = None,
    mandateId: Optional[str] = None,
    featureInstanceId: Optional[str] = None,
    enrichPermissions: bool = False,
    featureCode: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """
    Get records with RBAC filtering applied at database level.

    This function wraps connector.getRecordset() with RBAC logic.

    Multi-Tenant Design:
    - mandateId is passed explicitly (from request context / X-Mandate-Id header)

    Args:
        connector: DatabaseConnector instance
        modelClass: Pydantic model class for the table
        currentUser: User object
        recordFilter: Additional record filters (field -> value; list/tuple
                      values become IN filters, None becomes IS NULL)
        orderBy: Field to order by (defaults to "id")
        limit: Maximum number of records to return
        mandateId: Explicit mandate context (from request header).
                   Required for GROUP access.
        featureInstanceId: Explicit feature instance context
        enrichPermissions: If True, adds _permissions field to each record with
                           row-level permissions { canUpdate, canDelete } based
                           on RBAC rules and sysCreatedBy
        featureCode: Optional feature code for feature-specific tables
                     (e.g., "trustee"). If None, table is treated as a system table.

    Returns:
        List of filtered records (with _permissions if enrichPermissions=True).
        Returns [] on missing table, missing view permission, or any error
        (errors are logged, never raised to the caller).
    """
    table = modelClass.__name__
    # Build full objectKey for RBAC lookup
    objectKey = buildDataObjectKey(table, featureCode)
    effectiveMandateId = mandateId
    try:
        if not connector._ensureTableExists(modelClass):
            return []
        # All users (including SysAdmins) go through RBAC filtering.
        # SysAdmin flag does NOT grant automatic data access - proper RBAC rules must exist.
        # Get RBAC permissions for this table using full objectKey.
        # AccessRule table is always in DbApp database.
        dbApp = getRootDbAppConnector()
        rbacInstance = RbacClass(connector, dbApp=dbApp)
        permissions = rbacInstance.getUserPermissions(
            currentUser,
            AccessRuleContext.DATA,
            objectKey,  # Use full objectKey (e.g., "data.uam.UserInDB", "data.chat.ChatWorkflow")
            mandateId=effectiveMandateId,
            featureInstanceId=featureInstanceId
        )
        # Check view permission first
        if not permissions.view:
            return []
        # Build WHERE clause with RBAC filtering
        whereConditions = []
        whereValues = []
        # CRITICAL: Only pass featureInstanceId to WHERE clause if the model actually has
        # this column. Chat child tables (ChatMessage, ChatLog, ChatDocument)
        # are user-owned and do NOT have featureInstanceId - only ChatWorkflow does.
        # Without this check, the SQL query would reference a non-existent column,
        # causing a silent error that returns empty results.
        featureInstanceIdForQuery = featureInstanceId
        if featureInstanceId and hasattr(modelClass, 'model_fields') and "featureInstanceId" not in modelClass.model_fields:
            featureInstanceIdForQuery = None
        # Add RBAC WHERE clause based on read permission
        rbacWhereClause = buildRbacWhereClause(
            permissions,
            currentUser,
            table,
            connector,
            mandateId=effectiveMandateId,
            featureInstanceId=featureInstanceIdForQuery
        )
        if rbacWhereClause:
            whereConditions.append(rbacWhereClause["condition"])
            whereValues.extend(rbacWhereClause["values"])
        # Add additional record filters
        if recordFilter:
            for field, value in recordFilter.items():
                if isinstance(value, (list, tuple)):
                    if len(value) == 0:
                        whereConditions.append("1 = 0")  # Empty IN -> no matches
                    else:
                        whereConditions.append(f'"{field}" = ANY(%s)')
                        whereValues.append(list(value))
                elif value is None:
                    whereConditions.append(f'"{field}" IS NULL')
                else:
                    whereConditions.append(f'"{field}" = %s')
                    whereValues.append(value)
        # Build the query (values are bound via %s placeholders; only column/table
        # names are interpolated, which come from the model class, not the request)
        whereClause = ""
        if whereConditions:
            whereClause = " WHERE " + " AND ".join(whereConditions)
        orderByClause = f' ORDER BY "{orderBy}"' if orderBy else ' ORDER BY "id"'
        limitClause = f" LIMIT {limit}" if limit else ""
        query = f'SELECT * FROM "{table}"{whereClause}{orderByClause}{limitClause}'
        with connector.connection.cursor() as cursor:
            cursor.execute(query, whereValues)
            # NOTE(review): rows are treated as dict-like here - assumes a
            # dict-style cursor (e.g. psycopg2 RealDictCursor); confirm in connector.
            records = [dict(row) for row in cursor.fetchall()]
        # Handle JSONB fields and ensure numeric types are correct.
        # Import the helper function from connector module.
        from modules.connectors.connectorDbPostgre import getModelFields
        fields = getModelFields(modelClass)
        for record in records:
            for fieldName, fieldType in fields.items():
                # Ensure numeric fields are properly typed
                if fieldType in ("DOUBLE PRECISION", "INTEGER") and fieldName in record:
                    value = record[fieldName]
                    if value is not None:
                        try:
                            if fieldType == "DOUBLE PRECISION":
                                record[fieldName] = float(value)
                            elif fieldType == "INTEGER":
                                record[fieldName] = int(value)
                        except (ValueError, TypeError):
                            logger.warning(
                                f"Could not convert {fieldName} to {fieldType} for record {record.get('id', 'unknown')}: {value}"
                            )
                elif fieldType == "JSONB" and fieldName in record:
                    if record[fieldName] is None:
                        # Generic type-based default: List types -> [], Dict types -> {}
                        # Interfaces handle domain-specific defaults
                        modelFields = modelClass.model_fields
                        fieldInfo = modelFields.get(fieldName)
                        if fieldInfo:
                            fieldAnnotation = fieldInfo.annotation
                            # Check if it's a List type
                            if (fieldAnnotation == list or
                                    (hasattr(fieldAnnotation, "__origin__") and fieldAnnotation.__origin__ is list)):
                                record[fieldName] = []
                            # Check if it's a Dict type
                            elif (fieldAnnotation == dict or
                                    (hasattr(fieldAnnotation, "__origin__") and fieldAnnotation.__origin__ is dict)):
                                record[fieldName] = {}
                            else:
                                record[fieldName] = None
                        else:
                            record[fieldName] = None
                    else:
                        try:
                            if isinstance(record[fieldName], str):
                                record[fieldName] = json.loads(record[fieldName])
                            elif isinstance(record[fieldName], (dict, list)):
                                pass  # Driver already decoded the JSONB value
                            else:
                                record[fieldName] = json.loads(str(record[fieldName]))
                        except (json.JSONDecodeError, TypeError, ValueError):
                            logger.warning(
                                f"Could not parse JSONB field {fieldName}, keeping as string: {record[fieldName]}"
                            )
        # Enrich records with row-level permissions if requested
        if enrichPermissions:
            records = _enrichRecordsWithPermissions(
                records, permissions, currentUser
            )
        return records
    except Exception as e:
        logger.error(f"Error loading records with RBAC from table {table}: {e}")
        return []


def getRecordsetPaginatedWithRBAC(
    connector,
    modelClass: Type[BaseModel],
    currentUser: User,
    pagination: Optional[PaginationParams] = None,
    recordFilter: Optional[Dict[str, Any]] = None,
    mandateId: Optional[str] = None,
    featureInstanceId: Optional[str] = None,
    enrichPermissions: bool = False,
    featureCode: Optional[str] = None,
) -> Union[List[Dict[str, Any]], PaginatedResult]:
    """
    Get records with RBAC filtering and SQL-level pagination.

    When pagination is None, returns a plain list (backward compatible).
    When pagination is provided, returns PaginatedResult with COUNT + LIMIT/OFFSET
    at SQL level.

    Pagination filters support a free-text "search" key (ILIKE across all TEXT
    columns), val=None for "empty/null", operator dicts (delegated to
    _rbacAppendPaginationDictFilter), and plain values (case-insensitive match).
    Sort fields and filter keys are validated against the model's columns before
    being interpolated into SQL.
    """
    table = modelClass.__name__
    objectKey = buildDataObjectKey(table, featureCode)
    effectiveMandateId = mandateId
    try:
        if not connector._ensureTableExists(modelClass):
            return PaginatedResult(items=[], totalItems=0, totalPages=0) if pagination else []
        dbApp = getRootDbAppConnector()
        rbacInstance = RbacClass(connector, dbApp=dbApp)
        permissions = rbacInstance.getUserPermissions(
            currentUser,
            AccessRuleContext.DATA,
            objectKey,
            mandateId=effectiveMandateId,
            featureInstanceId=featureInstanceId
        )
        if not permissions.view:
            return PaginatedResult(items=[], totalItems=0, totalPages=0) if pagination else []
        whereConditions = []
        whereValues = []
        # Only filter by featureInstanceId if the model actually has that column
        # (see getRecordsetWithRBAC for rationale).
        featureInstanceIdForQuery = featureInstanceId
        if featureInstanceId and hasattr(modelClass, 'model_fields') and "featureInstanceId" not in modelClass.model_fields:
            featureInstanceIdForQuery = None
        rbacWhereClause = buildRbacWhereClause(
            permissions,
            currentUser,
            table,
            connector,
            mandateId=effectiveMandateId,
            featureInstanceId=featureInstanceIdForQuery
        )
        if rbacWhereClause:
            whereConditions.append(rbacWhereClause["condition"])
            whereValues.extend(rbacWhereClause["values"])
        if recordFilter:
            for field, value in recordFilter.items():
                if isinstance(value, (list, tuple)):
                    if len(value) == 0:
                        whereConditions.append("1 = 0")
                    else:
                        whereConditions.append(f'"{field}" = ANY(%s)')
                        whereValues.append(list(value))
                elif value is None:
                    whereConditions.append(f'"{field}" IS NULL')
                else:
                    whereConditions.append(f'"{field}" = %s')
                    whereValues.append(value)
        if pagination and pagination.filters:
            from modules.connectors.connectorDbPostgre import getModelFields
            fields = getModelFields(modelClass)
            validColumns = set(fields.keys())
            for key, val in pagination.filters.items():
                if key == "search" and isinstance(val, str) and val.strip():
                    # Free-text search across every TEXT column.
                    term = f"%{val.strip()}%"
                    textCols = [c for c, t in fields.items() if t == "TEXT"]
                    if textCols:
                        orParts = [f'COALESCE("{c}"::TEXT, \'\') ILIKE %s' for c in textCols]
                        whereConditions.append(f"({' OR '.join(orParts)})")
                        whereValues.extend([term] * len(textCols))
                    continue
                if key not in validColumns:
                    # Unknown column names are dropped, never interpolated into SQL.
                    continue
                if val is None:
                    # val=None in pagination.filters means "match empty/null"
                    # (same convention as connectorDbPostgre._buildPaginationClauses).
                    # Covers both historical empty-string values and true NULLs.
                    whereConditions.append(f'("{key}" IS NULL OR "{key}"::TEXT = \'\')')
                    continue
                if isinstance(val, dict):
                    colType = fields.get(key, "TEXT")
                    _rbacAppendPaginationDictFilter(
                        key, val, colType, whereConditions, whereValues
                    )
                else:
                    whereConditions.append(f'"{key}"::TEXT ILIKE %s')
                    whereValues.append(str(val))
        whereClause = " WHERE " + " AND ".join(whereConditions) if whereConditions else ""
        # COUNT uses a copy of the values since both statements share the WHERE clause.
        countValues = list(whereValues)
        orderParts: List[str] = []
        if pagination and pagination.sort:
            from modules.connectors.connectorDbPostgre import getModelFields
            validColumns = set(getModelFields(modelClass).keys())
            for sf in pagination.sort:
                if sf.field in validColumns:
                    direction = "DESC" if sf.direction.lower() == "desc" else "ASC"
                    orderParts.append(f'"{sf.field}" {direction}')
        if not orderParts:
            orderParts.append('"id"')  # Deterministic default ordering
        orderByClause = " ORDER BY " + ", ".join(orderParts)
        limitClause = ""
        if pagination:
            offset = (pagination.page - 1) * pagination.pageSize
            limitClause = f" LIMIT {pagination.pageSize} OFFSET {offset}"
        with connector.connection.cursor() as cursor:
            countSql = f'SELECT COUNT(*) FROM "{table}"{whereClause}'
            cursor.execute(countSql, countValues)
            # NOTE(review): requires a dict-style cursor (row["count"]) - confirm
            # the connector configures one.
            totalItems = cursor.fetchone()["count"]
            dataSql = f'SELECT * FROM "{table}"{whereClause}{orderByClause}{limitClause}'
            cursor.execute(dataSql, whereValues)
            records = [dict(row) for row in cursor.fetchall()]
        from modules.connectors.connectorDbPostgre import getModelFields, parseRecordFields
        fields = getModelFields(modelClass)
        for record in records:
            parseRecordFields(record, fields, f"table {table}")
            for fieldName, fieldType in fields.items():
                # NULL JSONB values get a type-based default ([] for list fields,
                # {} for dict fields), mirroring getRecordsetWithRBAC.
                if fieldType == "JSONB" and fieldName in record and record[fieldName] is None:
                    modelFields = modelClass.model_fields
                    fieldInfo = modelFields.get(fieldName)
                    if fieldInfo:
                        fieldAnnotation = fieldInfo.annotation
                        if (fieldAnnotation == list or
                                (hasattr(fieldAnnotation, "__origin__") and fieldAnnotation.__origin__ is list)):
                            record[fieldName] = []
                        elif (fieldAnnotation == dict or
                                (hasattr(fieldAnnotation, "__origin__") and fieldAnnotation.__origin__ is dict)):
                            record[fieldName] = {}
        if enrichPermissions:
            records = _enrichRecordsWithPermissions(records, permissions, currentUser)
        from modules.routes.routeHelpers import enrichRowsWithFkLabels
        enrichRowsWithFkLabels(records, modelClass)
        if pagination:
            pageSize = pagination.pageSize
            totalPages = math.ceil(totalItems / pageSize) if totalItems > 0 else 0
            return PaginatedResult(items=records, totalItems=totalItems, totalPages=totalPages)
        return records
    except Exception as e:
        logger.error(f"Error in getRecordsetPaginatedWithRBAC for table {table}: {e}")
        return PaginatedResult(items=[], totalItems=0, totalPages=0) if pagination else []


def getDistinctColumnValuesWithRBAC(
    connector,
    modelClass: Type[BaseModel],
    currentUser: User,
    column: str,
    pagination: Optional[PaginationParams] = None,
    recordFilter: Optional[Dict[str, Any]] = None,
    mandateId: Optional[str] = None,
    featureInstanceId: Optional[str] = None,
    featureCode: Optional[str] = None,
) -> List[str]:
    """
    Get sorted distinct values for a column with RBAC filtering at SQL level.

    Cross-filtering: removes the requested column from active filters so the
    dropdown for that column still shows all values matching the *other* filters.

    Returns distinct non-empty values as TEXT, sorted; a trailing None entry is
    appended when NULL/empty rows also match (drives the "(Leer)" / empty filter
    option in the UI). Returns [] for unknown columns, missing view permission,
    or any error.
    """
    import copy
    table = modelClass.__name__
    objectKey = buildDataObjectKey(table, featureCode)
    try:
        if not connector._ensureTableExists(modelClass):
            return []
        from modules.connectors.connectorDbPostgre import getModelFields
        fields = getModelFields(modelClass)
        if column not in fields:
            # Reject unknown columns before they reach the SQL text.
            return []
        dbApp = getRootDbAppConnector()
        rbacInstance = RbacClass(connector, dbApp=dbApp)
        permissions = rbacInstance.getUserPermissions(
            currentUser,
            AccessRuleContext.DATA,
            objectKey,
            mandateId=mandateId,
            featureInstanceId=featureInstanceId
        )
        if not permissions.view:
            return []
        whereConditions = []
        whereValues = []
        featureInstanceIdForQuery = featureInstanceId
        if featureInstanceId and hasattr(modelClass, 'model_fields') and "featureInstanceId" not in modelClass.model_fields:
            featureInstanceIdForQuery = None
        rbacWhereClause = buildRbacWhereClause(
            permissions,
            currentUser,
            table,
            connector,
            mandateId=mandateId,
            featureInstanceId=featureInstanceIdForQuery
        )
        if rbacWhereClause:
            whereConditions.append(rbacWhereClause["condition"])
            whereValues.extend(rbacWhereClause["values"])
        if recordFilter:
            for field, value in recordFilter.items():
                if isinstance(value, (list, tuple)):
                    if not value:
                        whereConditions.append("1 = 0")
                    else:
                        whereConditions.append(f'"{field}" = ANY(%s)')
                        whereValues.append(list(value))
                elif value is None:
                    whereConditions.append(f'"{field}" IS NULL')
                else:
                    whereConditions.append(f'"{field}" = %s')
                    whereValues.append(value)
        # Deep-copy so dropping the cross-filtered column never mutates the
        # caller's pagination object.
        crossPagination = copy.deepcopy(pagination) if pagination else None
        if crossPagination and crossPagination.filters:
            crossPagination.filters.pop(column, None)
            validColumns = set(fields.keys())
            for key, val in crossPagination.filters.items():
                if key == "search" and isinstance(val, str) and val.strip():
                    term = f"%{val.strip()}%"
                    textCols = [c for c, t in fields.items() if t == "TEXT"]
                    if textCols:
                        orParts = [f'COALESCE("{c}"::TEXT, \'\') ILIKE %s' for c in textCols]
                        whereConditions.append(f"({' OR '.join(orParts)})")
                        whereValues.extend([term] * len(textCols))
                    continue
                if key not in validColumns:
                    continue
                if val is None:
                    # val=None in pagination.filters means "match empty/null"
                    # (same convention as connectorDbPostgre._buildPaginationClauses).
                    # Covers both historical empty-string values and true NULLs.
                    whereConditions.append(f'("{key}" IS NULL OR "{key}"::TEXT = \'\')')
                    continue
                if isinstance(val, dict):
                    colType = fields.get(key, "TEXT")
                    _rbacAppendPaginationDictFilter(
                        key, val, colType, whereConditions, whereValues
                    )
                else:
                    whereConditions.append(f'"{key}"::TEXT ILIKE %s')
                    whereValues.append(str(val))
        whereClause = " WHERE " + " AND ".join(whereConditions) if whereConditions else ""
        notNullCond = f'"{column}" IS NOT NULL AND "{column}"::TEXT != \'\''
        if whereClause:
            nonNullWhere = whereClause + f" AND {notNullCond}"
        else:
            nonNullWhere = f" WHERE {notNullCond}"
        sql = f'SELECT DISTINCT "{column}"::TEXT AS val FROM "{table}"{nonNullWhere} ORDER BY val'
        with connector.connection.cursor() as cursor:
            cursor.execute(sql, whereValues)
            result = [row["val"] for row in cursor.fetchall()]
        # Include a None entry when NULL/empty rows exist (enables "(Leer)" filter;
        # "(Leer)" is the German UI label for "empty").
        emptyCond = f'("{column}" IS NULL OR "{column}"::TEXT = \'\')'
        if whereClause:
            emptySql = f'SELECT 1 FROM "{table}"{whereClause} AND {emptyCond} LIMIT 1'
        else:
            emptySql = f'SELECT 1 FROM "{table}" WHERE {emptyCond} LIMIT 1'
        with connector.connection.cursor() as cursor:
            cursor.execute(emptySql, whereValues)
            if cursor.fetchone():
                result.append(None)
        return result
    except Exception as e:
        logger.error(f"Error in getDistinctColumnValuesWithRBAC for {table}.{column}: {e}")
        return []


def buildFilesScopeWhereClause(
    currentUser: User,
    table: str,
    connector,
    mandateId: Optional[str],
    featureInstanceId: Optional[str],
    baseConditions: List[str],
    baseValues: List,
) -> Optional[Dict[str, Any]]:
    """Build WHERE clause for files namespace with scope-based visibility.

    Two modes depending on request context:

    WITHOUT instance/mandate context (files page):
        Only own files: sysCreatedBy = currentUser

    WITH instance context (instance pages):
        - sysCreatedBy = me AND featureInstanceId = X (own personal files of this instance)
        - scope = 'featureInstance' AND featureInstanceId = X
        - scope = 'mandate' AND mandateId = M (M = mandate of the instance)
        - scope = 'global'

    Args:
        currentUser: User the visibility is computed for.
        table: Table name (unused here; kept for signature parity with
               buildRbacWhereClause callers).
        connector: DatabaseConnector (unused here; kept for signature parity).
        mandateId: Explicit mandate context, may be None.
        featureInstanceId: Explicit instance context, may be None.
        baseConditions: Pre-built SQL conditions to AND with the scope clause.
        baseValues: Bind values matching baseConditions.

    Returns:
        {"condition": ..., "values": ...} dict or None when no filtering applies.
    """
    conditions = list(baseConditions)
    values = list(baseValues)
    # ── No context: files page → only own files ─────────────────────────────
    if not featureInstanceId and not mandateId:
        conditions.append('"sysCreatedBy" = %s')
        values.append(currentUser.id)
        if conditions:
            return {"condition": " AND ".join(conditions), "values": values}
        return None
    # ── With context: instance/mandate page → scope-based visibility ────────
    effectiveMandateId = mandateId
    if featureInstanceId and not effectiveMandateId:
        # Resolve the instance's mandate so scope='mandate' files become visible.
        try:
            from modules.datamodels.datamodelFeatures import FeatureInstance
            dbApp = getRootDbAppConnector()
            instances = dbApp.getRecordset(
                FeatureInstance,
                recordFilter={"id": featureInstanceId},
            )
            if instances:
                effectiveMandateId = instances[0].get("mandateId") or ""
        except Exception as e:
            # Best-effort: without a resolved mandate, only instance/global scopes apply.
            logger.warning(f"buildFilesScopeWhereClause: could not resolve mandate for instance {featureInstanceId}: {e}")
    scopeParts: List[str] = []
    scopeValues: List = []
    if featureInstanceId:
        # 1) Own personal files of this specific instance
        scopeParts.append('("sysCreatedBy" = %s AND "featureInstanceId" = %s)')
        scopeValues.extend([currentUser.id, featureInstanceId])
        # 2) scope=featureInstance files shared with this instance
        scopeParts.append('("scope" = \'featureInstance\' AND "featureInstanceId" = %s)')
        scopeValues.append(featureInstanceId)
    # 3) scope=mandate files of the effective mandate
    if effectiveMandateId:
        scopeParts.append('("scope" = \'mandate\' AND "mandateId" = %s)')
        scopeValues.append(effectiveMandateId)
    # 4) scope=global files
    scopeParts.append('"scope" = \'global\'')
    if scopeParts:
        conditions.append("(" + " OR ".join(scopeParts) + ")")
        values.extend(scopeValues)
    if conditions:
        return {"condition": " AND ".join(conditions), "values": values}
    return None


def buildRbacWhereClause(
    permissions: UserPermissions,
    currentUser: User,
    table: str,
    connector,  # DatabaseConnector instance for connection access
    mandateId: Optional[str] = None,
    featureInstanceId: Optional[str] = None
) -> Optional[Dict[str, Any]]:
    """
    Build RBAC WHERE clause based on permissions and access level.

    Multi-Tenant Design:
    - mandateId is passed explicitly (from request context / X-Mandate-Id header)
    - featureInstanceId is additionally filtered for feature tables

    Args:
        permissions: UserPermissions object
        currentUser: User object
        table: Table name
        connector: DatabaseConnector instance (needed for GROUP queries)
        mandateId: Explicit mandate context (from request header).
                   Required for GROUP access.
        featureInstanceId: Feature instance context for feature-level data isolation.

    Returns:
        Dictionary with "condition" and "values" keys, or None if no filtering
        is needed. Unknown access levels and missing GROUP context deny access
        ("1 = 0") rather than falling open.
    """
    if not permissions or not hasattr(permissions, "read"):
        return None
    readLevel = permissions.read
    # No access - return empty result condition
    if readLevel == AccessLevel.NONE:
        return {"condition": "1 = 0", "values": []}
    # CRITICAL: featureInstanceId filter is ALWAYS required when provided.
    # This ensures data isolation between feature instances regardless of access level.
    # EXCEPTION: files namespace handles featureInstanceId inside its own scope logic
    # because files with scope=global or scope=mandate must remain visible even when
    # they belong to a different (or no) featureInstanceId.
    baseConditions = []
    baseValues = []
    namespace = TABLE_NAMESPACE.get(table, "system")
    if featureInstanceId and namespace != "files":
        baseConditions.append('"featureInstanceId" = %s')
        baseValues.append(featureInstanceId)
    # All records within the feature instance - only featureInstanceId filtering
    if readLevel == AccessLevel.ALL:
        namespaceAll = TABLE_NAMESPACE.get(table, "system")
        # Files: scope-based context filtering applies even with ALL access
        if namespaceAll == "files":
            return buildFilesScopeWhereClause(
                currentUser, table, connector, mandateId, featureInstanceId,
                baseConditions, baseValues,
            )
        # Chat / AI Workspace: even DATA read ALL must not list other users' rows in a
        # shared featureInstance (stale RBAC rules or merged roles). Same as MY.
        if featureInstanceId and namespaceAll == "chat":
            userIdFieldAll = "sysCreatedBy"
            if table == "UserInDB":
                userIdFieldAll = "id"
            elif table == "UserConnection":
                userIdFieldAll = "userId"
            conditionsAll = list(baseConditions)
            valuesAll = list(baseValues)
            conditionsAll.append(f'"{userIdFieldAll}" = %s')
            valuesAll.append(currentUser.id)
            return {"condition": " AND ".join(conditionsAll), "values": valuesAll}
        if baseConditions:
            return {"condition": " AND ".join(baseConditions), "values": baseValues}
        return None
    # My records - filter by sysCreatedBy or userId field
    if readLevel == AccessLevel.MY:
        # Try common field names for creator
        userIdField = None
        if table == "UserInDB":
            userIdField = "id"
        elif table == "UserConnection":
            userIdField = "userId"
        else:
            userIdField = "sysCreatedBy"
        conditions = list(baseConditions)
        values = list(baseValues)
        conditions.append(f'"{userIdField}" = %s')
        values.append(currentUser.id)
        return {
            "condition": " AND ".join(conditions),
            "values": values
        }
    # Group records - filter by mandateId or ownership based on namespace
    if readLevel == AccessLevel.GROUP:
        # Determine namespace for this table
        namespace = TABLE_NAMESPACE.get(table, "system")
        # ── Files namespace: scope-based visibility ─────────────────────────
        # GROUP for files = own files + shared files based on scope field:
        #   - scope='global'          → visible to everyone
        #   - scope='mandate'         → visible to users in that mandate
        #   - scope='featureInstance' → visible to users with access to that instance
        #   - scope='personal'        → only visible to owner (sysCreatedBy)
        if namespace == "files":
            return buildFilesScopeWhereClause(
                currentUser, table, connector, mandateId, featureInstanceId,
                baseConditions, baseValues,
            )
        # For user-owned namespaces (chat, automation):
        # GROUP has no meaning - these tables have no mandate context.
        # But still apply featureInstanceId filter if provided.
        if namespace in USER_OWNED_NAMESPACES:
            if baseConditions:
                # Shared feature instance: GROUP would otherwise only filter by featureInstanceId
                # and expose every user's rows in that instance (e.g. ChatWorkflow).
                if featureInstanceId and readLevel == AccessLevel.GROUP:
                    conditions = list(baseConditions)
                    values = list(baseValues)
                    conditions.append('"sysCreatedBy" = %s')
                    values.append(currentUser.id)
                    return {"condition": " AND ".join(conditions), "values": values}
                return {"condition": " AND ".join(baseConditions), "values": baseValues}
            return None
        # For UAM and other namespaces: GROUP filters by mandate
        effectiveMandateId = mandateId
        if not effectiveMandateId:
            # Fall back to Root mandate (first mandate in system) for GROUP access.
            # This allows system-level tables to be accessed without explicit mandate context.
            try:
                from modules.datamodels.datamodelUam import Mandate
                dbApp = getRootDbAppConnector()
                allMandates = dbApp.getRecordset(Mandate)
                if allMandates:
                    effectiveMandateId = allMandates[0].get("id")
            except Exception as e:
                logger.error(f"Error getting Root mandate: {e}")
        if not effectiveMandateId:
            # Security: no mandate context resolvable → deny rather than fall open.
            logger.warning(f"User {currentUser.id} has no mandateId for GROUP access")
            return {"condition": "1 = 0", "values": []}
        # For UserInDB: Filter via UserMandate junction table.
        # Multi-Tenant Design: Users do NOT have mandateId - they are linked via UserMandate.
        if table == "UserInDB":
            try:
                with connector.connection.cursor() as cursor:
                    # Get all user IDs that are members of the current mandate
                    cursor.execute(
                        'SELECT "userId" FROM "UserMandate" WHERE "mandateId" = %s AND "enabled" = true',
                        (effectiveMandateId,)
                    )
                    userMandates = cursor.fetchall()
                    userIds = [um["userId"] for um in userMandates]
                    if not userIds:
                        return {"condition": "1 = 0", "values": []}
                    placeholders = ",".join(["%s"] * len(userIds))
                    # Combine with base conditions (featureInstanceId)
                    conditions = list(baseConditions)
                    values = list(baseValues)
                    conditions.append(f'"id" IN ({placeholders})')
                    values.extend(userIds)
                    return {
                        "condition": " AND ".join(conditions) if conditions else f'"id" IN ({placeholders})',
                        "values": values
                    }
            except Exception as e:
                logger.error(f"Error building GROUP filter for UserInDB via UserMandate: {e}")
                return {"condition": "1 = 0", "values": []}
        # For UserConnection: Filter via UserMandate junction table
        elif table == "UserConnection":
            try:
                with connector.connection.cursor() as cursor:
                    # Get all user IDs that are members of the current mandate
                    cursor.execute(
                        'SELECT "userId" FROM "UserMandate" WHERE "mandateId" = %s AND "enabled" = true',
                        (effectiveMandateId,)
                    )
                    userMandates = cursor.fetchall()
                    userIds = [um["userId"] for um in userMandates]
                    if not userIds:
                        return {"condition": "1 = 0", "values": []}
                    placeholders = ",".join(["%s"] * len(userIds))
                    # Combine with base conditions (featureInstanceId)
                    conditions = list(baseConditions)
                    values = list(baseValues)
                    conditions.append(f'"userId" IN ({placeholders})')
                    values.extend(userIds)
                    return {
                        "condition": " AND ".join(conditions) if conditions else f'"userId" IN ({placeholders})',
                        "values": values
                    }
            except Exception as e:
                logger.error(f"Error building GROUP filter for UserConnection: {e}")
                return {"condition": "1 = 0", "values": []}
        # For system tables without mandateId column (Mandate, Role, etc.):
        # No row-level filtering based on mandate, but still apply featureInstanceId if provided
        elif table in ("Mandate", "Role"):
            if baseConditions:
                return {"condition": " AND ".join(baseConditions), "values": baseValues}
            return None
        # For other tables, filter by mandateId field.
        # Also include records with NULL mandateId for backwards compatibility.
        else:
            # Start with base conditions (includes strict featureInstanceId filter)
            conditions = list(baseConditions)
            values = list(baseValues)
            # Add mandate filter
            conditions.append('("mandateId" = %s OR "mandateId" IS NULL)')
            values.append(effectiveMandateId)
            return {
                "condition": " AND ".join(conditions),
                "values": values
            }
    # Unknown access level - deny access (security: deny by default)
    logger.warning(f"Unknown access level '{readLevel}' for user {currentUser.id} - denying access")
    return {"condition": "1 = 0", "values": []}


def _enrichRecordsWithPermissions(
    records: List[Dict[str, Any]],
    permissions: UserPermissions,
    currentUser: User
) -> List[Dict[str, Any]]:
    """
    Enrich records with per-row permissions (_permissions field).

    The _permissions field contains:
    - canUpdate: bool - whether current user can update this record
    - canDelete: bool - whether current user can delete this record

    Logic:
    - AccessLevel.ALL ('a'): User can update/delete all records
    - AccessLevel.MY ('m'): User can only update/delete records where sysCreatedBy == userId
    - AccessLevel.GROUP ('g'): Same as MY for now (group-level ownership)
    - AccessLevel.NONE ('n'): User cannot update/delete any records

    Args:
        records: List of record dicts
        permissions: UserPermissions with update/delete levels
        currentUser: Current user object

    Returns:
        Records with _permissions field added
    """
    enriched = []
    userId = currentUser.id if currentUser else None
    for record in records:
        recordCopy = dict(record)
        createdBy = record.get("sysCreatedBy")
        # Determine canUpdate
        canUpdate = _checkRowPermission(permissions.update, userId, createdBy)
        # Determine canDelete
        canDelete = _checkRowPermission(permissions.delete, userId, createdBy)
        recordCopy["_permissions"] = {
            "canUpdate": canUpdate,
"canDelete": canDelete } enriched.append(recordCopy) return enriched def _checkRowPermission( accessLevel: Optional[AccessLevel], userId: Optional[str], recordCreatedBy: Optional[str] ) -> bool: """ Check if user has permission for a specific row based on access level. Args: accessLevel: The permission level (ALL, MY, GROUP, NONE) userId: Current user's ID recordCreatedBy: The sysCreatedBy value of the record Returns: True if user has permission, False otherwise """ if not accessLevel or accessLevel == AccessLevel.NONE: return False if accessLevel == AccessLevel.ALL: return True # MY and GROUP: Check ownership via sysCreatedBy if accessLevel in (AccessLevel.MY, AccessLevel.GROUP): # If record has no sysCreatedBy, allow access (can't verify ownership) if not recordCreatedBy: return True # If no userId, can't verify - deny if not userId: return False # Check ownership return recordCreatedBy == userId # Unknown level - deny by default return False