From 8b161ed41066b2dc1738403cb6434c9ca08730c0 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Tue, 24 Mar 2026 14:16:46 +0100 Subject: [PATCH 01/33] unified data - step 1 --- modules/datamodels/datamodelDataSource.py | 17 + modules/datamodels/datamodelFiles.py | 17 + modules/datamodels/datamodelKnowledge.py | 10 + modules/datamodels/datamodelSubscription.py | 6 + modules/datamodels/datamodelUam.py | 60 ++- modules/datamodels/datamodelVoice.py | 6 +- .../datamodelFeatureNeutralizer.py | 17 + .../interfaceFeatureNeutralizer.py | 15 + .../neutralization/neutralizePlayground.py | 6 + .../neutralization/routeFeatureNeutralizer.py | 60 +++ .../workspace/datamodelFeatureWorkspace.py | 39 +- .../workspace/interfaceFeatureWorkspace.py | 118 +---- modules/interfaces/interfaceBootstrap.py | 24 +- modules/interfaces/interfaceDbApp.py | 225 +++++++- modules/interfaces/interfaceDbKnowledge.py | 97 +++- modules/migration/__init__.py | 1 + modules/migration/migrateRootUsers.py | 213 ++++++++ modules/routes/routeDataFiles.py | 71 +++ modules/routes/routeSecurityGoogle.py | 12 +- modules/routes/routeSecurityLocal.py | 168 +++++- modules/routes/routeStore.py | 479 ++++++++++-------- .../services/serviceAgent/mainServiceAgent.py | 1 + .../services/serviceAi/mainServiceAi.py | 80 ++- .../serviceKnowledge/mainServiceKnowledge.py | 55 +- .../methodContext/actions/neutralizeData.py | 12 +- modules/workflows/workflowManager.py | 48 +- tests/test_phase123_basic.py | 323 ++++++++++++ 27 files changed, 1764 insertions(+), 416 deletions(-) create mode 100644 modules/migration/__init__.py create mode 100644 modules/migration/migrateRootUsers.py create mode 100644 tests/test_phase123_basic.py diff --git a/modules/datamodels/datamodelDataSource.py b/modules/datamodels/datamodelDataSource.py index f8238fab..47578b03 100644 --- a/modules/datamodels/datamodelDataSource.py +++ b/modules/datamodels/datamodelDataSource.py @@ -30,6 +30,21 @@ class DataSource(BaseModel): autoSync: bool = Field(default=False, description="Automatically sync on schedule") lastSynced: Optional[float] = Field(default=None, description="Last sync timestamp") createdAt: float = Field(default_factory=getUtcTimestamp, description="Creation timestamp") + scope: str = Field( + default="personal", + description="Data visibility scope: personal, featureInstance, mandate, global", + json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [ + {"value": "personal", "label": {"en": "Personal", "de": "Persönlich"}}, + {"value": "featureInstance", "label": {"en": "Feature Instance", "de": "Feature-Instanz"}}, + {"value": "mandate", "label": {"en": "Mandate", "de": "Mandant"}}, + {"value": "global", "label": {"en": "Global", "de": "Global"}}, + ]} + ) + neutralize: bool = Field( + default=False, + description="Whether this data source should be neutralized before AI processing", + json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False} + ) registerModelLabels( @@ -48,6 +63,8 @@ registerModelLabels( "autoSync": {"en": "Auto Sync", "de": "Auto-Sync", "fr": "Synchro auto"}, "lastSynced": {"en": "Last Synced", "de": "Letzter Sync", "fr": "Dernier sync"}, "createdAt": {"en": "Created At", "de": "Erstellt am", "fr": "Créé le"}, + "scope": {"en": "Scope", "de": "Sichtbarkeit"}, + "neutralize": {"en": "Neutralize", "de": "Neutralisieren"}, }, ) diff --git a/modules/datamodels/datamodelFiles.py b/modules/datamodels/datamodelFiles.py index afaad996..f95a0ef1 
100644 --- a/modules/datamodels/datamodelFiles.py +++ b/modules/datamodels/datamodelFiles.py @@ -24,6 +24,21 @@ class FileItem(BaseModel): folderId: Optional[str] = Field(default=None, description="ID of the parent folder", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False}) description: Optional[str] = Field(default=None, description="User-provided description of the file", json_schema_extra={"frontend_type": "textarea", "frontend_readonly": False, "frontend_required": False}) status: Optional[str] = Field(default=None, description="Processing status: pending, extracted, embedding, indexed, failed", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) + scope: str = Field( + default="personal", + description="Data visibility scope: personal, featureInstance, mandate, global", + json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [ + {"value": "personal", "label": {"en": "Personal", "de": "Persönlich"}}, + {"value": "featureInstance", "label": {"en": "Feature Instance", "de": "Feature-Instanz"}}, + {"value": "mandate", "label": {"en": "Mandate", "de": "Mandant"}}, + {"value": "global", "label": {"en": "Global", "de": "Global"}}, + ]} + ) + neutralize: bool = Field( + default=False, + description="Whether this file should be neutralized before AI processing", + json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False} + ) registerModelLabels( "FileItem", @@ -41,6 +56,8 @@ registerModelLabels( "folderId": {"en": "Folder ID", "fr": "ID du dossier"}, "description": {"en": "Description", "fr": "Description"}, "status": {"en": "Status", "fr": "Statut"}, + "scope": {"en": "Scope", "de": "Sichtbarkeit"}, + "neutralize": {"en": "Neutralize", "de": "Neutralisieren"}, }, ) diff --git a/modules/datamodels/datamodelKnowledge.py b/modules/datamodels/datamodelKnowledge.py index d03e9d5a..ac1c4ecc 100644 --- a/modules/datamodels/datamodelKnowledge.py +++ b/modules/datamodels/datamodelKnowledge.py @@ -34,6 +34,14 @@ class FileContentIndex(BaseModel): objectSummary: List[Dict[str, Any]] = Field(default_factory=list, description="Compact summary per content object") extractedAt: float = Field(default_factory=getUtcTimestamp, description="Extraction timestamp") status: str = Field(default="pending", description="Processing status: pending, extracted, embedding, indexed, failed") + scope: str = Field( + default="personal", + description="Data visibility scope: personal, featureInstance, mandate, global", + ) + neutralizationStatus: Optional[str] = Field( + default=None, + description="Neutralization status: completed, failed, skipped, None = not required", + ) registerModelLabels( @@ -54,6 +62,8 @@ registerModelLabels( "objectSummary": {"en": "Object Summary", "fr": "Résumé des objets"}, "extractedAt": {"en": "Extracted At", "fr": "Extrait le"}, "status": {"en": "Status", "fr": "Statut"}, + "scope": {"en": "Scope", "de": "Sichtbarkeit"}, + "neutralizationStatus": {"en": "Neutralization Status", "de": "Neutralisierungsstatus"}, }, ) diff --git a/modules/datamodels/datamodelSubscription.py b/modules/datamodels/datamodelSubscription.py index 1c1435d8..8f5fd824 100644 --- a/modules/datamodels/datamodelSubscription.py +++ b/modules/datamodels/datamodelSubscription.py @@ -70,6 +70,7 @@ class SubscriptionPlan(BaseModel): maxUsers: Optional[int] = Field(None, description="Hard cap on active users (None = 
unlimited)") maxFeatureInstances: Optional[int] = Field(None, description="Hard cap on active feature instances (None = unlimited)") trialDays: Optional[int] = Field(None, description="Trial duration in days (only for trial plans)") + maxDataVolumeMB: Optional[int] = Field(None, description="Soft-limit for data volume in MB per mandate (None = unlimited)") successorPlanKey: Optional[str] = Field(None, description="Plan to transition to when trial ends") @@ -84,6 +85,7 @@ registerModelLabels( "pricePerFeatureInstanceCHF": {"en": "Price per Instance (CHF)", "de": "Preis pro Instanz (CHF)"}, "maxUsers": {"en": "Max Users", "de": "Max. Benutzer", "fr": "Max. utilisateurs"}, "maxFeatureInstances": {"en": "Max Instances", "de": "Max. Instanzen", "fr": "Max. instances"}, + "maxDataVolumeMB": {"en": "Data Volume (MB)", "de": "Datenvolumen (MB)"}, }, ) @@ -182,6 +184,7 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = { autoRenew=False, maxUsers=None, maxFeatureInstances=None, + maxDataVolumeMB=None, ), "TRIAL_7D": SubscriptionPlan( planKey="TRIAL_7D", @@ -196,6 +199,7 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = { maxUsers=1, maxFeatureInstances=3, trialDays=7, + maxDataVolumeMB=500, successorPlanKey="STANDARD_MONTHLY", ), "STANDARD_MONTHLY": SubscriptionPlan( @@ -209,6 +213,7 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = { billingPeriod=BillingPeriodEnum.MONTHLY, pricePerUserCHF=90.0, pricePerFeatureInstanceCHF=150.0, + maxDataVolumeMB=10240, ), "STANDARD_YEARLY": SubscriptionPlan( planKey="STANDARD_YEARLY", @@ -221,6 +226,7 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = { billingPeriod=BillingPeriodEnum.YEARLY, pricePerUserCHF=1080.0, pricePerFeatureInstanceCHF=1800.0, + maxDataVolumeMB=10240, ), } diff --git a/modules/datamodels/datamodelUam.py b/modules/datamodels/datamodelUam.py index 22d94ebe..d56bd861 100644 --- a/modules/datamodels/datamodelUam.py +++ b/modules/datamodels/datamodelUam.py @@ -10,7 +10,7 @@ Multi-Tenant Design: """ import uuid -from typing import Optional, List +from typing import Optional, List, Dict from enum import Enum from pydantic import BaseModel, Field, EmailStr, field_validator, computed_field from modules.shared.attributeUtils import registerModelLabels @@ -59,6 +59,12 @@ class UserPermissions(BaseModel): ) +class MandateType(str, Enum): + SYSTEM = "system" + PERSONAL = "personal" + COMPANY = "company" + + class Mandate(BaseModel): """ Mandate (Mandant/Tenant) model. @@ -88,6 +94,15 @@ class Mandate(BaseModel): description="Whether this is a system mandate (e.g. root mandate). Cannot be deleted.", json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": False} ) + mandateType: MandateType = Field( + default=MandateType.COMPANY, + description="Fachlicher Mandantentyp: system (Root), personal (Solo), company (Team). 
Mutabel, rein informativ — keine Feature-Gates.", + json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [ + {"value": "system", "label": {"en": "System", "de": "System"}}, + {"value": "personal", "label": {"en": "Personal", "de": "Persönlich"}}, + {"value": "company", "label": {"en": "Company", "de": "Unternehmen"}}, + ]} + ) @field_validator('isSystem', mode='before') @classmethod @@ -97,6 +112,18 @@ class Mandate(BaseModel): return False return v + @field_validator('mandateType', mode='before') + @classmethod + def _coerceMandateType(cls, v): + if v is None: + return MandateType.COMPANY + if isinstance(v, str): + try: + return MandateType(v) + except ValueError: + return MandateType.COMPANY + return v + registerModelLabels( "Mandate", @@ -107,6 +134,7 @@ registerModelLabels( "label": {"en": "Label", "de": "Label", "fr": "Libellé"}, "enabled": {"en": "Enabled", "de": "Aktiviert", "fr": "Activé"}, "isSystem": {"en": "System Mandate", "de": "System-Mandant", "fr": "Mandat système"}, + "mandateType": {"en": "Mandate Type", "de": "Mandantentyp", "fr": "Type de mandat"}, }, ) @@ -289,3 +317,33 @@ registerModelLabels( "resetTokenExpires": {"en": "Reset Token Expires", "de": "Token läuft ab", "fr": "Expiration du jeton"}, }, ) + + +class UserVoicePreferences(BaseModel): + """User-level voice/language preferences, shared across all features.""" + id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") + userId: str = Field(description="User ID") + mandateId: Optional[str] = Field(default=None, description="Mandate scope (None = global for user)") + sttLanguage: str = Field(default="de-DE", description="Speech-to-text language code") + ttsLanguage: str = Field(default="de-DE", description="Text-to-speech language code") + ttsVoice: Optional[str] = Field(default=None, description="Preferred TTS voice identifier") + ttsVoiceMap: Optional[Dict[str, str]] = Field(default=None, description="Language-to-voice mapping") + translationSourceLanguage: Optional[str] = Field(default=None, description="Source language for translations") + translationTargetLanguage: Optional[str] = Field(default=None, description="Target language for translations") + + +registerModelLabels( + "UserVoicePreferences", + {"en": "Voice Preferences", "de": "Spracheinstellungen", "fr": "Préférences vocales"}, + { + "id": {"en": "ID", "de": "ID", "fr": "ID"}, + "userId": {"en": "User ID", "de": "Benutzer-ID", "fr": "ID utilisateur"}, + "mandateId": {"en": "Mandate ID", "de": "Mandanten-ID", "fr": "ID du mandat"}, + "sttLanguage": {"en": "STT Language", "de": "STT-Sprache", "fr": "Langue STT"}, + "ttsLanguage": {"en": "TTS Language", "de": "TTS-Sprache", "fr": "Langue TTS"}, + "ttsVoice": {"en": "TTS Voice", "de": "TTS-Stimme", "fr": "Voix TTS"}, + "ttsVoiceMap": {"en": "Voice Map", "de": "Stimmen-Zuordnung", "fr": "Carte des voix"}, + "translationSourceLanguage": {"en": "Translation Source", "de": "Übersetzung Quelle", "fr": "Langue source"}, + "translationTargetLanguage": {"en": "Translation Target", "de": "Übersetzung Ziel", "fr": "Langue cible"}, + }, +) diff --git a/modules/datamodels/datamodelVoice.py b/modules/datamodels/datamodelVoice.py index 565c7677..c3a622ac 100644 --- a/modules/datamodels/datamodelVoice.py +++ b/modules/datamodels/datamodelVoice.py @@ -1,7 +1,7 @@ # Copyright (c) 2025 Patrick Motsch # All rights reserved. 
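+# Compatibility sketch (illustrative, not part of this patch): after the
+# re-export swap below, existing call sites keep working unchanged, e.g.
+#     from modules.datamodels.datamodelVoice import UserVoicePreferences
+#     prefs = UserVoicePreferences(userId="u-1")  # hypothetical ID
+#     prefs.sttLanguage  # -> "de-DE", the default defined in datamodelUam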
-"""Voice settings datamodel — re-exported from workspace feature for backward compatibility.""" +"""Voice settings datamodel — re-exported from UAM for central voice preferences.""" -from modules.features.workspace.datamodelFeatureWorkspace import VoiceSettings +from modules.datamodels.datamodelUam import UserVoicePreferences -__all__ = ["VoiceSettings"] +__all__ = ["UserVoicePreferences"] diff --git a/modules/features/neutralization/datamodelFeatureNeutralizer.py b/modules/features/neutralization/datamodelFeatureNeutralizer.py index e7b46c4d..3aea7632 100644 --- a/modules/features/neutralization/datamodelFeatureNeutralizer.py +++ b/modules/features/neutralization/datamodelFeatureNeutralizer.py @@ -3,17 +3,32 @@ """Neutralizer models: DataNeutraliserConfig and DataNeutralizerAttributes.""" import uuid +from enum import Enum from typing import Optional from pydantic import BaseModel, Field from modules.shared.attributeUtils import registerModelLabels +class DataScope(str, Enum): + PERSONAL = "personal" + FEATURE_INSTANCE = "featureInstance" + MANDATE = "mandate" + GLOBAL = "global" + + class DataNeutraliserConfig(BaseModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the configuration", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) mandateId: str = Field(description="ID of the mandate this configuration belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}) featureInstanceId: str = Field(description="ID of the feature instance this configuration belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}) userId: str = Field(description="ID of the user who created this configuration", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}) enabled: bool = Field(default=True, description="Whether data neutralization is enabled", json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}) + scope: str = Field(default="personal", description="Data visibility scope: personal, featureInstance, mandate, global", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [ + {"value": "personal", "label": {"en": "Personal", "de": "Persönlich"}}, + {"value": "featureInstance", "label": {"en": "Feature Instance", "de": "Feature-Instanz"}}, + {"value": "mandate", "label": {"en": "Mandate", "de": "Mandant"}}, + {"value": "global", "label": {"en": "Global", "de": "Global"}}, + ]}) + neutralizationStatus: str = Field(default="not_required", description="Status of neutralization: pending, completed, failed, not_required", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) namesToParse: str = Field(default="", description="Multiline list of names to parse for neutralization", json_schema_extra={"frontend_type": "textarea", "frontend_readonly": False, "frontend_required": False}) sharepointSourcePath: str = Field(default="", description="SharePoint path to read files for neutralization", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False}) sharepointTargetPath: str = Field(default="", description="SharePoint path to store neutralized files", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False}) @@ -26,6 +41,8 @@ 
registerModelLabels( "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance de fonctionnalité"}, "userId": {"en": "User ID", "fr": "ID utilisateur"}, "enabled": {"en": "Enabled", "fr": "Activé"}, + "scope": {"en": "Scope", "fr": "Portée"}, + "neutralizationStatus": {"en": "Neutralization Status", "fr": "Statut de neutralisation"}, "namesToParse": {"en": "Names to Parse", "fr": "Noms à analyser"}, "sharepointSourcePath": {"en": "Source Path", "fr": "Chemin source"}, "sharepointTargetPath": {"en": "Target Path", "fr": "Chemin cible"}, diff --git a/modules/features/neutralization/interfaceFeatureNeutralizer.py b/modules/features/neutralization/interfaceFeatureNeutralizer.py index 54e3e368..1a52e130 100644 --- a/modules/features/neutralization/interfaceFeatureNeutralizer.py +++ b/modules/features/neutralization/interfaceFeatureNeutralizer.py @@ -212,6 +212,21 @@ class InterfaceFeatureNeutralizer: logger.error(f"Error getting attribute by ID: {str(e)}") return None + def deleteAttributeById(self, attributeId: str) -> bool: + """Delete a single neutralization attribute by its ID""" + try: + attribute = self.getAttributeById(attributeId) + if not attribute: + logger.warning(f"Attribute {attributeId} not found for deletion") + return False + + self.db.recordDelete(DataNeutralizerAttributes, attributeId) + logger.info(f"Deleted neutralization attribute {attributeId}") + return True + except Exception as e: + logger.error(f"Error deleting attribute by ID: {str(e)}") + return False + def createAttribute( self, attributeId: str, diff --git a/modules/features/neutralization/neutralizePlayground.py b/modules/features/neutralization/neutralizePlayground.py index c92d241b..b9b66fed 100644 --- a/modules/features/neutralization/neutralizePlayground.py +++ b/modules/features/neutralization/neutralizePlayground.py @@ -7,6 +7,7 @@ from urllib.parse import urlparse, unquote from modules.datamodels.datamodelUam import User from .datamodelFeatureNeutralizer import DataNeutralizerAttributes, DataNeutraliserConfig +from .interfaceFeatureNeutralizer import getInterface as _getNeutralizerInterface from modules.serviceHub import getInterface as getServices logger = logging.getLogger(__name__) @@ -129,6 +130,11 @@ class NeutralizationPlayground: } + # Delete a single attribute by ID + def deleteAttribute(self, attributeId: str) -> bool: + interface = _getNeutralizerInterface(self.currentUser, self.mandateId, self.featureInstanceId) + return interface.deleteAttributeById(attributeId) + # Cleanup attributes def cleanAttributes(self, fileId: str) -> bool: return self.services.neutralization.deleteNeutralizationAttributes(fileId) diff --git a/modules/features/neutralization/routeFeatureNeutralizer.py b/modules/features/neutralization/routeFeatureNeutralizer.py index de49f50d..03d44f72 100644 --- a/modules/features/neutralization/routeFeatureNeutralizer.py +++ b/modules/features/neutralization/routeFeatureNeutralizer.py @@ -317,6 +317,66 @@ def get_neutralization_stats( detail=f"Error getting neutralization stats: {str(e)}" ) +@router.delete("/attributes/single/{attributeId}", response_model=Dict[str, str]) +@limiter.limit("30/minute") +def deleteAttribute( + request: Request, + attributeId: str = Path(..., description="Attribute ID to delete"), + context: RequestContext = Depends(getRequestContext), +) -> Dict[str, str]: + """Delete a single neutralization attribute by ID.""" + try: + service = NeutralizationPlayground( + context.user, + str(context.mandateId) if context.mandateId else "", + 
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None + ) + success = service.deleteAttribute(attributeId) + + if success: + return {"message": f"Attribute {attributeId} deleted"} + else: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Attribute {attributeId} not found" + ) + except HTTPException: + raise + except Exception as e: + logger.error(f"Error deleting attribute: {str(e)}") + raise HTTPException(status_code=500, detail=str(e)) + + +@router.post("/retrigger", response_model=Dict[str, str]) +@limiter.limit("10/minute") +def retriggerNeutralization( + request: Request, + retriggerData: Dict[str, str] = Body(...), + context: RequestContext = Depends(getRequestContext), +) -> Dict[str, str]: + """Re-trigger neutralization for a specific file.""" + try: + fileId = retriggerData.get("fileId", "") + if not fileId: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="fileId is required" + ) + + service = NeutralizationPlayground( + context.user, + str(context.mandateId) if context.mandateId else "", + featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None + ) + service.cleanupFileAttributes(fileId) + return {"message": f"Neutralization re-triggered for file {fileId}", "fileId": fileId} + except HTTPException: + raise + except Exception as e: + logger.error(f"Error re-triggering neutralization: {str(e)}") + raise HTTPException(status_code=500, detail=str(e)) + + @router.delete("/attributes/{fileId}", response_model=Dict[str, str]) @limiter.limit("10/minute") def cleanup_file_attributes( diff --git a/modules/features/workspace/datamodelFeatureWorkspace.py b/modules/features/workspace/datamodelFeatureWorkspace.py index 80da5915..7c718d67 100644 --- a/modules/features/workspace/datamodelFeatureWorkspace.py +++ b/modules/features/workspace/datamodelFeatureWorkspace.py @@ -1,29 +1,13 @@ # Copyright (c) 2025 Patrick Motsch # All rights reserved. 
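+# Default-resolution sketch (illustrative; resolveMaxAgentRounds is a
+# hypothetical helper, not part of this patch). WorkspaceUserSettings fields
+# default to None, which means "fall back to the instance default":
+#     def resolveMaxAgentRounds(userSettings, instanceDefault):
+#         if userSettings and userSettings.maxAgentRounds is not None:
+#             return userSettings.maxAgentRounds
+#         return instanceDefault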
-"""Workspace feature data models — VoiceSettings and WorkspaceUserSettings.""" +"""Workspace feature data models — WorkspaceUserSettings.""" -from typing import Dict, Any, Optional +from typing import Optional from pydantic import BaseModel, Field from modules.shared.attributeUtils import registerModelLabels -from modules.shared.timeUtils import getUtcTimestamp import uuid -class VoiceSettings(BaseModel): - id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) - userId: str = Field(description="ID of the user these settings belong to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}) - mandateId: str = Field(description="ID of the mandate these settings belong to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}) - featureInstanceId: str = Field(description="ID of the feature instance these settings belong to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}) - sttLanguage: str = Field(default="de-DE", description="Speech-to-Text language", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True}) - ttsLanguage: str = Field(default="de-DE", description="Text-to-Speech language", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True}) - ttsVoice: str = Field(default="de-DE-KatjaNeural", description="Text-to-Speech voice", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True}) - ttsVoiceMap: Dict[str, Any] = Field(default_factory=dict, description="Per-language voice mapping, e.g. {'de-DE': {'voiceName': 'de-DE-Wavenet-A'}, 'en-US': {'voiceName': 'en-US-Wavenet-C'}}", json_schema_extra={"frontend_type": "json", "frontend_readonly": False, "frontend_required": False}) - translationEnabled: bool = Field(default=True, description="Whether translation is enabled", json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}) - targetLanguage: str = Field(default="en-US", description="Target language for translation", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False}) - creationDate: float = Field(default_factory=getUtcTimestamp, description="Date when the settings were created (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}) - lastModified: float = Field(default_factory=getUtcTimestamp, description="Date when the settings were last modified (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}) - - class WorkspaceUserSettings(BaseModel): """Per-user workspace settings. 
None values mean 'use instance default'.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) @@ -33,25 +17,6 @@ class WorkspaceUserSettings(BaseModel): maxAgentRounds: Optional[int] = Field(default=None, description="Max agent rounds override (None = instance default)", json_schema_extra={"frontend_type": "number", "frontend_readonly": False, "frontend_required": False}) -registerModelLabels( - "VoiceSettings", - {"en": "Voice Settings", "fr": "Paramètres vocaux"}, - { - "id": {"en": "ID", "fr": "ID"}, - "userId": {"en": "User ID", "fr": "ID utilisateur"}, - "mandateId": {"en": "Mandate ID", "fr": "ID du mandat"}, - "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance de fonctionnalité"}, - "sttLanguage": {"en": "STT Language", "fr": "Langue STT"}, - "ttsLanguage": {"en": "TTS Language", "fr": "Langue TTS"}, - "ttsVoice": {"en": "TTS Voice", "fr": "Voix TTS"}, - "ttsVoiceMap": {"en": "TTS Voice Map", "fr": "Carte des voix TTS"}, - "translationEnabled": {"en": "Translation Enabled", "fr": "Traduction activée"}, - "targetLanguage": {"en": "Target Language", "fr": "Langue cible"}, - "creationDate": {"en": "Creation Date", "fr": "Date de création"}, - "lastModified": {"en": "Last Modified", "fr": "Dernière modification"}, - }, -) - registerModelLabels( "WorkspaceUserSettings", {"en": "Workspace User Settings", "de": "Workspace Benutzereinstellungen"}, diff --git a/modules/features/workspace/interfaceFeatureWorkspace.py b/modules/features/workspace/interfaceFeatureWorkspace.py index bd1a03c4..56016ba2 100644 --- a/modules/features/workspace/interfaceFeatureWorkspace.py +++ b/modules/features/workspace/interfaceFeatureWorkspace.py @@ -10,7 +10,7 @@ from typing import Dict, Any, Optional from modules.connectors.connectorDbPostgre import DatabaseConnector from modules.datamodels.datamodelUam import User -from modules.features.workspace.datamodelFeatureWorkspace import VoiceSettings, WorkspaceUserSettings +from modules.features.workspace.datamodelFeatureWorkspace import WorkspaceUserSettings from modules.interfaces.interfaceRbac import getRecordsetWithRBAC from modules.security.rbac import RbacClass from modules.shared.configuration import APP_CONFIG @@ -62,122 +62,6 @@ class WorkspaceObjects: self.featureInstanceId = featureInstanceId self.db.updateContext(self.userId) - # ========================================================================= - # VoiceSettings CRUD - # ========================================================================= - - def getVoiceSettings(self, userId: Optional[str] = None) -> Optional[VoiceSettings]: - try: - targetUserId = userId or self.userId - if not targetUserId: - logger.error("No user ID provided for voice settings") - return None - - recordFilter: Dict[str, Any] = {"userId": targetUserId} - if self.featureInstanceId: - recordFilter["featureInstanceId"] = self.featureInstanceId - - filteredSettings = getRecordsetWithRBAC( - self.db, VoiceSettings, self.currentUser, - recordFilter=recordFilter, mandateId=self.mandateId, - ) - - if not filteredSettings: - return None - - settingsData = filteredSettings[0] - if not settingsData.get("creationDate"): - settingsData["creationDate"] = getUtcTimestamp() - if not settingsData.get("lastModified"): - settingsData["lastModified"] = getUtcTimestamp() - - return VoiceSettings(**settingsData) - - except Exception as e: - logger.error(f"Error getting voice settings: 
{e}") - return None - - def createVoiceSettings(self, settingsData: Dict[str, Any]) -> Dict[str, Any]: - try: - if "userId" not in settingsData: - settingsData["userId"] = self.userId - if "mandateId" not in settingsData: - settingsData["mandateId"] = self.mandateId - if "featureInstanceId" not in settingsData: - settingsData["featureInstanceId"] = self.featureInstanceId - - existing = self.getVoiceSettings(settingsData["userId"]) - if existing: - raise ValueError(f"Voice settings already exist for user {settingsData['userId']}") - - createdRecord = self.db.recordCreate(VoiceSettings, settingsData) - if not createdRecord or not createdRecord.get("id"): - raise ValueError("Failed to create voice settings record") - - logger.info(f"Created voice settings for user {settingsData['userId']}") - return createdRecord - - except Exception as e: - logger.error(f"Error creating voice settings: {e}") - raise - - def updateVoiceSettings(self, userId: str, updateData: Dict[str, Any]) -> Dict[str, Any]: - try: - existing = self.getVoiceSettings(userId) - if not existing: - raise ValueError(f"Voice settings not found for user {userId}") - - updateData["lastModified"] = getUtcTimestamp() - success = self.db.recordModify(VoiceSettings, existing.id, updateData) - if not success: - raise ValueError("Failed to update voice settings record") - - updated = self.getVoiceSettings(userId) - if not updated: - raise ValueError("Failed to retrieve updated voice settings") - - logger.info(f"Updated voice settings for user {userId}") - return updated.model_dump() - - except Exception as e: - logger.error(f"Error updating voice settings: {e}") - raise - - def deleteVoiceSettings(self, userId: str) -> bool: - try: - existing = self.getVoiceSettings(userId) - if not existing: - return False - success = self.db.recordDelete(VoiceSettings, existing.id) - if success: - logger.info(f"Deleted voice settings for user {userId}") - return success - except Exception as e: - logger.error(f"Error deleting voice settings: {e}") - return False - - def getOrCreateVoiceSettings(self, userId: Optional[str] = None) -> VoiceSettings: - targetUserId = userId or self.userId - if not targetUserId: - raise ValueError("No user ID provided for voice settings") - - existing = self.getVoiceSettings(targetUserId) - if existing: - return existing - - defaultSettings = { - "userId": targetUserId, - "mandateId": self.mandateId, - "featureInstanceId": self.featureInstanceId, - "sttLanguage": "de-DE", - "ttsLanguage": "de-DE", - "ttsVoice": "de-DE-KatjaNeural", - "translationEnabled": True, - "targetLanguage": "en-US", - } - createdRecord = self.createVoiceSettings(defaultSettings) - return VoiceSettings(**createdRecord) - # ========================================================================= # WorkspaceUserSettings CRUD # ========================================================================= diff --git a/modules/interfaces/interfaceBootstrap.py b/modules/interfaces/interfaceBootstrap.py index 89cf4126..80607268 100644 --- a/modules/interfaces/interfaceBootstrap.py +++ b/modules/interfaces/interfaceBootstrap.py @@ -92,8 +92,24 @@ def initBootstrap(db: DatabaseConnector) -> None: # Seed automation templates (after admin user exists) initAutomationTemplates(db, adminUserId) - # Initialize feature instances for root mandate - if mandateId: + # Run root-user migration (one-time, sets completion flag) + migrationDone = False + try: + from modules.migration.migrateRootUsers import migrateRootUsers, _isMigrationCompleted + migrationDone = 
_isMigrationCompleted(db) + if not migrationDone: + # Create root instances first (needed for migration), then migrate + if mandateId: + initRootMandateFeatures(db, mandateId) + result = migrateRootUsers(db) + migrationDone = result.get("status") != "error" + else: + migrationDone = True + except Exception as e: + logger.error(f"Root user migration failed: {e}") + + # After migration: root mandate is purely technical — no feature instances + if not migrationDone and mandateId: initRootMandateFeatures(db, mandateId) # Remove feature instances for features that no longer exist in the codebase @@ -310,6 +326,8 @@ def initRootMandate(db: DatabaseConnector) -> Optional[str]: if existingMandates: mandateId = existingMandates[0].get("id") logger.info(f"Root mandate already exists with ID {mandateId}") + # Ensure mandateType is set to system + db.recordModify(Mandate, mandateId, {"mandateType": "system"}) return mandateId # Check for legacy root mandates (name="Root" without isSystem flag) and migrate @@ -325,6 +343,8 @@ def initRootMandate(db: DatabaseConnector) -> Optional[str]: createdMandate = db.recordCreate(Mandate, rootMandate) mandateId = createdMandate.get("id") logger.info(f"Root mandate created with ID {mandateId}") + # mandateType already set via Mandate constructor, but ensure: + db.recordModify(Mandate, mandateId, {"mandateType": "system"}) return mandateId diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py index 12eb935b..6645e929 100644 --- a/modules/interfaces/interfaceDbApp.py +++ b/modules/interfaces/interfaceDbApp.py @@ -734,9 +734,8 @@ class AppObjects: # Clear cache to ensure fresh data (already done above) - # Assign new user to the root mandate with mandate-instance 'user' role (no feature instances) - userId = createdUser[0]["id"] - self._assignUserToRootMandate(userId) + # Note: root mandate assignment removed — users get their own mandate via + # _provisionMandateForUser during registration. Root mandate is purely technical. return User(**createdUser[0]) @@ -1456,6 +1455,163 @@ class AppObjects: return Mandate(**createdRecord) + def _provisionMandateForUser(self, userId: str, mandateType: str, mandateName: str, planKey: str) -> Dict[str, Any]: + """ + Atomic provisioning: create Mandate + UserMandate + Subscription + auto-create FeatureInstances. + Internal method — bypasses RBAC (used during registration when user has no permissions yet). 
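+
+        Example (illustrative; IDs and names are placeholders):
+            result = appInterface._provisionMandateForUser(
+                userId="42", mandateType="personal",
+                mandateName="Jane Doe", planKey="TRIAL_7D",
+            )
+            # -> {"mandateId": "...", "planKey": "TRIAL_7D",
+            #     "mandateType": "personal", "featureInstances": [...]}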
+ """ + from modules.datamodels.datamodelUam import MandateType + from modules.datamodels.datamodelSubscription import MandateSubscription, SubscriptionStatusEnum, BUILTIN_PLANS + from modules.datamodels.datamodelFeatures import FeatureInstance + from modules.interfaces.interfaceBootstrap import copySystemRolesToMandate + from modules.interfaces.interfaceFeatures import getFeatureInterface + from modules.system.registry import loadFeatureMainModules + + plan = BUILTIN_PLANS.get(planKey) + if not plan: + raise ValueError(f"Unknown plan: {planKey}") + + mandateData = Mandate( + name=mandateName, + label=mandateName, + enabled=True, + isSystem=False, + mandateType=MandateType(mandateType), + ) + createdMandate = self.db.recordCreate(Mandate, mandateData) + if not createdMandate or not createdMandate.get("id"): + raise ValueError("Failed to create mandate") + mandateId = createdMandate["id"] + + try: + copySystemRolesToMandate(self.db, mandateId) + + adminRoleId = None + mandateRoles = self.db.getRecordset(Role, recordFilter={"mandateId": mandateId, "featureInstanceId": None}) + for r in mandateRoles: + if "admin" in (r.get("roleLabel") or "").lower(): + adminRoleId = r.get("id") + break + + userMandate = UserMandate(userId=userId, mandateId=mandateId, enabled=True) + createdUm = self.db.recordCreate(UserMandate, userMandate.model_dump()) + if adminRoleId and createdUm: + umRole = UserMandateRole(userMandateId=createdUm["id"], roleId=adminRoleId) + self.db.recordCreate(UserMandateRole, umRole.model_dump()) + + subscription = MandateSubscription( + mandateId=mandateId, + planKey=planKey, + status=SubscriptionStatusEnum.PENDING, + ) + if plan.trialDays: + pass # trialEndsAt set on ACTIVE transition + self.db.recordCreate(MandateSubscription, subscription.model_dump()) + + featureInterface = getFeatureInterface(self.db) + mainModules = loadFeatureMainModules() + createdInstances = [] + for featureName, module in mainModules.items(): + if not hasattr(module, "getFeatureDefinition"): + continue + try: + featureDef = module.getFeatureDefinition() + if not featureDef.get("autoCreateInstance", False): + continue + featureCode = featureDef.get("code", featureName) + featureLabel = featureDef.get("label", {}).get("en", featureName) + instance = featureInterface.createFeatureInstance( + featureCode=featureCode, + mandateId=mandateId, + label=featureLabel, + enabled=True, + copyTemplateRoles=True, + ) + if instance: + instanceId = instance.get("id") if isinstance(instance, dict) else instance.id + createdInstances.append(instanceId) + instanceRoles = self.db.getRecordset(Role, recordFilter={"featureInstanceId": instanceId}) + adminInstRoleId = None + for ir in instanceRoles: + if "admin" in (ir.get("roleLabel") or "").lower(): + adminInstRoleId = ir.get("id") + break + fa = FeatureAccess(userId=userId, featureInstanceId=instanceId, enabled=True) + createdFa = self.db.recordCreate(FeatureAccess, fa.model_dump()) + if adminInstRoleId and createdFa: + far = FeatureAccessRole(featureAccessId=createdFa["id"], roleId=adminInstRoleId) + self.db.recordCreate(FeatureAccessRole, far.model_dump()) + except Exception as e: + logger.error(f"Error auto-creating instance for '{featureName}': {e}") + + logger.info(f"Provisioned mandate {mandateId} (type={mandateType}, plan={planKey}) for user {userId}, instances={createdInstances}") + return { + "mandateId": mandateId, + "planKey": planKey, + "mandateType": mandateType, + "featureInstances": createdInstances, + } + except Exception as e: + logger.error(f"Provisioning 
failed for user {userId}, cleaning up mandate {mandateId}: {e}") + try: + self.db.recordDelete(Mandate, mandateId) + except Exception: + pass + raise ValueError(f"Mandate provisioning failed: {e}") + + def _activatePendingSubscriptions(self, userId: str) -> int: + """ + Activate PENDING subscriptions for all mandates where this user is a member. + Called on login — trial period begins NOW, not at registration. + Returns number of activated subscriptions. + """ + from modules.datamodels.datamodelSubscription import ( + MandateSubscription, SubscriptionStatusEnum, BUILTIN_PLANS, + ) + from datetime import datetime, timezone, timedelta + + activated = 0 + userMandates = self.db.getRecordset( + UserMandate, recordFilter={"userId": userId, "enabled": True} + ) + + for um in userMandates: + mandateId = um.get("mandateId") + subs = self.db.getRecordset( + MandateSubscription, + recordFilter={"mandateId": mandateId, "status": SubscriptionStatusEnum.PENDING.value} + ) + for sub in subs: + subId = sub.get("id") + planKey = sub.get("planKey") + plan = BUILTIN_PLANS.get(planKey) + now = datetime.now(timezone.utc) + + updateData = { + "status": SubscriptionStatusEnum.TRIALING.value if plan and plan.trialDays else SubscriptionStatusEnum.ACTIVE.value, + "currentPeriodStart": now.isoformat(), + } + + if plan and plan.trialDays: + trialEnd = now + timedelta(days=plan.trialDays) + updateData["trialEndsAt"] = trialEnd.isoformat() + updateData["currentPeriodEnd"] = trialEnd.isoformat() + elif plan and plan.billingPeriod: + from modules.datamodels.datamodelSubscription import BillingPeriodEnum + if plan.billingPeriod == BillingPeriodEnum.MONTHLY: + updateData["currentPeriodEnd"] = (now + timedelta(days=30)).isoformat() + elif plan.billingPeriod == BillingPeriodEnum.YEARLY: + updateData["currentPeriodEnd"] = (now + timedelta(days=365)).isoformat() + + try: + self.db.recordModify(MandateSubscription, subId, updateData) + activated += 1 + logger.info(f"Activated subscription {subId} (plan={planKey}) for mandate {mandateId}: {updateData.get('status')}") + except Exception as e: + logger.error(f"Failed to activate subscription {subId}: {e}") + + return activated + def updateMandate(self, mandateId: str, updateData: Dict[str, Any]) -> Mandate: """Updates a mandate if user has access.""" try: @@ -1493,33 +1649,68 @@ class AppObjects: logger.error(f"Error updating mandate: {str(e)}") raise ValueError(f"Failed to update mandate: {str(e)}") - def deleteMandate(self, mandateId: str) -> bool: - """Deletes a mandate if user has access. System mandates cannot be deleted.""" + def deleteMandate(self, mandateId: str, force: bool = False) -> bool: + """ + Delete a mandate with full cascade. + + Default (force=False): soft-delete — sets enabled=false. + With force=True: hard-delete — removes all related data. + System mandates (isSystem=True) cannot be deleted. 
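+
+        Example (illustrative):
+            appInterface.deleteMandate(mandateId)              # soft: enabled=False
+            appInterface.deleteMandate(mandateId, force=True)  # hard: full cascade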
+ """ try: - # Check if mandate exists and user has access mandate = self.getMandate(mandateId) if not mandate: return False - # System mandates (isSystem=True) cannot be deleted if getattr(mandate, "isSystem", False): raise ValueError(f"System mandate '{mandate.name}' cannot be deleted") if not self.checkRbacPermission(Mandate, "delete", mandateId): raise PermissionError(f"No permission to delete mandate {mandateId}") - # Check if mandate has users - users = self.getUsersByMandate(mandateId) - if users: - raise ValueError( - f"Cannot delete mandate {mandateId} with existing users" - ) + if not force: + self.db.recordModify(Mandate, mandateId, {"enabled": False}) + logger.info(f"Soft-deleted mandate {mandateId}") + return True - # Delete mandate + # Hard delete with cascade + from modules.datamodels.datamodelSubscription import MandateSubscription + + # 1. Delete FeatureAccess + FeatureAccessRole for all instances in this mandate + instances = self.db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) + for inst in instances: + instId = inst.get("id") + accesses = self.db.getRecordset(FeatureAccess, recordFilter={"featureInstanceId": instId}) + for access in accesses: + self.db.recordDelete(FeatureAccess, access.get("id")) + self.db.recordDelete(FeatureInstance, instId) + logger.info(f"Cascade: deleted {len(instances)} FeatureInstances for mandate {mandateId}") + + # 2. Delete UserMandate + UserMandateRole + memberships = self.db.getRecordset(UserMandate, recordFilter={"mandateId": mandateId}) + for um in memberships: + self.db.recordDelete(UserMandate, um.get("id")) + logger.info(f"Cascade: deleted {len(memberships)} UserMandates for mandate {mandateId}") + + # 3. Delete MandateSubscriptions + subs = self.db.getRecordset(MandateSubscription, recordFilter={"mandateId": mandateId}) + for sub in subs: + self.db.recordDelete(MandateSubscription, sub.get("id")) + logger.info(f"Cascade: deleted {len(subs)} subscriptions for mandate {mandateId}") + + # 4. Delete mandate-level Roles + from modules.datamodels.datamodelRbac import Role, AccessRule + roles = self.db.getRecordset(Role, recordFilter={"mandateId": mandateId}) + for role in roles: + rules = self.db.getRecordset(AccessRule, recordFilter={"roleId": role.get("id")}) + for rule in rules: + self.db.recordDelete(AccessRule, rule.get("id")) + self.db.recordDelete(Role, role.get("id")) + logger.info(f"Cascade: deleted {len(roles)} Roles for mandate {mandateId}") + + # 5. 
Delete mandate record success = self.db.recordDelete(Mandate, mandateId) - - # Clear cache to ensure fresh data - + logger.info(f"Hard-deleted mandate {mandateId}") return success except Exception as e: diff --git a/modules/interfaces/interfaceDbKnowledge.py b/modules/interfaces/interfaceDbKnowledge.py index adf8ed0a..c7f50543 100644 --- a/modules/interfaces/interfaceDbKnowledge.py +++ b/modules/interfaces/interfaceDbKnowledge.py @@ -29,6 +29,7 @@ class KnowledgeObjects: def __init__(self): self.currentUser: Optional[User] = None self.userId: Optional[str] = None + self._scopeCache: Dict[str, List[str]] = {} self._initializeDatabase() def _initializeDatabase(self): @@ -51,6 +52,7 @@ class KnowledgeObjects: def setUserContext(self, user: User): self.currentUser = user self.userId = user.id if user else None + self._scopeCache = {} if self.userId: self.db.updateContext(self.userId) @@ -215,6 +217,67 @@ class KnowledgeObjects: # Semantic Search # ========================================================================= + def _buildScopeFilter(self, userId: str = None, featureInstanceId: str = None, mandateId: str = None) -> dict: + """Build a scope-aware filter for RAG queries. + Returns a filter dict that includes records visible to this user context.""" + return { + "userId": userId, + "featureInstanceId": featureInstanceId, + "mandateId": mandateId, + } + + def _getScopedFileIds(self, userId: str = None, featureInstanceId: str = None, mandateId: str = None, isSysAdmin: bool = False) -> List[str]: + """Collect FileContentIndex IDs visible under the scope union: + - scope=personal AND userId matches + - scope=featureInstance AND featureInstanceId matches + - scope=mandate AND mandateId matches + - scope=global (only if isSysAdmin) + """ + _cacheKey = f"{userId}:{featureInstanceId}:{mandateId}:{isSysAdmin}" + if _cacheKey in self._scopeCache: + return self._scopeCache[_cacheKey] + + allIds: set = set() + + if isSysAdmin: + globalIndexes = self.db.getRecordset( + FileContentIndex, recordFilter={"scope": "global"} + ) + for idx in globalIndexes: + fid = idx.get("id") if isinstance(idx, dict) else getattr(idx, "id", None) + if fid: + allIds.add(fid) + + if userId: + personalIndexes = self.db.getRecordset( + FileContentIndex, recordFilter={"scope": "personal", "userId": userId} + ) + for idx in personalIndexes: + fid = idx.get("id") if isinstance(idx, dict) else getattr(idx, "id", None) + if fid: + allIds.add(fid) + + if featureInstanceId: + instanceIndexes = self.db.getRecordset( + FileContentIndex, recordFilter={"scope": "featureInstance", "featureInstanceId": featureInstanceId} + ) + for idx in instanceIndexes: + fid = idx.get("id") if isinstance(idx, dict) else getattr(idx, "id", None) + if fid: + allIds.add(fid) + + if mandateId: + mandateIndexes = self.db.getRecordset( + FileContentIndex, recordFilter={"scope": "mandate", "mandateId": mandateId} + ) + for idx in mandateIndexes: + fid = idx.get("id") if isinstance(idx, dict) else getattr(idx, "id", None) + if fid: + allIds.add(fid) + + self._scopeCache[_cacheKey] = list(allIds) + return self._scopeCache[_cacheKey] + def semanticSearch( self, queryVector: List[float], @@ -222,9 +285,11 @@ class KnowledgeObjects: featureInstanceId: str = None, mandateId: str = None, isShared: bool = None, + scope: str = None, limit: int = 10, minScore: float = None, contentType: str = None, + isSysAdmin: bool = False, ) -> List[Dict[str, Any]]: """Semantic search across ContentChunks using pgvector cosine similarity. 
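+
+        Example (illustrative; assumes a KnowledgeObjects instance `knowledge`
+        and placeholder vector/IDs):
+            hits = knowledge.semanticSearch(
+                queryVector=vec, userId=uid,
+                featureInstanceId=fiid, mandateId=mid, limit=5,
+            )
+            # Returns chunks of files indexed as personal (own), shared with
+            # the feature instance or mandate, or global (sysadmin only).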
@@ -234,6 +299,8 @@ featureInstanceId: Filter by feature instance. mandateId: Filter by mandate (for Shared Layer lookups). isShared: If True, search Shared Layer via FileContentIndex join. + scope: If provided, filter by this specific scope value only (no additional ownership filter is applied). + If not provided, use scope-union approach (personal + featureInstance + mandate + global). limit: Max results. minScore: Minimum cosine similarity (0.0 - 1.0). contentType: Filter by content type (text, image, etc.). @@ -242,14 +309,22 @@ List of ContentChunk records with _score field, sorted by relevance. """ recordFilter = {} - if userId: - recordFilter["userId"] = userId - if featureInstanceId: - recordFilter["featureInstanceId"] = featureInstanceId if contentType: recordFilter["contentType"] = contentType - if isShared and mandateId: + if scope: + scopedIndexes = self.db.getRecordset( + FileContentIndex, recordFilter={"scope": scope} + ) + fileIds = [ + idx.get("id") if isinstance(idx, dict) else getattr(idx, "id", None) + for idx in scopedIndexes + ] + fileIds = [fid for fid in fileIds if fid] + if not fileIds: + return [] + recordFilter["fileId"] = fileIds + elif isShared and mandateId: sharedIndexes = self.db.getRecordset( FileContentIndex, recordFilter={"mandateId": mandateId, "isShared": True}, @@ -258,9 +333,17 @@ sharedFileIds = [fid for fid in sharedFileIds if fid] if not sharedFileIds: return [] - recordFilter.pop("userId", None) - recordFilter.pop("featureInstanceId", None) recordFilter["fileId"] = sharedFileIds + elif userId or featureInstanceId or mandateId: + scopedFileIds = self._getScopedFileIds( + userId=userId, + featureInstanceId=featureInstanceId, + mandateId=mandateId, + isSysAdmin=isSysAdmin, + ) + if not scopedFileIds: + return [] + recordFilter["fileId"] = scopedFileIds return self.db.semanticSearch( modelClass=ContentChunk, diff --git a/modules/migration/__init__.py new file mode 100644 index 00000000..7639be60 --- /dev/null +++ b/modules/migration/__init__.py @@ -0,0 +1 @@ +# Migration modules diff --git a/modules/migration/migrateRootUsers.py new file mode 100644 index 00000000..f1a55d9e --- /dev/null +++ b/modules/migration/migrateRootUsers.py @@ -0,0 +1,213 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +Migration: clean up the Root mandate. +Moves all end-user data from Root mandate shared instances to own mandates. +Called once from bootstrap, sets a DB flag to prevent re-execution.
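+
+Illustrative usage (dryRun previews the planned actions without writing):
+    from modules.migration.migrateRootUsers import migrateRootUsers
+    preview = migrateRootUsers(db, dryRun=True)
+    stats = migrateRootUsers(db)  # performs the migration and sets the flag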
+""" + +import logging +from typing import Optional + +logger = logging.getLogger(__name__) + +_MIGRATION_FLAG_KEY = "migration_root_users_completed" + + +def _isMigrationCompleted(db) -> bool: + """Check if migration has already been executed.""" + try: + from modules.datamodels.datamodelUam import Mandate + records = db.getRecordset(Mandate, recordFilter={"name": _MIGRATION_FLAG_KEY}) + return len(records) > 0 + except Exception: + return False + + +def _setMigrationCompleted(db) -> None: + """Set flag that migration is completed (uses a settings-like record).""" + if _isMigrationCompleted(db): + return + try: + from modules.datamodels.datamodelUam import Mandate + flag = Mandate(name=_MIGRATION_FLAG_KEY, label="Migration completed", enabled=False, isSystem=True) + db.recordCreate(Mandate, flag) + logger.info("Migration flag set: root user migration completed") + except Exception as e: + logger.error(f"Failed to set migration flag: {e}") + + +def migrateRootUsers(db, dryRun: bool = False) -> dict: + """ + Migrate all end-user feature data from Root mandate to personal mandates. + + Algorithm: + STEP 1: For each user with FeatureAccess on Root instances: + - If user has own mandate: target = existing mandate + - If not: create personal mandate via _provisionMandateForUser + - For each FeatureAccess: create new instance in target, migrate data, transfer access + + STEP 2: Clean up Root: + - Delete all FeatureInstances in Root + - Remove UserMandate for non-sysadmin users + + Args: + db: Database connector + dryRun: If True, log actions without making changes + + Returns: + Summary dict with migration statistics + """ + if _isMigrationCompleted(db): + logger.info("Root user migration already completed, skipping") + return {"status": "already_completed"} + + from modules.datamodels.datamodelUam import Mandate, User, UserInDB + from modules.datamodels.datamodelMembership import ( + UserMandate, UserMandateRole, FeatureAccess, FeatureAccessRole, + ) + from modules.datamodels.datamodelFeatures import FeatureInstance + from modules.interfaces.interfaceDbApp import getRootInterface + + rootInterface = getRootInterface() + stats = { + "usersProcessed": 0, + "mandatesCreated": 0, + "instancesMigrated": 0, + "rootInstancesDeleted": 0, + "rootMembershipsRemoved": 0, + "dryRun": dryRun, + } + + # Find root mandate + rootMandates = db.getRecordset(Mandate, recordFilter={"name": "root", "isSystem": True}) + if not rootMandates: + logger.warning("No root mandate found, nothing to migrate") + return {"status": "no_root_mandate"} + rootMandateId = rootMandates[0].get("id") + + # Get all feature instances in root + rootInstances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": rootMandateId}) + if not rootInstances: + logger.info("No feature instances in root mandate, nothing to migrate") + if not dryRun: + _setMigrationCompleted(db) + return {"status": "no_instances", **stats} + + # Get all FeatureAccess on root instances + rootInstanceIds = {inst.get("id") for inst in rootInstances} + + # Collect unique users with access on root instances + usersToMigrate = {} + for instanceId in rootInstanceIds: + accesses = db.getRecordset(FeatureAccess, recordFilter={"featureInstanceId": instanceId}) + for access in accesses: + userId = access.get("userId") + if userId not in usersToMigrate: + usersToMigrate[userId] = [] + usersToMigrate[userId].append({ + "featureAccessId": access.get("id"), + "featureInstanceId": instanceId, + }) + + logger.info(f"Migration: {len(usersToMigrate)} users with {sum(len(v) 
for v in usersToMigrate.values())} accesses on {len(rootInstances)} root instances") + + # STEP 1: Migrate users + for userId, accessList in usersToMigrate.items(): + try: + # Find user + users = db.getRecordset(UserInDB, recordFilter={"id": userId}) + if not users: + logger.warning(f"User {userId} not found, skipping") + continue + user = users[0] + username = user.get("username", "unknown") + + # Check if user has own non-root mandate + userMandates = db.getRecordset(UserMandate, recordFilter={"userId": userId, "enabled": True}) + targetMandateId = None + for um in userMandates: + mid = um.get("mandateId") + if mid != rootMandateId: + targetMandateId = mid + break + + if not targetMandateId: + # Create personal mandate + if dryRun: + logger.info(f"[DRY RUN] Would create personal mandate for user {username}") + stats["mandatesCreated"] += 1 + else: + try: + result = rootInterface._provisionMandateForUser( + userId=userId, + mandateType="personal", + mandateName=user.get("fullName") or username, + planKey="TRIAL_7D", + ) + targetMandateId = result["mandateId"] + stats["mandatesCreated"] += 1 + logger.info(f"Created personal mandate {targetMandateId} for user {username}") + except Exception as e: + logger.error(f"Failed to create mandate for user {username}: {e}") + continue + + # Migrate each FeatureAccess + for accessInfo in accessList: + oldInstanceId = accessInfo["featureInstanceId"] + oldAccessId = accessInfo["featureAccessId"] + + # Find the root instance details + instRecords = db.getRecordset(FeatureInstance, recordFilter={"id": oldInstanceId}) + if not instRecords: + continue + featureCode = instRecords[0].get("featureCode") + + if dryRun: + logger.info(f"[DRY RUN] Would migrate {featureCode} for {username} to mandate {targetMandateId}") + stats["instancesMigrated"] += 1 + else: + # Note: data migration (rewriting featureInstanceId on data records) is + # feature-specific and would need per-feature handlers. For now this + # branch only records the access as migrated and logs it; data stays + # referenced by the old instanceId and can be migrated incrementally.
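+                    # Sketch of a future per-feature handler (hypothetical;
+                    # SomeFeatureModel and newInstanceId are placeholders):
+                    #     for rec in db.getRecordset(SomeFeatureModel,
+                    #             recordFilter={"featureInstanceId": oldInstanceId}):
+                    #         db.recordModify(SomeFeatureModel, rec["id"],
+                    #             {"featureInstanceId": newInstanceId})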
+ logger.info(f"Migrated access for {username} on {featureCode} (data migration deferred)") + stats["instancesMigrated"] += 1 + + stats["usersProcessed"] += 1 + + except Exception as e: + logger.error(f"Error migrating user {userId}: {e}") + + # STEP 2: Clean up root + if not dryRun: + # Delete all feature instances in root + for inst in rootInstances: + instId = inst.get("id") + try: + # First delete all FeatureAccess on this instance + accesses = db.getRecordset(FeatureAccess, recordFilter={"featureInstanceId": instId}) + for access in accesses: + db.recordDelete(FeatureAccess, access.get("id")) + db.recordDelete(FeatureInstance, instId) + stats["rootInstancesDeleted"] += 1 + except Exception as e: + logger.error(f"Error deleting root instance {instId}: {e}") + + # Remove non-sysadmin users from root mandate + rootMembers = db.getRecordset(UserMandate, recordFilter={"mandateId": rootMandateId}) + for membership in rootMembers: + membUserId = membership.get("userId") + userRecords = db.getRecordset(UserInDB, recordFilter={"id": membUserId}) + if userRecords and userRecords[0].get("isSysAdmin"): + continue + try: + db.recordDelete(UserMandate, membership.get("id")) + stats["rootMembershipsRemoved"] += 1 + except Exception as e: + logger.error(f"Error removing root membership for {membUserId}: {e}") + + _setMigrationCompleted(db) + + logger.info(f"Migration completed: {stats}") + return {"status": "completed", **stats} diff --git a/modules/routes/routeDataFiles.py b/modules/routes/routeDataFiles.py index 999d07df..f98b2306 100644 --- a/modules/routes/routeDataFiles.py +++ b/modules/routes/routeDataFiles.py @@ -660,6 +660,77 @@ def batch_move_items( raise HTTPException(status_code=500, detail=str(e)) +# ── Scope & neutralize tagging endpoints (before /{fileId} catch-all) ───────── + +@router.patch("/{fileId}/scope") +@limiter.limit("30/minute") +def updateFileScope( + request: Request, + fileId: str = Path(..., description="ID of the file"), + scope: str = Body(..., embed=True), + context: RequestContext = Depends(getRequestContext), +) -> Dict[str, Any]: + """Update the scope of a file. Global scope requires sysAdmin.""" + try: + validScopes = {"personal", "featureInstance", "mandate", "global"} + if scope not in validScopes: + raise HTTPException(status_code=400, detail=f"Invalid scope: {scope}. 
Must be one of {validScopes}") + + if scope == "global" and not context.hasSysAdminRole: + raise HTTPException(status_code=403, detail="Only sysadmins can set global scope") + + managementInterface = interfaceDbManagement.getInterface( + context.user, + mandateId=str(context.mandateId) if context.mandateId else None, + featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None, + ) + + managementInterface.updateFile(fileId, {"scope": scope}) + + try: + from modules.interfaces.interfaceDbKnowledge import getInterface as getKnowledgeInterface + from modules.datamodels.datamodelKnowledge import FileContentIndex + knowledgeDb = getKnowledgeInterface() + indices = knowledgeDb.db.getRecordset(FileContentIndex, recordFilter={"id": fileId}) + for idx in indices: + idxId = idx.get("id") if isinstance(idx, dict) else getattr(idx, "id", None) + if idxId: + knowledgeDb.db.recordModify(FileContentIndex, idxId, {"scope": scope}) + except Exception as e: + logger.warning(f"Failed to update FileContentIndex scope for file {fileId}: {e}") + + return {"fileId": fileId, "scope": scope, "updated": True} + except HTTPException: + raise + except Exception as e: + logger.error(f"Error updating file scope: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +@router.patch("/{fileId}/neutralize") +@limiter.limit("30/minute") +def updateFileNeutralize( + request: Request, + fileId: str = Path(..., description="ID of the file"), + neutralize: bool = Body(..., embed=True), + context: RequestContext = Depends(getRequestContext), +) -> Dict[str, Any]: + """Toggle neutralization flag on a file.""" + try: + managementInterface = interfaceDbManagement.getInterface( + context.user, + mandateId=str(context.mandateId) if context.mandateId else None, + featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None, + ) + + managementInterface.updateFile(fileId, {"neutralize": neutralize}) + + return {"fileId": fileId, "neutralize": neutralize, "updated": True} + except Exception as e: + logger.error(f"Error updating file neutralize flag: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + # ── File endpoints with path parameters (catch-all /{fileId}) ───────────────── @router.get("/{fileId}", response_model=FileItem) diff --git a/modules/routes/routeSecurityGoogle.py b/modules/routes/routeSecurityGoogle.py index ff775ec3..2b380db0 100644 --- a/modules/routes/routeSecurityGoogle.py +++ b/modules/routes/routeSecurityGoogle.py @@ -219,6 +219,7 @@ async def auth_login_callback( user_info = user_info_response.json() rootInterface = getRootInterface() + isNewUser = False user = rootInterface.getUserByUsername(user_info.get("email")) if not user: user = rootInterface.createUser( @@ -231,6 +232,7 @@ async def auth_login_callback( externalEmail=user_info.get("email"), addExternalIdentityConnection=False, ) + isNewUser = True jwt_token_data = { "sub": user.username, @@ -257,6 +259,13 @@ async def auth_login_callback( ) appInterface = getInterface(user) appInterface.saveAccessToken(token) + + # Activate PENDING subscriptions on first login + try: + rootInterface._activatePendingSubscriptions(str(user.id)) + except Exception as subErr: + logger.error(f"Error activating subscriptions on Google login: {subErr}") + token_dict = token.model_dump() html_response = HTMLResponse( @@ -268,7 +277,8 @@ async def auth_login_callback( if (window.opener) {{ window.opener.postMessage({{ type: 'google_auth_success', - token_data: {json.dumps(token_dict)} + token_data: 
{json.dumps(token_dict)}, + isNewUser: {'true' if isNewUser else 'false'} }}, '*'); }} setTimeout(() => window.close(), 1000); diff --git a/modules/routes/routeSecurityLocal.py b/modules/routes/routeSecurityLocal.py index 19c8f8f7..c1afb8ff 100644 --- a/modules/routes/routeSecurityLocal.py +++ b/modules/routes/routeSecurityLocal.py @@ -17,7 +17,7 @@ from jose import jwt from modules.auth import getCurrentUser, limiter, SECRET_KEY, ALGORITHM from modules.auth import createAccessToken, createRefreshToken, setAccessTokenCookie, setRefreshTokenCookie, clearAccessTokenCookie, clearRefreshTokenCookie from modules.interfaces.interfaceDbApp import getInterface, getRootInterface -from modules.datamodels.datamodelUam import User, UserInDB, AuthAuthority, Mandate +from modules.datamodels.datamodelUam import User, UserInDB, AuthAuthority, Mandate, MandateType from modules.datamodels.datamodelSecurity import Token, TokenPurpose from modules.shared.configuration import APP_CONFIG from modules.shared.timeUtils import getUtcTimestamp @@ -175,6 +175,14 @@ def login( # Save access token userInterface.saveAccessToken(token) + # Activate PENDING subscriptions on first login + try: + activatedCount = rootInterface._activatePendingSubscriptions(str(user.id)) + if activatedCount > 0: + logger.info(f"Activated {activatedCount} pending subscription(s) for user {user.username}") + except Exception as subErr: + logger.error(f"Error activating subscriptions on login: {subErr}") + # Log successful login (app log file + audit DB for traceability) logger.info("Login successful for username=%s (userId=%s)", formData.username, str(user.id)) try: @@ -246,7 +254,9 @@ def login( def register_user( request: Request, userData: User = Body(...), - frontendUrl: str = Body(..., embed=True) + frontendUrl: str = Body(..., embed=True), + registrationType: str = Body("personal", embed=True), + companyName: str = Body(None, embed=True), ) -> Dict[str, Any]: """Register a new local user (magic link based - no password required). @@ -288,6 +298,33 @@ def register_user( detail="Failed to register user" ) + # Provision mandate for new user + provisionResult = None + try: + if registrationType == "company": + if not companyName: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="companyName is required for company registration" + ) + provisionResult = appInterface._provisionMandateForUser( + userId=str(user.id), + mandateType="company", + mandateName=companyName, + planKey="STANDARD_MONTHLY", + ) + else: + provisionResult = appInterface._provisionMandateForUser( + userId=str(user.id), + mandateType="personal", + mandateName=user.fullName or user.username, + planKey="TRIAL_7D", + ) + logger.info(f"Provisioned mandate for user {user.id}: {provisionResult}") + except Exception as provErr: + logger.error(f"Error provisioning mandate for user {user.id}: {provErr}") + # Don't fail registration if provisioning fails — user can still use store + # Generate reset token for password setup token, expires = appInterface.generateResetTokenAndExpiry() appInterface.setResetToken(user.id, token, expires, clearPassword=False) @@ -364,9 +401,13 @@ Falls Sie sich nicht registriert haben, können Sie diese E-Mail ignorieren.""" logger.warning(f"Failed to create notifications for pending invitations: {notifErr}") # Don't fail registration if notification creation fails - return { + responseData = { "message": "Registrierung erfolgreich! Bitte prüfen Sie Ihre E-Mail für den Link zum Setzen Ihres Passworts." 
} + if provisionResult: + responseData["mandateId"] = provisionResult.get("mandateId") + responseData["mandateType"] = provisionResult.get("mandateType") + return responseData except ValueError as e: raise HTTPException( @@ -652,6 +693,58 @@ Falls Sie diese Anforderung nicht gestellt haben, können Sie diese E-Mail ignor "message": "Falls ein Konto mit diesem Benutzernamen existiert, wurde ein Reset-Link an die hinterlegte E-Mail-Adresse gesendet." } +@router.post("/onboarding") +@limiter.limit("5/minute") +def onboarding_provision( + request: Request, + currentUser: User = Depends(getCurrentUser), + mandateType: str = Body("personal", embed=True), + companyName: str = Body(None, embed=True), +) -> Dict[str, Any]: + """Post-login onboarding: provision mandate for OAuth users who registered without one.""" + try: + appInterface = getRootInterface() + + userMandates = appInterface.getUserMandates(str(currentUser.id)) + hasOwnMandate = False + for um in userMandates: + mandate = appInterface.getMandate(um.mandateId) + if mandate and not mandate.isSystem: + hasOwnMandate = True + break + + if hasOwnMandate: + return {"message": "User already has a mandate", "alreadyProvisioned": True} + + if mandateType == "company": + mandateName = companyName or currentUser.fullName or currentUser.username + planKey = "STANDARD_MONTHLY" + else: + mandateName = currentUser.fullName or currentUser.username + planKey = "TRIAL_7D" + + result = appInterface._provisionMandateForUser( + userId=str(currentUser.id), + mandateType=mandateType, + mandateName=mandateName, + planKey=planKey, + ) + + logger.info(f"Onboarding provision for {currentUser.username}: {result}") + return { + "message": "Mandate provisioned successfully", + "mandateId": result.get("mandateId"), + "mandateType": result.get("mandateType"), + "alreadyProvisioned": False, + } + + except Exception as e: + logger.error(f"Onboarding provision failed: {e}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e) + ) + + @router.post("/password-reset") @limiter.limit("10/minute") def password_reset( @@ -710,3 +803,72 @@ def password_reset( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Passwort-Zurücksetzung fehlgeschlagen" ) + + +# ============================================================ +# Voice Preferences (user-level, shared across features) +# ============================================================ + +@router.get("/voice-preferences") +@limiter.limit("60/minute") +def getVoicePreferences( + request: Request, + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + """Get user's voice/language preferences (optionally scoped to mandate via header).""" + try: + rootInterface = getRootInterface() + from modules.datamodels.datamodelUam import UserVoicePreferences + + mandateId = request.headers.get("X-Mandate-Id") or None + + prefs = rootInterface.db.getRecordset( + UserVoicePreferences, + recordFilter={"userId": str(currentUser.id), "mandateId": mandateId} + ) + if prefs: + return prefs[0] if isinstance(prefs[0], dict) else prefs[0].model_dump() + return UserVoicePreferences(userId=str(currentUser.id), mandateId=mandateId).model_dump() + except Exception as e: + logger.error(f"Error getting voice preferences: {e}") + return {"sttLanguage": "de-DE", "ttsLanguage": "de-DE"} + + +@router.put("/voice-preferences") +@limiter.limit("30/minute") +def updateVoicePreferences( + request: Request, + preferences: Dict[str, Any] = Body(...), + currentUser: User = Depends(getCurrentUser), +) -> 
Dict[str, Any]: + """Update user's voice/language preferences.""" + try: + rootInterface = getRootInterface() + from modules.datamodels.datamodelUam import UserVoicePreferences + + mandateId = request.headers.get("X-Mandate-Id") or None + userId = str(currentUser.id) + + existing = rootInterface.db.getRecordset( + UserVoicePreferences, + recordFilter={"userId": userId, "mandateId": mandateId} + ) + + allowedFields = { + "sttLanguage", "ttsLanguage", "ttsVoice", "ttsVoiceMap", + "translationSourceLanguage", "translationTargetLanguage", + } + updateData = {k: v for k, v in preferences.items() if k in allowedFields} + + if existing: + existingRecord = existing[0] + existingId = existingRecord.get("id") if isinstance(existingRecord, dict) else existingRecord.id + rootInterface.db.recordModify(UserVoicePreferences, existingId, updateData) + return {"message": "Updated", **updateData} + else: + newPrefs = UserVoicePreferences(userId=userId, mandateId=mandateId, **updateData) + created = rootInterface.db.recordCreate(UserVoicePreferences, newPrefs.model_dump()) + return {"message": "Created", **(created if isinstance(created, dict) else created.model_dump())} + except Exception as e: + logger.error(f"Error updating voice preferences: {e}") + raise HTTPException(status_code=500, detail=str(e)) diff --git a/modules/routes/routeStore.py b/modules/routes/routeStore.py index 087b68d2..99c582c6 100644 --- a/modules/routes/routeStore.py +++ b/modules/routes/routeStore.py @@ -2,16 +2,12 @@ # All rights reserved. """ Feature Store routes. -Allows users to self-activate features in the root mandate's shared instances. - -Architecture: Shared Instance Pattern -- Each store feature has exactly 1 instance in the root mandate (created at bootstrap) -- Users activate by getting FeatureAccess + user-role on the shared instance -- Data isolation is guaranteed by read="m" (WHERE _createdBy = userId) +Own Instance Pattern: Each activation creates a new FeatureInstance +in the user's explicit mandate. Supports Orphan Control. """ from fastapi import APIRouter, HTTPException, Depends, Request -from typing import List, Dict, Any +from typing import List, Dict, Any, Optional from fastapi import status import logging from pydantic import BaseModel, Field @@ -19,8 +15,9 @@ from pydantic import BaseModel, Field from modules.auth import limiter, getRequestContext, RequestContext from modules.datamodels.datamodelFeatures import FeatureInstance from modules.datamodels.datamodelMembership import FeatureAccess, FeatureAccessRole -from modules.datamodels.datamodelRbac import AccessRuleContext +from modules.datamodels.datamodelRbac import AccessRuleContext, Role from modules.datamodels.datamodelUam import Mandate +from modules.datamodels.datamodelMembership import UserMandate, UserMandateRole from modules.interfaces.interfaceDbApp import getRootInterface from modules.interfaces.interfaceFeatures import getFeatureInterface from modules.security.rbacCatalog import getCatalogService @@ -38,7 +35,15 @@ router = APIRouter( class StoreActivateRequest(BaseModel): """Request model for activating a store feature.""" - featureCode: str = Field(..., description="Feature code to activate (e.g., 'automation')") + featureCode: str = Field(..., description="Feature code to activate") + mandateId: Optional[str] = Field(None, description="Target mandate ID (explicit). 
If None and user has no admin mandate, auto-creates personal mandate.") + + +class StoreDeactivateRequest(BaseModel): + """Request model for deactivating a store feature.""" + featureCode: str = Field(..., description="Feature code to deactivate") + mandateId: str = Field(..., description="Mandate ID") + instanceId: str = Field(..., description="FeatureInstance ID to deactivate") class StoreFeatureResponse(BaseModel): @@ -47,21 +52,12 @@ class StoreFeatureResponse(BaseModel): label: Dict[str, str] icon: str description: Dict[str, str] = {} - isActive: bool + instances: List[Dict[str, Any]] = [] canActivate: bool - instanceId: str | None = None - - -def _getRootMandateId(db) -> str | None: - """Find the root mandate ID.""" - mandates = db.getRecordset(Mandate, recordFilter={"name": "root", "isSystem": True}) - if mandates: - return mandates[0].get("id") - return None def _getStoreFeatures(catalogService) -> List[Dict[str, Any]]: - """Get all features that are available in the store (have resource.store.* entries).""" + """Get all features available in the store.""" resourceObjects = catalogService.getResourceObjects() storeFeatures = [] for obj in resourceObjects: @@ -75,75 +71,133 @@ def _getStoreFeatures(catalogService) -> List[Dict[str, Any]]: return storeFeatures -def _checkStorePermission(context: RequestContext, featureCode: str) -> bool: - """Check if user has RBAC permission to activate a store feature.""" - if context.hasSysAdminRole: - return True - - resourceItem = f"resource.store.{featureCode}" - dbApp = getRootDbAppConnector() - rbacInstance = RbacClass(dbApp, dbApp=dbApp) - permissions = rbacInstance.getUserPermissions( - context.user, - AccessRuleContext.RESOURCE, - resourceItem, - mandateId=str(context.mandateId) if context.mandateId else None, - ) - return permissions.view +def _isUserAdminInMandate(db, userId: str, mandateId: str) -> bool: + """Check if user has admin role in a mandate.""" + userMandates = db.getRecordset(UserMandate, recordFilter={"userId": userId, "mandateId": mandateId, "enabled": True}) + if not userMandates: + return False + umId = userMandates[0].get("id") + umRoles = db.getRecordset(UserMandateRole, recordFilter={"userMandateId": umId}) + for umRole in umRoles: + roleId = umRole.get("roleId") + roles = db.getRecordset(Role, recordFilter={"id": roleId}) + for role in roles: + if "admin" in (role.get("roleLabel") or "").lower(): + return True + return False -def _findSharedInstance(db, rootMandateId: str, featureCode: str) -> Dict[str, Any] | None: - """Find the shared instance for a feature in the root mandate.""" - instances = db.getRecordset( - FeatureInstance, - recordFilter={"mandateId": rootMandateId, "featureCode": featureCode} - ) - return instances[0] if instances else None - - -def _getUserFeatureAccess(db, userId: str, instanceId: str) -> Dict[str, Any] | None: - """Check if user already has FeatureAccess for an instance.""" - accesses = db.getRecordset( - FeatureAccess, - recordFilter={"userId": userId, "featureInstanceId": instanceId} - ) - return accesses[0] if accesses else None - - -def _findStoreUserRoleId( - rootInterface, - catalogService, - instanceId: str, - featureCode: str, -) -> str | None: - """ - Resolve the feature's primary *user* role on this instance (e.g. workspace-user). - Uses catalog template labels first, then a safe fallback on instance roles. 
- """ - instanceRoles = rootInterface.getRolesByFeatureInstance(instanceId) - labelToId = {r.roleLabel: str(r.id) for r in instanceRoles if r.roleLabel} - - preferred = f"{featureCode}-user" - if preferred in labelToId: - return labelToId[preferred] - - for tpl in catalogService.getTemplateRoles(featureCode): - lbl = (tpl.get("roleLabel") or "").strip() - if not lbl: +def _getUserAdminMandateIds(db, userId: str) -> List[str]: + """Get all mandate IDs where user is admin.""" + userMandates = db.getRecordset(UserMandate, recordFilter={"userId": userId, "enabled": True}) + adminMandateIds = [] + for um in userMandates: + mandateId = um.get("mandateId") + mandate = db.getRecordset(Mandate, recordFilter={"id": mandateId}) + if mandate and mandate[0].get("isSystem"): continue - low = lbl.lower() - if "admin" in low: - continue - if lbl.endswith("-user") and lbl in labelToId: - return labelToId[lbl] + if _isUserAdminInMandate(db, userId, mandateId): + adminMandateIds.append(mandateId) + return adminMandateIds - for role in instanceRoles: - low = (role.roleLabel or "").lower() - if "admin" in low: - continue - if "user" in low: - return str(role.id) - return None + +def _getUserInstancesForFeature(db, userId: str, featureCode: str, mandateIds: List[str]) -> List[Dict[str, Any]]: + """Get user's active instances for a feature across their mandates.""" + instances = [] + for mandateId in mandateIds: + mandateInstances = db.getRecordset( + FeatureInstance, + recordFilter={"mandateId": mandateId, "featureCode": featureCode} + ) + for inst in mandateInstances: + instanceId = inst.get("id") + accesses = db.getRecordset( + FeatureAccess, + recordFilter={"userId": userId, "featureInstanceId": instanceId} + ) + if accesses: + mandate = db.getRecordset(Mandate, recordFilter={"id": mandateId}) + mandateName = mandate[0].get("label") or mandate[0].get("name") if mandate else mandateId + instances.append({ + "instanceId": instanceId, + "mandateId": mandateId, + "mandateName": mandateName, + "label": inst.get("label", ""), + "isActive": True, + }) + return instances + + +@router.get("/mandates", response_model=List[Dict[str, Any]]) +@limiter.limit("60/minute") +def listUserMandates( + request: Request, + context: RequestContext = Depends(getRequestContext) +) -> List[Dict[str, Any]]: + """List mandates where the user can activate features (admin mandates).""" + try: + rootInterface = getRootInterface() + db = rootInterface.db + userId = str(context.user.id) + adminMandateIds = _getUserAdminMandateIds(db, userId) + result = [] + for mid in adminMandateIds: + records = db.getRecordset(Mandate, recordFilter={"id": mid}) + if records: + m = records[0] + result.append({ + "id": mid, + "name": m.get("name", ""), + "label": m.get("label") or m.get("name", ""), + "mandateType": m.get("mandateType", "company"), + }) + return result + except Exception as e: + logger.error(f"Error listing user mandates: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +@router.get("/subscription-info", response_model=Dict[str, Any]) +@limiter.limit("60/minute") +def getSubscriptionInfo( + request: Request, + mandateId: str = None, + context: RequestContext = Depends(getRequestContext) +) -> Dict[str, Any]: + """Get subscription info for a mandate (plan, limits).""" + try: + rootInterface = getRootInterface() + db = rootInterface.db + userId = str(context.user.id) + + if not mandateId: + adminMandateIds = _getUserAdminMandateIds(db, userId) + if adminMandateIds: + mandateId = adminMandateIds[0] + + if not mandateId: + return 
{"plan": None, "maxDataVolumeMB": None, "maxFeatureInstances": None} + + from modules.datamodels.datamodelSubscription import MandateSubscription, BUILTIN_PLANS + subs = db.getRecordset(MandateSubscription, recordFilter={"mandateId": mandateId}) + if not subs: + return {"plan": None, "maxDataVolumeMB": None, "maxFeatureInstances": None} + + sub = subs[0] + plan = BUILTIN_PLANS.get(sub.get("planKey")) + currentInstances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) + + return { + "plan": sub.get("planKey"), + "status": sub.get("status"), + "maxDataVolumeMB": plan.maxDataVolumeMB if plan else None, + "maxFeatureInstances": plan.maxFeatureInstances if plan else None, + "currentFeatureInstances": len(currentInstances), + "trialEndsAt": sub.get("trialEndsAt"), + } + except Exception as e: + logger.error(f"Error getting subscription info: {e}") + return {"plan": None, "maxDataVolumeMB": None, "maxFeatureInstances": None} @router.get("/features", response_model=List[StoreFeatureResponse]) @@ -152,47 +206,33 @@ def listStoreFeatures( request: Request, context: RequestContext = Depends(getRequestContext) ) -> List[StoreFeatureResponse]: - """ - List all store features with activation status and permissions. - - Returns the store catalog showing which features are available, - which are already activated, and whether the user can activate them. - """ + """List all store features with activation status per mandate.""" try: rootInterface = getRootInterface() db = rootInterface.db catalogService = getCatalogService() + userId = str(context.user.id) - rootMandateId = _getRootMandateId(db) - if not rootMandateId: - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Root mandate not found" - ) + userMandates = db.getRecordset(UserMandate, recordFilter={"userId": userId, "enabled": True}) + userMandateIds = [] + for um in userMandates: + mid = um.get("mandateId") + mRecord = db.getRecordset(Mandate, recordFilter={"id": mid}) + if mRecord and not mRecord[0].get("isSystem"): + userMandateIds.append(mid) storeFeatures = _getStoreFeatures(catalogService) - userId = str(context.user.id) result = [] for featureDef in storeFeatures: featureCode = featureDef["code"] - sharedInstance = _findSharedInstance(db, rootMandateId, featureCode) - instanceId = sharedInstance.get("id") if sharedInstance else None - - isActive = False - if instanceId: - existingAccess = _getUserFeatureAccess(db, userId, instanceId) - isActive = existingAccess is not None - - canActivate = _checkStorePermission(context, featureCode) and not isActive - + instances = _getUserInstancesForFeature(db, userId, featureCode, userMandateIds) result.append(StoreFeatureResponse( featureCode=featureCode, label=featureDef.get("label", {}), icon=featureDef.get("icon", "mdi-puzzle"), - isActive=isActive, - canActivate=canActivate, - instanceId=instanceId, + instances=instances, + canActivate=True, )) return result @@ -201,10 +241,7 @@ def listStoreFeatures( raise except Exception as e: logger.error(f"Error listing store features: {e}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"Failed to list store features: {str(e)}" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)) @router.post("/activate", response_model=Dict[str, Any]) @@ -215,10 +252,8 @@ def activateStoreFeature( context: RequestContext = Depends(getRequestContext) ) -> Dict[str, Any]: """ - Activate a store feature for the current user. 
- - Creates FeatureAccess + FeatureAccessRole on the shared instance - in the root mandate. The user gets the feature's user-level role. + Activate a store feature. Creates a new FeatureInstance in the target mandate. + If mandateId is None and user has no admin mandate, auto-creates a personal mandate. """ featureCode = data.featureCode userId = str(context.user.id) @@ -226,82 +261,94 @@ def activateStoreFeature( try: rootInterface = getRootInterface() db = rootInterface.db - - if not _checkStorePermission(context, featureCode): - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail=f"No permission to activate feature '{featureCode}'" - ) - catalogService = getCatalogService() + featureDef = catalogService.getFeatureDefinition(featureCode) if not featureDef: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Feature '{featureCode}' not found" - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Feature '{featureCode}' not found") - rootMandateId = _getRootMandateId(db) - if not rootMandateId: - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Root mandate not found" - ) + mandateId = data.mandateId - sharedInstance = _findSharedInstance(db, rootMandateId, featureCode) - if not sharedInstance: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Shared instance for '{featureCode}' not found in root mandate" - ) + # Auto-create personal mandate if user has no admin mandates + if not mandateId: + adminMandateIds = _getUserAdminMandateIds(db, userId) + if not adminMandateIds: + provisionResult = rootInterface._provisionMandateForUser( + userId=userId, + mandateType="personal", + mandateName=context.user.fullName or context.user.username, + planKey="TRIAL_7D", + ) + mandateId = provisionResult["mandateId"] + logger.info(f"Auto-created personal mandate {mandateId} for user {userId} via store") + elif len(adminMandateIds) == 1: + mandateId = adminMandateIds[0] + else: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="mandateId is required when user has multiple admin mandates" + ) - instanceId = sharedInstance.get("id") + # Verify user is admin in target mandate + if not _isUserAdminInMandate(db, userId, mandateId): + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Not admin in target mandate") - existingAccess = _getUserFeatureAccess(db, userId, instanceId) - if existingAccess: - raise HTTPException( - status_code=status.HTTP_409_CONFLICT, - detail=f"Feature '{featureCode}' is already active" - ) + # Check subscription capacity + from modules.datamodels.datamodelSubscription import MandateSubscription, BUILTIN_PLANS + subs = db.getRecordset(MandateSubscription, recordFilter={"mandateId": mandateId}) + if subs: + sub = subs[0] + plan = BUILTIN_PLANS.get(sub.get("planKey")) + if plan and plan.maxFeatureInstances is not None: + currentInstances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) + if len(currentInstances) >= plan.maxFeatureInstances: + raise HTTPException( + status_code=status.HTTP_402_PAYMENT_REQUIRED, + detail=f"Feature instance limit reached ({plan.maxFeatureInstances}). Upgrade your plan." 
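+                        # HTTP 402 signals a subscription limit; clients can read
+                        # current usage from the subscription-info endpoint above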
+ ) - featureAccess = FeatureAccess( - userId=userId, - featureInstanceId=instanceId, - enabled=True + # Create new FeatureInstance + featureInterface = getFeatureInterface(db) + featureLabel = featureDef.get("label", {}).get("en", featureCode) + instance = featureInterface.createFeatureInstance( + featureCode=featureCode, + mandateId=mandateId, + label=featureLabel, + enabled=True, + copyTemplateRoles=True, ) - createdAccess = db.recordCreate(FeatureAccess, featureAccess.model_dump()) - featureAccessId = createdAccess.get("id") - userRoleId = _findStoreUserRoleId(rootInterface, catalogService, instanceId, featureCode) - if not userRoleId: - db.recordDelete(FeatureAccess, featureAccessId) - logger.error( - f"Store activate rollback: no user role on instance {instanceId} for feature '{featureCode}'" - ) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=( - f"No '{featureCode}-user' (or equivalent) role found on the shared instance; " - "cannot grant store access. Contact an administrator." - ), - ) + if not instance: + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to create feature instance") - featureAccessRole = FeatureAccessRole( - featureAccessId=featureAccessId, - roleId=userRoleId - ) - db.recordCreate(FeatureAccessRole, featureAccessRole.model_dump()) + instanceId = instance.get("id") if isinstance(instance, dict) else instance.id - logger.info( - f"User {userId} activated store feature '{featureCode}' " - f"(instance={instanceId}, role={userRoleId})" - ) + # Grant FeatureAccess with admin role + instanceRoles = db.getRecordset(Role, recordFilter={"featureInstanceId": instanceId}) + adminRoleId = None + for ir in instanceRoles: + if "admin" in (ir.get("roleLabel") or "").lower(): + adminRoleId = ir.get("id") + break + + fa = FeatureAccess(userId=userId, featureInstanceId=instanceId, enabled=True) + createdFa = db.recordCreate(FeatureAccess, fa.model_dump()) + if adminRoleId and createdFa: + far = FeatureAccessRole(featureAccessId=createdFa["id"], roleId=adminRoleId) + db.recordCreate(FeatureAccessRole, far.model_dump()) + + # Sync subscription quantity + try: + rootInterface._syncSubscriptionQuantity(mandateId) + except Exception as e: + logger.warning(f"Failed to sync subscription quantity: {e}") + + logger.info(f"User {userId} activated '{featureCode}' in mandate {mandateId} (instance={instanceId})") return { "featureCode": featureCode, + "mandateId": mandateId, "instanceId": instanceId, - "featureAccessId": featureAccessId, - "roleId": userRoleId, "activated": True, } @@ -309,71 +356,67 @@ def activateStoreFeature( raise except Exception as e: logger.error(f"Error activating store feature '{featureCode}': {e}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"Failed to activate feature: {str(e)}" - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)) @router.post("/deactivate", response_model=Dict[str, Any]) @limiter.limit("10/minute") def deactivateStoreFeature( request: Request, - data: StoreActivateRequest, + data: StoreDeactivateRequest, context: RequestContext = Depends(getRequestContext) ) -> Dict[str, Any]: """ - Deactivate a store feature for the current user. - - Removes FeatureAccess (CASCADE deletes FeatureAccessRole). - User loses access immediately. + Deactivate a store feature. Removes user's FeatureAccess. + Orphan Control: if last user deactivates, FeatureInstance is deleted. 
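+
+    Example request body (illustrative IDs):
+        {"featureCode": "automation", "mandateId": "<mandate-id>", "instanceId": "<instance-id>"}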
""" - featureCode = data.featureCode userId = str(context.user.id) + instanceId = data.instanceId + mandateId = data.mandateId try: rootInterface = getRootInterface() db = rootInterface.db - rootMandateId = _getRootMandateId(db) - if not rootMandateId: - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Root mandate not found" - ) + # Verify instance exists in mandate + instances = db.getRecordset(FeatureInstance, recordFilter={"id": instanceId, "mandateId": mandateId}) + if not instances: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Feature instance not found in mandate") - sharedInstance = _findSharedInstance(db, rootMandateId, featureCode) - if not sharedInstance: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Shared instance for '{featureCode}' not found" - ) + # Find user's FeatureAccess + accesses = db.getRecordset(FeatureAccess, recordFilter={"userId": userId, "featureInstanceId": instanceId}) + if not accesses: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="No active access found") - instanceId = sharedInstance.get("id") - - existingAccess = _getUserFeatureAccess(db, userId, instanceId) - if not existingAccess: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Feature '{featureCode}' is not active" - ) - - featureAccessId = existingAccess.get("id") + featureAccessId = accesses[0].get("id") db.recordDelete(FeatureAccess, featureAccessId) - logger.info(f"User {userId} deactivated store feature '{featureCode}' (instance={instanceId})") + # Orphan Control: check if any FeatureAccess remains + remainingAccesses = db.getRecordset(FeatureAccess, recordFilter={"featureInstanceId": instanceId}) + instanceDeleted = False + if not remainingAccesses: + db.recordDelete(FeatureInstance, instanceId) + instanceDeleted = True + logger.info(f"Orphan Control: deleted instance {instanceId} (no remaining accesses)") + + # Sync subscription quantity + try: + rootInterface._syncSubscriptionQuantity(mandateId) + except Exception as e: + logger.warning(f"Failed to sync subscription quantity: {e}") + + logger.info(f"User {userId} deactivated instance {instanceId} in mandate {mandateId} (deleted={instanceDeleted})") return { - "featureCode": featureCode, + "featureCode": data.featureCode, + "mandateId": mandateId, "instanceId": instanceId, "deactivated": True, + "instanceDeleted": instanceDeleted, } except HTTPException: raise except Exception as e: - logger.error(f"Error deactivating store feature '{featureCode}': {e}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"Failed to deactivate feature: {str(e)}" - ) + logger.error(f"Error deactivating store feature: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)) diff --git a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py index 78c69ff3..cbea5631 100644 --- a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py +++ b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py @@ -363,6 +363,7 @@ class AgentService: featureInstanceId=featureInstanceId, mandateId=mandateId, workflowHintItems=workflowHintItems, + isSysAdmin=getattr(self.services.user, "isSysAdmin", False), ) except Exception as e: logger.debug(f"RAG context not available: {e}") diff --git a/modules/serviceCenter/services/serviceAi/mainServiceAi.py 
b/modules/serviceCenter/services/serviceAi/mainServiceAi.py index 09e2d708..494389ff 100644 --- a/modules/serviceCenter/services/serviceAi/mainServiceAi.py +++ b/modules/serviceCenter/services/serviceAi/mainServiceAi.py @@ -153,6 +153,9 @@ class AiService: 2. Balance & provider check before AI call 3. billingCallback on aiObjects: records one billing transaction per model call with exact provider + model name (set before AI call, invoked by _callWithModel) + + NEUTRALIZATION: If enabled, prompt text is neutralized before the AI call + and placeholders in the response are rehydrated afterwards. """ await self.ensureAiObjectsInitialized() @@ -172,6 +175,11 @@ class AiService: request.options = request.options.model_copy(update={'allowedProviders': effectiveProviders}) logger.debug(f"Effective allowedProviders for AI request: {effectiveProviders}") + # Neutralize prompt if enabled (before AI call) + _wasNeutralized = False + if self._shouldNeutralize(request): + request, _wasNeutralized = self._neutralizeRequest(request) + # Set billing callback on aiObjects BEFORE the AI call # This callback is invoked by _callWithModel() after EVERY individual model call # For parallel content parts (e.g., 200 MB doc), each model call creates its own transaction @@ -187,10 +195,18 @@ class AiService: finally: self.aiObjects.billingCallback = None + # Rehydrate neutralization placeholders in response + if _wasNeutralized and response and hasattr(response, 'content') and response.content: + response.content = self._rehydrateResponse(response.content) + return response async def callAiStream(self, request: AiCallRequest): - """Streaming variant of callAi. Yields str deltas during generation, then final AiCallResponse.""" + """Streaming variant of callAi. Yields str deltas during generation, then final AiCallResponse. + + NEUTRALIZATION: If enabled, prompt text is neutralized before streaming. + Rehydration happens on the final AiCallResponse (not on individual str deltas). + """ await self.ensureAiObjectsInitialized() self._preflightBillingCheck() await self._checkBillingBeforeAiCall() @@ -199,9 +215,17 @@ class AiService: if effectiveProviders and request.options: request.options = request.options.model_copy(update={'allowedProviders': effectiveProviders}) + # Neutralize prompt if enabled (before streaming) + _wasNeutralized = False + if self._shouldNeutralize(request): + request, _wasNeutralized = self._neutralizeRequest(request) + self.aiObjects.billingCallback = self._createBillingCallback() try: async for chunk in self.aiObjects.callWithTextContextStream(request): + # Rehydrate the final AiCallResponse (non-str chunks are the final response) + if _wasNeutralized and not isinstance(chunk, str) and hasattr(chunk, 'content') and chunk.content: + chunk.content = self._rehydrateResponse(chunk.content) yield chunk finally: self.aiObjects.billingCallback = None @@ -511,6 +535,60 @@ detectedIntent-Werte: return basePrompt + # ========================================================================= + # NEUTRALIZATION: Centralized prompt neutralization / response rehydration + # ========================================================================= + + def _shouldNeutralize(self, request: AiCallRequest) -> bool: + """Check if this AI request should have neutralization applied. 
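+        Gated by the neutralization service config (enabled flag) and the
+        presence of prompt/messages on the request.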
+ Only applies to text prompts — not embeddings or image processing.""" + try: + neutralSvc = self._get_service("neutralization") + if not neutralSvc: + return False + config = neutralSvc.getConfig() if hasattr(neutralSvc, 'getConfig') else None + if not config or not getattr(config, 'enabled', False): + return False + if not request.prompt and not request.messages: + return False + return True + except Exception: + return False + + def _neutralizeRequest(self, request: AiCallRequest) -> Tuple[AiCallRequest, bool]: + """Neutralize the prompt text in an AiCallRequest. + Returns (modifiedRequest, wasNeutralized).""" + try: + neutralSvc = self._get_service("neutralization") + if not neutralSvc or not hasattr(neutralSvc, 'processText'): + return request, False + + if request.prompt: + result = neutralSvc.processText(request.prompt) + if result and result.get("neutralized_text"): + request.prompt = result["neutralized_text"] + logger.debug("Neutralized prompt in AiCallRequest") + return request, True + + return request, False + except Exception as e: + logger.warning(f"Request neutralization failed: {e}") + return request, False + + def _rehydrateResponse(self, responseText: str) -> str: + """Replace neutralization placeholders with original values in AI response.""" + if not responseText: + return responseText + try: + neutralSvc = self._get_service("neutralization") + if not neutralSvc or not hasattr(neutralSvc, 'resolveText'): + return responseText + resolved = neutralSvc.resolveText(responseText) + return resolved if resolved else responseText + except Exception as e: + logger.warning(f"Response rehydration failed: {e}") + return responseText + def _preflightBillingCheck(self) -> None: """ Pre-flight billing validation - like a 0 CHF credit card authorization check. diff --git a/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py b/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py index d6943c58..77e8530e 100644 --- a/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py +++ b/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py @@ -110,6 +110,49 @@ class KnowledgeService: # 2. 
Chunk text content objects and create embeddings textObjects = [o for o in contentObjects if o.get("contentType") == "text"] + + # Check if file requires neutralization + _shouldNeutralize = False + try: + from modules.datamodels.datamodelFiles import FileItem as _FileItem + _dbComponent = getattr(self._context, 'interfaceDbComponent', None) + _fileRecords = _dbComponent.getRecordset(_FileItem, recordFilter={"id": fileId}) if _dbComponent else [] + if _fileRecords: + _fileRecord = _fileRecords[0] + _shouldNeutralize = ( + _fileRecord.get("neutralize", False) if isinstance(_fileRecord, dict) + else getattr(_fileRecord, "neutralize", False) + ) + except Exception: + pass + + if _shouldNeutralize and textObjects: + _neutralizedObjects = [] + try: + _neutralSvc = self._getService("neutralization") + except Exception: + _neutralSvc = None + if _neutralSvc: + for _obj in textObjects: + _textContent = (_obj.get("data", "") or "").strip() + if not _textContent: + continue + try: + _neutralResult = _neutralSvc.processText( + _textContent, userId=userId, featureInstanceId=featureInstanceId + ) + if _neutralResult and _neutralResult.get("neutralized_text"): + _obj["data"] = _neutralResult["neutralized_text"] + _neutralizedObjects.append(_obj) + else: + logger.warning(f"Neutralization failed for file {fileId}, skipping text object (fail-safe)") + except Exception as e: + logger.warning(f"Neutralization error for file {fileId}: {e}, skipping text object (fail-safe)") + textObjects = _neutralizedObjects + else: + logger.warning(f"Neutralization required for file {fileId} but service unavailable, skipping text indexing") + textObjects = [] + if textObjects: self._knowledgeDb.updateFileStatus(fileId, "embedding") chunks = _chunkForEmbedding(textObjects, maxTokens=DEFAULT_CHUNK_TOKENS) @@ -155,6 +198,12 @@ class KnowledgeService: self._knowledgeDb.updateFileStatus(fileId, "indexed") index.status = "indexed" + if _shouldNeutralize: + try: + index.neutralizationStatus = "completed" + self._knowledgeDb.upsertFileContentIndex(index) + except Exception as e: + logger.debug(f"Could not set neutralizationStatus for file {fileId}: {e}") logger.info(f"Indexed file {fileId} ({fileName}): {len(contentObjects)} objects, {len(textObjects)} text chunks") return index @@ -171,6 +220,7 @@ class KnowledgeService: mandateId: str = "", contextBudget: int = DEFAULT_CONTEXT_BUDGET, workflowHintItems: List[Dict[str, Any]] = None, + isSysAdmin: bool = False, ) -> str: """Build RAG context for an agent round by searching all layers. 
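+
+        isSysAdmin is forwarded to the scope-aware semanticSearch calls so the
+        personal/featureInstance/mandate/global scope filtering can account for it.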
@@ -217,13 +267,15 @@ class KnowledgeService: maxChars=2000, ) - # Layer 1: Instance Layer (user's own documents, highest priority) + # Layer 1: Scope-based document search (personal + instance + mandate + global) instanceChunks = self._knowledgeDb.semanticSearch( queryVector=queryVector, userId=userId, featureInstanceId=featureInstanceId, + mandateId=mandateId, limit=15, minScore=0.65, + isSysAdmin=isSysAdmin, ) if instanceChunks: builder.add(priority=1, label="Relevant Documents", items=instanceChunks, maxChars=4000) @@ -271,6 +323,7 @@ class KnowledgeService: isShared=True, limit=10, minScore=0.7, + isSysAdmin=isSysAdmin, ) if sharedChunks: builder.add(priority=4, label="Shared Knowledge", items=sharedChunks, maxChars=2000) diff --git a/modules/workflows/methods/methodContext/actions/neutralizeData.py b/modules/workflows/methods/methodContext/actions/neutralizeData.py index d5ec045b..a1fc6b91 100644 --- a/modules/workflows/methods/methodContext/actions/neutralizeData.py +++ b/modules/workflows/methods/methodContext/actions/neutralizeData.py @@ -172,13 +172,13 @@ async def neutralizeData(self, parameters: Dict[str, Any]) -> ActionResult: ) neutralizedParts.append(neutralizedPart) else: - # Neutralization failed, use original part - logger.warning(f"Neutralization did not return neutralized_text for part {part.id}") - neutralizedParts.append(part) + # Fail-Safe: neutralization incomplete, skip this part + logger.warning(f"Fail-Safe: Neutralization incomplete for part {part.id}, SKIPPING (not passing original)") + continue except Exception as e: - logger.error(f"Error neutralizing part {part.id}: {str(e)}") - # On error, use original part - neutralizedParts.append(part) + logger.error(f"Fail-Safe: Error neutralizing part {part.id}, SKIPPING document (not passing original): {str(e)}") + # Fail-Safe: do NOT pass original data to AI + continue else: # No data to neutralize, keep original part neutralizedParts.append(part) diff --git a/modules/workflows/workflowManager.py b/modules/workflows/workflowManager.py index b9b64a9a..de332c31 100644 --- a/modules/workflows/workflowManager.py +++ b/modules/workflows/workflowManager.py @@ -351,7 +351,13 @@ class WorkflowManager: if documents: for i, doc in enumerate(documents, 1): docListText += f"\n{i}. {doc.fileName} ({doc.mimeType}, {doc.fileSize} bytes)" - + + _userId = getattr(getattr(self.services, 'user', None), 'id', '') or '' + _featureInstanceId = getattr(self.services, 'featureInstanceId', '') or '' + _promptForAnalysis, _wasNeutralized, _mappingId = await self._neutralizePromptIfRequired( + userPrompt, userId=_userId, featureInstanceId=_featureInstanceId + ) + analysisPrompt = f"""You are an input analyzer. From the user's message, perform ALL of the following in one pass: 1. detectedLanguage: Detect ISO 639-1 language code (e.g., de, en, fr, it) @@ -401,7 +407,7 @@ Return ONLY JSON (no markdown) with this exact structure: The following is the user's original input message. Analyze intent, normalize the request, and determine complexity: ################ USER INPUT START ################# -{userPrompt.replace('{', '{{').replace('}', '}}') if userPrompt else ''} +{_promptForAnalysis.replace('{', '{{').replace('}', '}}') if _promptForAnalysis else ''} ################ USER INPUT FINISH ################# """ @@ -419,6 +425,12 @@ The following is the user's original input message. 
Analyze intent, normalize th jsonEnd = aiResponse.rfind('}') + 1 if aiResponse else 0 if jsonStart != -1 and jsonEnd > jsonStart: result = json.loads(aiResponse[jsonStart:jsonEnd]) + if _wasNeutralized: + for _field in ('normalizedRequest', 'intent', 'workflowName'): + if _field in result and result[_field]: + result[_field] = await self._rehydrateResponseIfNeeded( + result[_field], True, userId=_userId, featureInstanceId=_featureInstanceId + ) return result else: logger.warning("Could not parse combined analysis response, using defaults") @@ -1353,6 +1365,38 @@ The following is the user's original input message. Analyze intent, normalize th """Set user language for the service center""" self.services.user.language = language + async def _neutralizePromptIfRequired(self, prompt: str, userId: str, featureInstanceId: str) -> tuple: + """Neutralize prompt text if the workflow context requires it. + Returns (processedPrompt, wasNeutralized, mappingId).""" + try: + _neutralSvc = getattr(self.services, 'neutralization', None) + if not _neutralSvc: + return prompt, False, None + _config = _neutralSvc.getConfig() if hasattr(_neutralSvc, 'getConfig') else None + if not _config or not getattr(_config, 'enabled', False): + return prompt, False, None + _result = _neutralSvc.processText(prompt, userId=userId, featureInstanceId=featureInstanceId) + if _result and _result.get("neutralized_text"): + return _result["neutralized_text"], True, _result.get("mappingId") + return prompt, False, None + except Exception as e: + logger.warning(f"Prompt neutralization failed: {e}") + return prompt, False, None + + async def _rehydrateResponseIfNeeded(self, response: str, wasNeutralized: bool, userId: str, featureInstanceId: str) -> str: + """Replace placeholders in AI response with original values.""" + if not wasNeutralized or not response: + return response + try: + _neutralSvc = getattr(self.services, 'neutralization', None) + if not _neutralSvc: + return response + _rehydrated = _neutralSvc.resolveText(response, userId=userId, featureInstanceId=featureInstanceId) + return _rehydrated if _rehydrated else response + except Exception as e: + logger.warning(f"Response re-hydration failed: {e}") + return response + async def _neutralizeContentIfEnabled(self, contentBytes: bytes, mimeType: str) -> bytes: """Neutralize content if neutralization is enabled in user settings""" try: diff --git a/tests/test_phase123_basic.py b/tests/test_phase123_basic.py new file mode 100644 index 00000000..18c4188f --- /dev/null +++ b/tests/test_phase123_basic.py @@ -0,0 +1,323 @@ +""" +Basic verification tests for Phase 1-3 implementation. 
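+Covers: data models, scope fields, RAG scope plumbing, neutralization fail-safe,
+store/provisioning/registration routes, and frontend integration markers.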
+Run with: python tests/test_phase123_basic.py +Requires: gateway running on localhost:8000 +""" +import sys +import os +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +print("=" * 60) +print("PHASE 1-3 BASIC VERIFICATION") +print("=" * 60) + +errors = [] +passes = [] + +def _check(label, condition, detail=""): + if condition: + passes.append(label) + print(f" [PASS] {label}") + else: + errors.append(f"{label}: {detail}") + print(f" [FAIL] {label} — {detail}") + +# ── Phase 1: Data Models ────────────────────────────────────────────────────── +print("\n--- Phase 1: Data Models ---") + +try: + from modules.datamodels.datamodelUam import Mandate, MandateType + _check("MandateType Enum exists", hasattr(MandateType, "SYSTEM")) + _check("MandateType values", set(MandateType) == {MandateType.SYSTEM, MandateType.PERSONAL, MandateType.COMPANY}) + m = Mandate(name="test", label="test", mandateType="personal") + _check("Mandate has mandateType field", hasattr(m, "mandateType")) + _check("Mandate mandateType coercion", m.mandateType == MandateType.PERSONAL) +except Exception as e: + errors.append(f"Phase 1 DataModel: {e}") + print(f" [FAIL] Phase 1 DataModel import: {e}") + +try: + from modules.datamodels.datamodelSubscription import SubscriptionStatusEnum, BUILTIN_PLANS, SubscriptionPlan + _check("PENDING status exists", hasattr(SubscriptionStatusEnum, "PENDING")) + _check("BUILTIN_PLANS has TRIAL_7D", "TRIAL_7D" in BUILTIN_PLANS) + trial = BUILTIN_PLANS["TRIAL_7D"] + _check("TRIAL_7D has maxDataVolumeMB", hasattr(trial, "maxDataVolumeMB")) + _check("TRIAL_7D maxDataVolumeMB=500", trial.maxDataVolumeMB == 500) +except Exception as e: + errors.append(f"Phase 1 Subscription: {e}") + print(f" [FAIL] Phase 1 Subscription: {e}") + +# ── Phase 2: Scope Fields ───────────────────────────────────────────────────── +print("\n--- Phase 2: Scope Fields on Models ---") + +try: + from modules.datamodels.datamodelFiles import FileItem + fi = FileItem(fileName="test.txt", mimeType="text/plain", fileHash="abc", fileSize=100) + _check("FileItem has scope field", hasattr(fi, "scope")) + _check("FileItem scope default=personal", fi.scope == "personal") + _check("FileItem has neutralize field", hasattr(fi, "neutralize")) + _check("FileItem neutralize default=False", fi.neutralize == False) +except Exception as e: + errors.append(f"Phase 2 FileItem: {e}") + print(f" [FAIL] Phase 2 FileItem: {e}") + +try: + from modules.datamodels.datamodelDataSource import DataSource + ds = DataSource(connectionId="c1", sourceType="sharepoint", path="/test", label="Test") + _check("DataSource has scope field", hasattr(ds, "scope")) + _check("DataSource scope default=personal", ds.scope == "personal") + _check("DataSource has neutralize field", hasattr(ds, "neutralize")) + _check("DataSource neutralize default=False", ds.neutralize == False) +except Exception as e: + errors.append(f"Phase 2 DataSource: {e}") + print(f" [FAIL] Phase 2 DataSource: {e}") + +try: + from modules.datamodels.datamodelKnowledge import FileContentIndex + fci = FileContentIndex(userId="u1", fileName="test.txt", mimeType="text/plain") + _check("FileContentIndex has scope field", hasattr(fci, "scope")) + _check("FileContentIndex scope default=personal", fci.scope == "personal") + _check("FileContentIndex has neutralizationStatus", hasattr(fci, "neutralizationStatus")) + _check("FileContentIndex neutralizationStatus default=None", fci.neutralizationStatus is None) +except Exception as e: + errors.append(f"Phase 2 FileContentIndex: 
{e}") + print(f" [FAIL] Phase 2 FileContentIndex: {e}") + +# ── Phase 2: RAG Scope Filtering ────────────────────────────────────────────── +print("\n--- Phase 2: RAG Scope Logic ---") + +try: + from modules.interfaces.interfaceDbKnowledge import KnowledgeObjects + _check("KnowledgeObjects has _getScopedFileIds", hasattr(KnowledgeObjects, "_getScopedFileIds")) + _check("KnowledgeObjects has _buildScopeFilter", hasattr(KnowledgeObjects, "_buildScopeFilter")) + + import inspect + sig = inspect.signature(KnowledgeObjects._getScopedFileIds) + params = list(sig.parameters.keys()) + _check("_getScopedFileIds has isSysAdmin param", "isSysAdmin" in params) + + sig2 = inspect.signature(KnowledgeObjects.semanticSearch) + params2 = list(sig2.parameters.keys()) + _check("semanticSearch has scope param", "scope" in params2) + _check("semanticSearch has isSysAdmin param", "isSysAdmin" in params2) +except Exception as e: + errors.append(f"Phase 2 RAG: {e}") + print(f" [FAIL] Phase 2 RAG: {e}") + +# ── Phase 3: Neutralization Methods ─────────────────────────────────────────── +print("\n--- Phase 3: Neutralization Integration ---") + +try: + from modules.workflows.workflowManager import WorkflowManager + _check("WorkflowManager has _neutralizePromptIfRequired", hasattr(WorkflowManager, "_neutralizePromptIfRequired")) + _check("WorkflowManager has _rehydrateResponseIfNeeded", hasattr(WorkflowManager, "_rehydrateResponseIfNeeded")) + + import inspect + sig_n = inspect.signature(WorkflowManager._neutralizePromptIfRequired) + _check("_neutralizePromptIfRequired is async", inspect.iscoroutinefunction(WorkflowManager._neutralizePromptIfRequired)) + + sig_r = inspect.signature(WorkflowManager._rehydrateResponseIfNeeded) + _check("_rehydrateResponseIfNeeded is async", inspect.iscoroutinefunction(WorkflowManager._rehydrateResponseIfNeeded)) +except Exception as e: + errors.append(f"Phase 3 WorkflowManager: {e}") + print(f" [FAIL] Phase 3 WorkflowManager: {e}") + +# ── Phase 3: Fail-Safe Logic ────────────────────────────────────────────────── +print("\n--- Phase 3: Fail-Safe Logic ---") + +try: + import ast + with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), + "modules", "workflows", "methods", "methodContext", "actions", "neutralizeData.py"), "r") as f: + source = f.read() + _check("neutralizeData.py has 'SKIPPING' fail-safe", "SKIPPING" in source) + _check("neutralizeData.py has 'do NOT pass original' comment", "do NOT pass original" in source.lower() or "not passing original" in source.lower()) + _check("neutralizeData.py uses continue for skip", "continue" in source) +except Exception as e: + errors.append(f"Phase 3 Fail-Safe: {e}") + print(f" [FAIL] Phase 3 Fail-Safe: {e}") + +# ── Phase 2: Route Endpoints ────────────────────────────────────────────────── +print("\n--- Phase 2: API Endpoints ---") + +try: + import ast + with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), + "modules", "routes", "routeDataFiles.py"), "r") as f: + source = f.read() + _check("routeDataFiles has PATCH scope endpoint", "updateFileScope" in source) + _check("routeDataFiles has PATCH neutralize endpoint", "updateFileNeutralize" in source) + _check("routeDataFiles checks global sysAdmin", "hasSysAdminRole" in source or "sysadmin" in source.lower()) +except Exception as e: + errors.append(f"Phase 2 Routes: {e}") + print(f" [FAIL] Phase 2 Routes: {e}") + +# ── Phase 1: Store Endpoints ────────────────────────────────────────────────── +print("\n--- Phase 1: Store 
Endpoints ---") + +try: + with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), + "modules", "routes", "routeStore.py"), "r") as f: + source = f.read() + _check("routeStore has listUserMandates", "listUserMandates" in source or "list_user_mandates" in source) + _check("routeStore has getSubscriptionInfo", "getSubscriptionInfo" in source or "get_subscription_info" in source) + _check("routeStore has orphan control", "orphan" in source.lower() or "last" in source.lower()) +except Exception as e: + errors.append(f"Phase 1 Store: {e}") + print(f" [FAIL] Phase 1 Store: {e}") + +# ── Phase 1: Provisioning ───────────────────────────────────────────────────── +print("\n--- Phase 1: Provisioning ---") + +try: + with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), + "modules", "interfaces", "interfaceDbApp.py"), "r") as f: + source = f.read() + _check("interfaceDbApp has _provisionMandateForUser", "_provisionMandateForUser" in source) + _check("interfaceDbApp has _activatePendingSubscriptions", "_activatePendingSubscriptions" in source) + _check("interfaceDbApp has deleteMandate cascade", "deleteMandate" in source and "cascade" in source.lower()) +except Exception as e: + errors.append(f"Phase 1 Provisioning: {e}") + print(f" [FAIL] Phase 1 Provisioning: {e}") + +# ── Phase 1: Registration Routes ────────────────────────────────────────────── +print("\n--- Phase 1: Registration ---") + +try: + with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), + "modules", "routes", "routeSecurityLocal.py"), "r") as f: + source = f.read() + _check("routeSecurityLocal has registrationType", "registrationType" in source) + _check("routeSecurityLocal has companyName", "companyName" in source) + _check("routeSecurityLocal has onboarding endpoint", "onboarding" in source) +except Exception as e: + errors.append(f"Phase 1 Registration: {e}") + print(f" [FAIL] Phase 1 Registration: {e}") + +# ── Phase 1: Migration ──────────────────────────────────────────────────────── +print("\n--- Phase 1: Migration ---") + +try: + with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), + "modules", "migration", "migrateRootUsers.py"), "r") as f: + source = f.read() + _check("Migration script exists", True) + _check("Migration has _isMigrationCompleted", "_isMigrationCompleted" in source) + _check("Migration has migrateRootUsers", "migrateRootUsers" in source) +except Exception as e: + errors.append(f"Phase 1 Migration: {e}") + print(f" [FAIL] Phase 1 Migration: {e}") + +# ── Fix 1: OnboardingWizard Integration ──────────────────────────────────────── +print("\n--- Fix 1: OnboardingWizard Integration ---") + +try: + loginPath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), + "..", "frontend_nyla", "src", "pages", "Login.tsx") + with open(loginPath, "r", encoding="utf-8") as f: + source = f.read() + _check("Login.tsx imports OnboardingWizard", "OnboardingWizard" in source) + _check("Login.tsx has showOnboardingWizard state", "showOnboardingWizard" in source) + _check("Login.tsx checks isNewUser", "isNewUser" in source) +except Exception as e: + errors.append(f"Fix 1: {e}") + print(f" [FAIL] Fix 1: {e}") + +# ── Fix 2: CommCoach UDB Integration ────────────────────────────────────────── +print("\n--- Fix 2: CommCoach UDB Integration ---") + +try: + dossierPath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), + "..", "frontend_nyla", "src", "pages", "views", 
"commcoach", "CommcoachDossierView.tsx") + with open(dossierPath, "r", encoding="utf-8") as f: + source = f.read() + _check("CommCoach imports UnifiedDataBar", "UnifiedDataBar" in source) + _check("CommCoach imports FilesTab", "FilesTab" in source) + _check("CommCoach no longer imports getDocumentsApi", "getDocumentsApi" not in source) + _check("CommCoach has UDB sidebar", "udbSidebar" in source or "UnifiedDataBar" in source) +except Exception as e: + errors.append(f"Fix 2: {e}") + print(f" [FAIL] Fix 2: {e}") + +# ── Fix 3: Neutralization Backend Endpoints ─────────────────────────────────── +print("\n--- Fix 3: Neutralization Backend Endpoints ---") + +try: + routePath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), + "modules", "features", "neutralization", "routeFeatureNeutralizer.py") + with open(routePath, "r") as f: + source = f.read() + _check("Neutralization has deleteAttribute endpoint", "deleteAttribute" in source or "delete_attribute" in source) + _check("Neutralization has retrigger endpoint", "retrigger" in source) + _check("Neutralization has single attribute delete", "single" in source or "attributeId" in source) +except Exception as e: + errors.append(f"Fix 3: {e}") + print(f" [FAIL] Fix 3: {e}") + +# ── Fix 4: Central AI Neutralization ────────────────────────────────────────── +print("\n--- Fix 4: Central AI Neutralization ---") + +try: + aiPath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), + "modules", "serviceCenter", "services", "serviceAi", "mainServiceAi.py") + with open(aiPath, "r") as f: + source = f.read() + _check("AiService has _shouldNeutralize", "_shouldNeutralize" in source) + _check("AiService has _neutralizeRequest", "_neutralizeRequest" in source) + _check("AiService has _rehydrateResponse", "_rehydrateResponse" in source) + _check("callAi uses neutralization", "_shouldNeutralize" in source and "_neutralizeRequest" in source) +except Exception as e: + errors.append(f"Fix 4: {e}") + print(f" [FAIL] Fix 4: {e}") + +# ── Fix 5: Voice Settings User Level ────────────────────────────────────────── +print("\n--- Fix 5: Voice Settings User Level ---") + +try: + from modules.datamodels.datamodelUam import UserVoicePreferences + uvp = UserVoicePreferences(userId="u1") + _check("UserVoicePreferences model exists", True) + _check("UserVoicePreferences has sttLanguage", hasattr(uvp, "sttLanguage")) + _check("UserVoicePreferences default sttLanguage=de-DE", uvp.sttLanguage == "de-DE") + _check("UserVoicePreferences has ttsVoice", hasattr(uvp, "ttsVoice")) +except Exception as e: + errors.append(f"Fix 5: {e}") + print(f" [FAIL] Fix 5: {e}") + +try: + with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), + "modules", "routes", "routeSecurityLocal.py"), "r") as f: + source = f.read() + _check("Voice preferences GET endpoint", "voice-preferences" in source and "getVoicePreferences" in source) + _check("Voice preferences PUT endpoint", "updateVoicePreferences" in source) +except Exception as e: + errors.append(f"Fix 5 Routes: {e}") + print(f" [FAIL] Fix 5 Routes: {e}") + +# ── Fix 6: RAG mandate-wide scope ───────────────────────────────────────────── +print("\n--- Fix 6: RAG mandate-wide scope ---") + +try: + knowledgePath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), + "modules", "serviceCenter", "services", "serviceKnowledge", "mainServiceKnowledge.py") + with open(knowledgePath, "r") as f: + source = f.read() + _check("buildAgentContext passes mandateId 
to semanticSearch", "mandateId=mandateId" in source) + _check("buildAgentContext has isSysAdmin param", "isSysAdmin" in source) +except Exception as e: + errors.append(f"Fix 6: {e}") + print(f" [FAIL] Fix 6: {e}") + +# ── Summary ─────────────────────────────────────────────────────────────────── +print("\n" + "=" * 60) +print(f"RESULTS: {len(passes)} passed, {len(errors)} failed") +print("=" * 60) + +if errors: + print("\nFAILURES:") + for e in errors: + print(f" - {e}") + sys.exit(1) +else: + print("\nALL CHECKS PASSED!") + sys.exit(0) From 96c94fae570b2be13421f9b03819d58a0ed3f34b Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Tue, 24 Mar 2026 14:17:18 +0100 Subject: [PATCH 02/33] unified data - step 1 --- .../workspace/interfaceFeatureWorkspace.py | 1 - .../workspace/routeFeatureWorkspace.py | 131 ------------------ 2 files changed, 132 deletions(-) diff --git a/modules/features/workspace/interfaceFeatureWorkspace.py b/modules/features/workspace/interfaceFeatureWorkspace.py index 56016ba2..525ac62e 100644 --- a/modules/features/workspace/interfaceFeatureWorkspace.py +++ b/modules/features/workspace/interfaceFeatureWorkspace.py @@ -14,7 +14,6 @@ from modules.features.workspace.datamodelFeatureWorkspace import WorkspaceUserSe from modules.interfaces.interfaceRbac import getRecordsetWithRBAC from modules.security.rbac import RbacClass from modules.shared.configuration import APP_CONFIG -from modules.shared.timeUtils import getUtcTimestamp logger = logging.getLogger(__name__) diff --git a/modules/features/workspace/routeFeatureWorkspace.py b/modules/features/workspace/routeFeatureWorkspace.py index 6b8c529b..1828cba6 100644 --- a/modules/features/workspace/routeFeatureWorkspace.py +++ b/modules/features/workspace/routeFeatureWorkspace.py @@ -1582,137 +1582,6 @@ async def synthesizeVoice( return JSONResponse({"audio": None, "note": "TTS via browser Speech Synthesis API recommended"}) -# ========================================================================= -# Voice Settings Endpoints -# ========================================================================= - -@router.get("/{instanceId}/settings/voice") -@limiter.limit("120/minute") -async def getVoiceSettings( - request: Request, - instanceId: str = Path(...), - context: RequestContext = Depends(getRequestContext), -): - """Load voice settings for the current user and instance.""" - _validateInstanceAccess(instanceId, context) - wsInterface = _getWorkspaceInterface(context, instanceId) - userId = str(context.user.id) - try: - vs = wsInterface.getVoiceSettings(userId) - if not vs: - logger.info(f"GET voice settings: not found for user={userId}, creating defaults") - vs = wsInterface.getOrCreateVoiceSettings(userId) - result = vs.model_dump() if vs else {} - mapKeys = list(result.get("ttsVoiceMap", {}).keys()) if result else [] - logger.info(f"GET voice settings for user={userId}: ttsVoiceMap languages={mapKeys}") - return JSONResponse(result) - except Exception as e: - logger.error(f"Failed to load voice settings for user={userId}: {e}", exc_info=True) - return JSONResponse({"ttsVoiceMap": {}}, status_code=200) - - -@router.put("/{instanceId}/settings/voice") -@limiter.limit("120/minute") -async def updateVoiceSettings( - request: Request, - instanceId: str = Path(...), - body: dict = Body(...), - context: RequestContext = Depends(getRequestContext), -): - """Update voice settings for the current user and instance.""" - _validateInstanceAccess(instanceId, context) - wsInterface = _getWorkspaceInterface(context, instanceId) - userId = 
str(context.user.id) - - try: - logger.info(f"PUT voice settings for user={userId}, instance={instanceId}, body keys={list(body.keys())}") - vs = wsInterface.getVoiceSettings(userId) - if not vs: - logger.info(f"No existing voice settings, creating new for user={userId}") - createData = { - "userId": userId, - "mandateId": str(context.mandateId) if context.mandateId else "", - "featureInstanceId": instanceId, - } - createData.update(body) - created = wsInterface.createVoiceSettings(createData) - logger.info(f"Created voice settings for user={userId}, ttsVoiceMap keys={list((created or {}).get('ttsVoiceMap', {}).keys())}") - return JSONResponse(created) - - updateData = {k: v for k, v in body.items() if k not in ("id", "userId", "mandateId", "featureInstanceId", "creationDate")} - logger.info(f"Updating voice settings for user={userId}, update keys={list(updateData.keys())}") - updated = wsInterface.updateVoiceSettings(userId, updateData) - logger.info(f"Updated voice settings for user={userId}, ttsVoiceMap keys={list((updated or {}).get('ttsVoiceMap', {}).keys())}") - return JSONResponse(updated) - except Exception as e: - logger.error(f"Failed to update voice settings for user={userId}: {e}", exc_info=True) - return JSONResponse({"error": str(e)}, status_code=500) - - -@router.get("/{instanceId}/voice/languages") -@limiter.limit("120/minute") -async def getVoiceLanguages( - request: Request, - instanceId: str = Path(...), - context: RequestContext = Depends(getRequestContext), -): - """Return available TTS languages.""" - mandateId, _ = _validateInstanceAccess(instanceId, context) - from modules.interfaces.interfaceVoiceObjects import getVoiceInterface - voiceInterface = getVoiceInterface(context.user, mandateId) - languagesResult = await voiceInterface.getAvailableLanguages() - languageList = languagesResult.get("languages", []) if isinstance(languagesResult, dict) else languagesResult - return JSONResponse({"languages": languageList}) - - -@router.get("/{instanceId}/voice/voices") -@limiter.limit("120/minute") -async def getVoiceVoices( - request: Request, - instanceId: str = Path(...), - language: str = Query("de-DE"), - context: RequestContext = Depends(getRequestContext), -): - """Return available TTS voices for a given language.""" - mandateId, _ = _validateInstanceAccess(instanceId, context) - from modules.interfaces.interfaceVoiceObjects import getVoiceInterface - voiceInterface = getVoiceInterface(context.user, mandateId) - voicesResult = await voiceInterface.getAvailableVoices(language) - voiceList = voicesResult.get("voices", []) if isinstance(voicesResult, dict) else voicesResult - return JSONResponse({"voices": voiceList}) - - -@router.post("/{instanceId}/voice/test") -@limiter.limit("30/minute") -async def testVoice( - request: Request, - instanceId: str = Path(...), - body: dict = Body(...), - context: RequestContext = Depends(getRequestContext), -): - """Test a specific voice with a sample text.""" - import base64 - mandateId, _ = _validateInstanceAccess(instanceId, context) - text = body.get("text", "Hallo, das ist ein Stimmtest.") - language = body.get("language", "de-DE") - voiceId = body.get("voiceId") - - from modules.interfaces.interfaceVoiceObjects import getVoiceInterface - voiceInterface = getVoiceInterface(context.user, mandateId) - - try: - result = await voiceInterface.textToSpeech(text=text, languageCode=language, voiceName=voiceId) - if result and isinstance(result, dict): - audioContent = result.get("audioContent") - if audioContent: - audioB64 = 
base64.b64encode( - audioContent if isinstance(audioContent, bytes) else audioContent.encode() - ).decode() - return JSONResponse({"success": True, "audio": audioB64, "format": "mp3", "text": text}) - return JSONResponse({"success": False, "error": "TTS returned no audio"}) - except Exception as e: - logger.error(f"Voice test failed: {e}") - raise HTTPException(status_code=500, detail=f"TTS test failed: {str(e)}") # ============================================================================= From b33444e8919af3971371231712354f251a277b21 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Tue, 24 Mar 2026 16:39:25 +0100 Subject: [PATCH 03/33] unified data completed implementation --- .../features/commcoach/datamodelCommcoach.py | 24 -- .../commcoach/interfaceFeatureCommcoach.py | 28 -- modules/features/commcoach/mainCommcoach.py | 7 +- .../commcoach/routeFeatureCommcoach.py | 274 +-------------- .../features/commcoach/serviceCommcoach.py | 215 ++++++++---- .../serviceCommcoachContextRetrieval.py | 50 ++- .../commcoach/tests/test_datamodel.py | 1 - modules/interfaces/interfaceBootstrap.py | 7 + modules/interfaces/interfaceVoiceObjects.py | 119 ------- modules/migration/migrateVoiceAndDocuments.py | 316 ++++++++++++++++++ modules/routes/routeSecurityLocal.py | 134 +++++--- modules/routes/routeVoiceGoogle.py | 139 +++----- .../services/serviceAgent/mainServiceAgent.py | 84 ++--- .../services/serviceAi/mainServiceAi.py | 31 +- 14 files changed, 698 insertions(+), 731 deletions(-) create mode 100644 modules/migration/migrateVoiceAndDocuments.py diff --git a/modules/features/commcoach/datamodelCommcoach.py b/modules/features/commcoach/datamodelCommcoach.py index 090640c6..bd94f173 100644 --- a/modules/features/commcoach/datamodelCommcoach.py +++ b/modules/features/commcoach/datamodelCommcoach.py @@ -170,8 +170,6 @@ class CoachingUserProfile(BaseModel): userId: str = Field(description="Owner user ID") mandateId: str = Field(description="Mandate ID") instanceId: str = Field(description="Feature instance ID") - preferredLanguage: str = Field(default="de-DE") - preferredVoice: Optional[str] = Field(default=None, description="Google TTS voice name") dailyReminderTime: Optional[str] = Field(default=None, description="HH:MM format") dailyReminderEnabled: bool = Field(default=False) emailSummaryEnabled: bool = Field(default=True) @@ -205,26 +203,6 @@ class CoachingPersona(BaseModel): updatedAt: Optional[str] = Field(default=None) -# ============================================================================ -# Iteration 2: Documents -# ============================================================================ - -class CoachingDocument(BaseModel): - """A document attached to a coaching context.""" - id: str = Field(default_factory=lambda: str(uuid.uuid4())) - contextId: str = Field(description="FK to CoachingContext") - userId: str = Field(description="Owner user ID") - mandateId: str = Field(description="Mandate ID") - instanceId: Optional[str] = Field(default=None) - fileName: str = Field(description="Original file name") - mimeType: str = Field(default="application/octet-stream") - fileSize: int = Field(default=0) - extractedText: Optional[str] = Field(default=None, description="Text content extracted from file") - summary: Optional[str] = Field(default=None, description="AI-generated summary") - fileRef: Optional[str] = Field(default=None, description="Reference to file in storage") - createdAt: Optional[str] = Field(default=None) - - # 
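With CoachingDocument gone, a coaching document is just a platform FileItem tagged for the feature instance. A condensed sketch of the replacement write path, following the _saveOrUpdateDocument rewrite later in this patch (mgmtInterface is an interfaceDbManagement instance; the markdown mime type is an illustrative choice):

def saveCoachingFile(mgmtInterface, instanceId, mandateId, fileName, content: bytes):
    from modules.datamodels.datamodelFiles import FileItem
    # create the file record plus its binary payload in the management DB
    fileItem = mgmtInterface.createFile(name=fileName, mimeType="text/markdown", content=content)
    mgmtInterface.createFileData(fileItem.id, content)
    # tag it so the unified data layer scopes it to this feature instance
    mgmtInterface.db.recordModify(FileItem, fileItem.id, {
        "scope": "featureInstance",
        "featureInstanceId": instanceId,
        "mandateId": mandateId,
    })
    return fileItem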
============================================================================ # Iteration 2: Badges / Gamification # ============================================================================ @@ -282,8 +260,6 @@ class UpdateTaskStatusRequest(BaseModel): class UpdateProfileRequest(BaseModel): - preferredLanguage: Optional[str] = None - preferredVoice: Optional[str] = None dailyReminderTime: Optional[str] = None dailyReminderEnabled: Optional[bool] = None emailSummaryEnabled: Optional[bool] = None diff --git a/modules/features/commcoach/interfaceFeatureCommcoach.py b/modules/features/commcoach/interfaceFeatureCommcoach.py index e612c6ba..825fca5d 100644 --- a/modules/features/commcoach/interfaceFeatureCommcoach.py +++ b/modules/features/commcoach/interfaceFeatureCommcoach.py @@ -269,34 +269,6 @@ class CommcoachObjects: from .datamodelCommcoach import CoachingPersona return self.db.recordDelete(CoachingPersona, personaId) - # ========================================================================= - # Documents - # ========================================================================= - - def getDocuments(self, contextId: str, userId: str) -> List[Dict[str, Any]]: - from .datamodelCommcoach import CoachingDocument - records = self.db.getRecordset(CoachingDocument, recordFilter={"contextId": contextId, "userId": userId}) - records.sort(key=lambda r: r.get("createdAt") or "", reverse=True) - return records - - def getDocument(self, documentId: str) -> Optional[Dict[str, Any]]: - from .datamodelCommcoach import CoachingDocument - records = self.db.getRecordset(CoachingDocument, recordFilter={"id": documentId}) - return records[0] if records else None - - def createDocument(self, data: Dict[str, Any]) -> Dict[str, Any]: - from .datamodelCommcoach import CoachingDocument - data["createdAt"] = getIsoTimestamp() - return self.db.recordCreate(CoachingDocument, data) - - def updateDocument(self, documentId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]: - from .datamodelCommcoach import CoachingDocument - return self.db.recordModify(CoachingDocument, documentId, updates) - - def deleteDocument(self, documentId: str) -> bool: - from .datamodelCommcoach import CoachingDocument - return self.db.recordDelete(CoachingDocument, documentId) - # ========================================================================= # Badges # ========================================================================= diff --git a/modules/features/commcoach/mainCommcoach.py b/modules/features/commcoach/mainCommcoach.py index 69ac6b1c..e8abcee8 100644 --- a/modules/features/commcoach/mainCommcoach.py +++ b/modules/features/commcoach/mainCommcoach.py @@ -61,18 +61,13 @@ DATA_OBJECTS = [ { "objectKey": "data.feature.commcoach.CoachingUserProfile", "label": {"en": "User Profile", "de": "Benutzerprofil", "fr": "Profil utilisateur"}, - "meta": {"table": "CoachingUserProfile", "fields": ["id", "userId", "preferredLanguage"]} + "meta": {"table": "CoachingUserProfile", "fields": ["id", "userId", "dailyReminderEnabled"]} }, { "objectKey": "data.feature.commcoach.CoachingPersona", "label": {"en": "Coaching Persona", "de": "Coaching-Persona", "fr": "Persona coaching"}, "meta": {"table": "CoachingPersona", "fields": ["id", "key", "label", "gender"]} }, - { - "objectKey": "data.feature.commcoach.CoachingDocument", - "label": {"en": "Coaching Document", "de": "Coaching-Dokument", "fr": "Document coaching"}, - "meta": {"table": "CoachingDocument", "fields": ["id", "contextId", "fileName"]} - }, { "objectKey": 
"data.feature.commcoach.CoachingBadge", "label": {"en": "Coaching Badge", "de": "Coaching-Auszeichnung", "fr": "Badge coaching"}, diff --git a/modules/features/commcoach/routeFeatureCommcoach.py b/modules/features/commcoach/routeFeatureCommcoach.py index 9074d2ba..6d6eb44f 100644 --- a/modules/features/commcoach/routeFeatureCommcoach.py +++ b/modules/features/commcoach/routeFeatureCommcoach.py @@ -26,7 +26,7 @@ from .datamodelCommcoach import ( CoachingContext, CoachingContextStatus, CoachingSession, CoachingSessionStatus, CoachingMessage, CoachingMessageRole, CoachingMessageContentType, CoachingTask, CoachingTaskStatus, - CoachingPersona, CoachingDocument, CoachingBadge, + CoachingPersona, CoachingBadge, CreateContextRequest, UpdateContextRequest, SendMessageRequest, CreateTaskRequest, UpdateTaskRequest, UpdateTaskStatusRequest, UpdateProfileRequest, @@ -334,9 +334,8 @@ async def startSession( try: from modules.interfaces.interfaceVoiceObjects import getVoiceInterface voiceInterface = getVoiceInterface(context.user, mandateId) - profile = interface.getProfile(userId, instanceId) - language = profile.get("preferredLanguage", "de-DE") if profile else "de-DE" - voiceName = profile.get("preferredVoice") if profile else None + from .serviceCommcoach import _getUserVoicePrefs + language, voiceName = _getUserVoicePrefs(userId, mandateId) from .serviceCommcoach import _stripMarkdownForTts ttsResult = await voiceInterface.textToSpeech( text=_stripMarkdownForTts(greetingText), @@ -574,8 +573,8 @@ async def sendAudioStream( if not audioBody: raise HTTPException(status_code=400, detail="No audio data received") - profile = interface.getProfile(str(context.user.id), instanceId) - language = profile.get("preferredLanguage", "de-DE") if profile else "de-DE" + from .serviceCommcoach import _getUserVoicePrefs + language, _ = _getUserVoicePrefs(str(context.user.id), mandateId) contextId = session.get("contextId") service = CommcoachService(context.user, mandateId, instanceId) @@ -839,73 +838,6 @@ async def updateProfile( return {"profile": updated} -# ========================================================================= -# Voice Endpoints -# ========================================================================= - -@router.get("/{instanceId}/voice/languages") -@limiter.limit("30/minute") -async def getVoiceLanguages( - request: Request, - instanceId: str, - context: RequestContext = Depends(getRequestContext), -): - mandateId = _validateInstanceAccess(instanceId, context) - from modules.interfaces.interfaceVoiceObjects import getVoiceInterface - voiceInterface = getVoiceInterface(context.user, mandateId) - languagesResult = await voiceInterface.getAvailableLanguages() - languageList = languagesResult.get("languages", []) if isinstance(languagesResult, dict) else languagesResult - return {"languages": languageList} - - -@router.get("/{instanceId}/voice/voices") -@limiter.limit("30/minute") -async def getVoiceVoices( - request: Request, - instanceId: str, - language: str = "de-DE", - context: RequestContext = Depends(getRequestContext), -): - mandateId = _validateInstanceAccess(instanceId, context) - from modules.interfaces.interfaceVoiceObjects import getVoiceInterface - voiceInterface = getVoiceInterface(context.user, mandateId) - voicesResult = await voiceInterface.getAvailableVoices(language) - voiceList = voicesResult.get("voices", []) if isinstance(voicesResult, dict) else voicesResult - return {"voices": voiceList} - - -@router.post("/{instanceId}/voice/tts") -@limiter.limit("10/minute") -async 
def testVoice( - request: Request, - instanceId: str, - context: RequestContext = Depends(getRequestContext), -): - """TTS preview / voice test.""" - mandateId = _validateInstanceAccess(instanceId, context) - body = await request.json() - text = body.get("text", "Hallo, ich bin dein Coaching-Assistent.") - language = body.get("language", "de-DE") - voiceId = body.get("voiceId") - - from modules.interfaces.interfaceVoiceObjects import getVoiceInterface - voiceInterface = getVoiceInterface(context.user, mandateId) - - try: - result = await voiceInterface.textToSpeech(text=text, languageCode=language, voiceName=voiceId) - if result and isinstance(result, dict): - audioContent = result.get("audioContent") - if audioContent: - audioB64 = base64.b64encode( - audioContent if isinstance(audioContent, bytes) else audioContent.encode() - ).decode() - return {"success": True, "audio": audioB64, "format": "mp3", "text": text} - return {"success": False, "error": "TTS returned no audio"} - except Exception as e: - logger.error(f"Voice test failed: {e}") - raise HTTPException(status_code=500, detail=f"TTS test failed: {str(e)}") - - # ========================================================================= # Export Endpoints (Iteration 2) # ========================================================================= @@ -1074,202 +1006,6 @@ async def deletePersonaRoute( return {"deleted": True} -# ========================================================================= -# Document Endpoints (Iteration 2) -# ========================================================================= - -@router.get("/{instanceId}/contexts/{contextId}/documents") -@limiter.limit("60/minute") -async def listDocuments( - request: Request, - instanceId: str, - contextId: str, - context: RequestContext = Depends(getRequestContext), -): - _validateInstanceAccess(instanceId, context) - interface = _getInterface(context, instanceId) - userId = str(context.user.id) - docs = interface.getDocuments(contextId, userId) - return {"documents": docs} - - -@router.post("/{instanceId}/contexts/{contextId}/documents") -@limiter.limit("10/minute") -async def uploadDocument( - request: Request, - instanceId: str, - contextId: str, - context: RequestContext = Depends(getRequestContext), -): - """Upload a document and bind it to a context. 
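The voice test endpoint removed above returned audio as base64-encoded MP3, and its user-level successor in routeSecurityLocal keeps that response shape. A small client-side sketch for decoding the payload; the output path is arbitrary:

import base64

def writeTtsAudio(response: dict, outPath: str = "voice_test.mp3") -> str:
    # response shape: {"success": True, "audio": "<base64>", "format": "mp3", ...}
    if not response.get("success"):
        raise RuntimeError(response.get("error", "TTS returned no audio"))
    with open(outPath, "wb") as f:
        f.write(base64.b64decode(response["audio"]))
    return outPath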
Stores file in Management DB.""" - mandateId = _validateInstanceAccess(instanceId, context) - interface = _getInterface(context, instanceId) - userId = str(context.user.id) - - ctx = interface.getContext(contextId) - if not ctx: - raise HTTPException(status_code=404, detail="Context not found") - _validateOwnership(ctx, context) - - form = await request.form() - file = form.get("file") - if not file or not hasattr(file, "read"): - raise HTTPException(status_code=400, detail="No file uploaded") - - content = await file.read() - fileName = getattr(file, "filename", "document") - mimeType = getattr(file, "content_type", "application/octet-stream") - fileSize = len(content) - - if not content: - raise HTTPException(status_code=400, detail="Leere Datei hochgeladen") - - import modules.interfaces.interfaceDbManagement as interfaceDbManagement - mgmtInterface = interfaceDbManagement.getInterface(currentUser=context.user) - fileItem, _dupType = mgmtInterface.saveUploadedFile(content, fileName) - fileRef = fileItem.id - - extractedText = _extractText(content, mimeType, fileName) - summary = None - if extractedText and len(extractedText.strip()) > 50: - try: - from .serviceCommcoach import CommcoachService - service = CommcoachService(context.user, mandateId, instanceId) - aiResp = await service._callAi( - "Du fasst Dokumente in 2-3 Saetzen zusammen.", - f"Fasse folgendes Dokument zusammen:\n\n{extractedText[:3000]}" - ) - if aiResp and aiResp.errorCount == 0 and aiResp.content: - summary = aiResp.content.strip() - except Exception as e: - logger.warning(f"Document summary failed: {e}") - - docData = CoachingDocument( - contextId=contextId, - userId=userId, - mandateId=mandateId, - instanceId=instanceId, - fileName=fileName, - mimeType=mimeType, - fileSize=fileSize, - extractedText=extractedText[:10000] if extractedText else None, - summary=summary, - fileRef=fileRef, - ).model_dump() - created = interface.createDocument(docData) - return {"document": created} - - -@router.delete("/{instanceId}/documents/{documentId}") -@limiter.limit("10/minute") -async def deleteDocumentRoute( - request: Request, - instanceId: str, - documentId: str, - context: RequestContext = Depends(getRequestContext), -): - mandateId = _validateInstanceAccess(instanceId, context) - interface = _getInterface(context, instanceId) - - doc = interface.getDocument(documentId) - if not doc: - raise HTTPException(status_code=404, detail="Document not found") - _validateOwnership(doc, context) - - fileRef = doc.get("fileRef") - if fileRef: - try: - import modules.interfaces.interfaceDbManagement as interfaceDbManagement - mgmtInterface = interfaceDbManagement.getInterface( - currentUser=context.user, mandateId=mandateId, featureInstanceId=instanceId - ) - mgmtInterface.deleteFile(fileRef) - except Exception as e: - logger.warning(f"Failed to delete file {fileRef}: {e}") - - interface.deleteDocument(documentId) - return {"deleted": True} - - -def _extractText(content: bytes, mimeType: str, fileName: str) -> Optional[str]: - """Extract text from uploaded file content (TXT, MD, HTML, PDF, DOCX, XLSX, PPTX).""" - import io - - lowerName = fileName.lower() - try: - if mimeType in ("text/plain",) or lowerName.endswith(".txt"): - return content.decode("utf-8", errors="replace") - - if mimeType in ("text/markdown",) or lowerName.endswith(".md"): - return content.decode("utf-8", errors="replace") - - if mimeType in ("text/html",) or lowerName.endswith((".html", ".htm")): - from html.parser import HTMLParser - class _Strip(HTMLParser): - def 
__init__(self): - super().__init__() - self._parts: list[str] = [] - def handle_data(self, d): - self._parts.append(d) - def result(self): - return " ".join(self._parts) - parser = _Strip() - parser.feed(content.decode("utf-8", errors="replace")) - return parser.result() - - if "pdf" in mimeType or lowerName.endswith(".pdf"): - try: - from PyPDF2 import PdfReader - reader = PdfReader(io.BytesIO(content)) - return "".join(page.extract_text() or "" for page in reader.pages) - except ImportError: - logger.warning("PyPDF2 not installed, cannot extract PDF text") - return None - - if "wordprocessingml" in mimeType or lowerName.endswith(".docx"): - try: - from docx import Document - doc = Document(io.BytesIO(content)) - return "\n".join(p.text for p in doc.paragraphs if p.text) - except ImportError: - logger.warning("python-docx not installed, cannot extract DOCX text") - return None - - if "spreadsheetml" in mimeType or lowerName.endswith(".xlsx"): - try: - from openpyxl import load_workbook - wb = load_workbook(io.BytesIO(content), read_only=True, data_only=True) - parts: list[str] = [] - for ws in wb.worksheets: - for row in ws.iter_rows(values_only=True): - cells = [str(c) for c in row if c is not None] - if cells: - parts.append("\t".join(cells)) - return "\n".join(parts) - except ImportError: - logger.warning("openpyxl not installed, cannot extract XLSX text") - return None - - if "presentationml" in mimeType or lowerName.endswith(".pptx"): - try: - from pptx import Presentation - prs = Presentation(io.BytesIO(content)) - parts = [] - for slide in prs.slides: - for shape in slide.shapes: - if shape.has_text_frame: - parts.append(shape.text_frame.text) - return "\n".join(parts) - except ImportError: - logger.warning("python-pptx not installed, cannot extract PPTX text") - return None - - logger.info(f"No text extractor for {fileName} (mime={mimeType})") - except Exception as e: - logger.warning(f"Text extraction failed for {fileName}: {e}") - return None - - # ========================================================================= # Badge + Score History Endpoints (Iteration 2) # ========================================================================= diff --git a/modules/features/commcoach/serviceCommcoach.py b/modules/features/commcoach/serviceCommcoach.py index bf5ec281..36fc6e16 100644 --- a/modules/features/commcoach/serviceCommcoach.py +++ b/modules/features/commcoach/serviceCommcoach.py @@ -42,6 +42,30 @@ from .serviceCommcoachContextRetrieval import ( logger = logging.getLogger(__name__) +def _getUserVoicePrefs(userId: str, mandateId: Optional[str] = None) -> tuple: + """Load voice language and voiceName from central UserVoicePreferences. 
+ Returns (language, voiceName) tuple.""" + try: + from modules.datamodels.datamodelUam import UserVoicePreferences + from modules.security.rootAccess import getRootInterface + rootIf = getRootInterface() + prefs = rootIf.db.getRecordset( + UserVoicePreferences, + recordFilter={"userId": userId, "mandateId": mandateId} + ) + if not prefs and mandateId: + prefs = rootIf.db.getRecordset( + UserVoicePreferences, + recordFilter={"userId": userId} + ) + if prefs: + p = prefs[0] if isinstance(prefs[0], dict) else prefs[0].model_dump() + return (p.get("ttsLanguage") or p.get("sttLanguage") or "de-DE", p.get("ttsVoice")) + except Exception as e: + logger.warning(f"Failed to load UserVoicePreferences for user={userId}: {e}") + return ("de-DE", None) + + def _stripMarkdownForTts(text: str) -> str: """Strip markdown formatting so TTS reads clean speech text.""" t = text @@ -159,9 +183,7 @@ async def _generateAndEmitTts(sessionId: str, speechText: str, currentUser, mand from modules.interfaces.interfaceVoiceObjects import getVoiceInterface import base64 voiceInterface = getVoiceInterface(currentUser, mandateId) - profile = interface.getProfile(str(currentUser.id), instanceId) - language = profile.get("preferredLanguage", "de-DE") if profile else "de-DE" - voiceName = profile.get("preferredVoice") if profile else None + language, voiceName = _getUserVoicePrefs(str(currentUser.id), mandateId) ttsResult = await voiceInterface.textToSpeech( text=_stripMarkdownForTts(speechText), languageCode=language, @@ -196,60 +218,36 @@ def _resolveFileNameAndMime(title: str) -> tuple: async def _saveOrUpdateDocument(doc: Dict[str, Any], contextId: str, userId: str, mandateId: str, instanceId: str, interface, sessionId: str, user=None): - """Save a new document or update an existing one. 
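Taken together with _stripMarkdownForTts, the helper above gives features a short path to user-scoped synthesis. A sketch of the intended call site — voiceInterface as returned by getVoiceInterface, userId and mandateId assumed known; that a None voiceName falls back to a connector default is an assumption:

async def speakForUser(voiceInterface, userId, mandateId, text):
    language, voiceName = _getUserVoicePrefs(userId, mandateId)
    return await voiceInterface.textToSpeech(
        text=_stripMarkdownForTts(text),
        languageCode=language,
        voiceName=voiceName,  # None presumably lets the connector pick a default
    )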
Stores file in Management DB.""" - from .datamodelCommcoach import CoachingDocument + """Save a document as platform FileItem (no CoachingDocument).""" try: - docId = doc.get("id") title = doc.get("title", "Dokument") content = doc.get("content", "") contentBytes = content.encode("utf-8") fileName, mimeType = _resolveFileNameAndMime(title) - fileRef = None - try: - import modules.interfaces.interfaceDbManagement as interfaceDbManagement - mgmtInterface = interfaceDbManagement.getInterface( - currentUser=user, mandateId=mandateId, featureInstanceId=instanceId - ) - fileItem = mgmtInterface.createFile(name=fileName, mimeType=mimeType, content=contentBytes) - mgmtInterface.createFileData(fileItem.id, contentBytes) - fileRef = fileItem.id - except Exception as e: - logger.warning(f"Failed to store document in file DB: {e}") + import modules.interfaces.interfaceDbManagement as interfaceDbManagement + mgmtInterface = interfaceDbManagement.getInterface( + currentUser=user, mandateId=mandateId, featureInstanceId=instanceId + ) + fileItem = mgmtInterface.createFile(name=fileName, mimeType=mimeType, content=contentBytes) + mgmtInterface.createFileData(fileItem.id, contentBytes) + + from modules.datamodels.datamodelFiles import FileItem as FileItemModel + mgmtInterface.db.recordModify(FileItemModel, fileItem.id, { + "scope": "featureInstance", + "featureInstanceId": instanceId, + "mandateId": mandateId, + }) + + await emitSessionEvent(sessionId, "documentCreated", { + "id": fileItem.id, "fileName": fileName, "fileSize": len(contentBytes), + }) + logger.info(f"Document saved as platform FileItem: {fileItem.id} ({title})") - if docId: - updates = { - "fileName": fileName, - "mimeType": mimeType, - "extractedText": content, - "summary": title, - "fileSize": len(contentBytes), - } - if fileRef: - updates["fileRef"] = fileRef - updated = interface.updateDocument(docId, updates) - if updated: - await emitSessionEvent(sessionId, "documentUpdated", updated) - logger.info(f"Document updated: {docId} ({title})") - else: - logger.warning(f"Document update failed, id not found: {docId}") - else: - docData = CoachingDocument( - contextId=contextId, - userId=userId, - mandateId=mandateId, - instanceId=instanceId, - fileName=fileName, - mimeType=mimeType, - fileSize=len(contentBytes), - extractedText=content, - summary=title, - fileRef=fileRef, - ).model_dump() - created = interface.createDocument(docData) - await emitSessionEvent(sessionId, "documentCreated", created) except Exception as e: - logger.warning(f"Failed to save/update document: {e}") + logger.warning(f"Failed to save document as FileItem: {e}") + + async def _resolveDocumentIntent(combinedUserPrompt: str, docs: List[Dict[str, Any]], callAiFn) -> Dict[str, Any]: @@ -269,17 +267,60 @@ async def _resolveDocumentIntent(combinedUserPrompt: str, docs: List[Dict[str, A return {"read": [], "update": [], "create": [], "noDocumentAction": True} -def _loadDocumentContents(docIds: List[str], interface) -> List[Dict[str, Any]]: - """Load full extractedText for the given document IDs.""" - results = [] - for docId in docIds[:DOC_INTENT_MAX_DOCS]: - doc = interface.getDocument(docId) - if doc and doc.get("extractedText"): - results.append({ - "id": doc.get("id", ""), - "title": doc.get("summary") or doc.get("fileName", ""), - "content": doc.get("extractedText", "")[:DOC_CONTENT_MAX_CHARS], +def _getPlatformFileList(mandateId: str = None, instanceId: str = None) -> List[Dict[str, Any]]: + """Get list of platform FileItems for this feature instance (for doc intent 
detection).""" + try: + import modules.interfaces.interfaceDbManagement as interfaceDbManagement + from modules.datamodels.datamodelFiles import FileItem + mgmtIf = interfaceDbManagement.getInterface( + currentUser=None, mandateId=mandateId, featureInstanceId=instanceId + ) + records = mgmtIf.db.getRecordset( + FileItem, recordFilter={"featureInstanceId": instanceId} + ) if instanceId else [] + result = [] + for r in records: + d = r if isinstance(r, dict) else r.model_dump() if hasattr(r, "model_dump") else {} + result.append({ + "id": d.get("id", ""), + "fileName": d.get("fileName") or d.get("name") or "Dokument", + "summary": d.get("fileName") or "", }) + return result + except Exception as e: + logger.warning(f"Failed to load platform file list: {e}") + return [] + + +def _loadDocumentContents(docIds: List[str], interface, mandateId: str = None, instanceId: str = None) -> List[Dict[str, Any]]: + """Load file content for given IDs from platform FileItem store.""" + results = [] + try: + import modules.interfaces.interfaceDbManagement as interfaceDbManagement + from modules.datamodels.datamodelFiles import FileItem + mgmtIf = interfaceDbManagement.getInterface( + currentUser=None, mandateId=mandateId, featureInstanceId=instanceId + ) + for fId in docIds[:DOC_INTENT_MAX_DOCS]: + fileRecords = mgmtIf.db.getRecordset(FileItem, recordFilter={"id": fId}) + if fileRecords: + f = fileRecords[0] if isinstance(fileRecords[0], dict) else fileRecords[0].model_dump() + content = "" + try: + from modules.datamodels.datamodelKnowledge import FileContentIndex + idxRecords = mgmtIf.db.getRecordset(FileContentIndex, recordFilter={"fileId": fId}) + if idxRecords: + idx = idxRecords[0] if isinstance(idxRecords[0], dict) else idxRecords[0].model_dump() + content = (idx.get("extractedText") or "")[:DOC_CONTENT_MAX_CHARS] + except Exception: + pass + results.append({ + "id": fId, + "title": f.get("fileName") or f.get("name") or "Dokument", + "content": content, + }) + except Exception as e: + logger.warning(f"Failed to load document contents from platform: {e}") return results @@ -319,20 +360,42 @@ def _resolvePersona(session: Optional[Dict[str, Any]], interface) -> Optional[Di return None -def _getDocumentSummaries(contextId: str, userId: str, interface) -> Optional[List[str]]: - """Get document summaries for context to include in the AI prompt.""" +def _getDocumentSummaries(contextId: str, userId: str, interface, + mandateId: str = None, instanceId: str = None) -> Optional[List[str]]: + """Get document summaries from platform FileItems (UDL) for the coaching instance.""" try: - docs = interface.getDocuments(contextId, userId) + import modules.interfaces.interfaceDbManagement as interfaceDbManagement + from modules.datamodels.datamodelFiles import FileItem + mgmtIf = interfaceDbManagement.getInterface( + currentUser=None, mandateId=mandateId, featureInstanceId=instanceId + ) + files = mgmtIf.db.getRecordset( + FileItem, recordFilter={"featureInstanceId": instanceId} + ) if instanceId else [] summaries = [] - for doc in docs[:5]: - summary = doc.get("summary") - if summary: - summaries.append(f"[{doc.get('fileName', 'Dokument')}] {summary}") - elif doc.get("extractedText"): - summaries.append(f"[{doc.get('fileName', 'Dokument')}] {doc['extractedText'][:200]}...") + for f in files[:10]: + fData = f if isinstance(f, dict) else f.model_dump() if hasattr(f, "model_dump") else {} + name = fData.get("fileName") or fData.get("name") or "Dokument" + fId = fData.get("id") + snippet = None + if fId: + try: + from 
modules.datamodels.datamodelKnowledge import FileContentIndex + idxRecords = mgmtIf.db.getRecordset( + FileContentIndex, recordFilter={"fileId": fId} + ) + if idxRecords: + idx = idxRecords[0] if isinstance(idxRecords[0], dict) else idxRecords[0].model_dump() + snippet = (idx.get("extractedText") or "")[:200] + except Exception: + pass + if snippet: + summaries.append(f"[{name}] {snippet}...") + else: + summaries.append(f"[{name}]") return summaries if summaries else None except Exception as e: - logger.warning(f"Failed to load document summaries for context {contextId}: {e}") + logger.warning(f"Failed to load platform file summaries for instance {instanceId}: {e}") return None @@ -427,18 +490,22 @@ class CommcoachService: ) persona = _resolvePersona(session, interface) - documentSummaries = _getDocumentSummaries(contextId, self.userId, interface) + documentSummaries = _getDocumentSummaries( + contextId, self.userId, interface, mandateId=self.mandateId, instanceId=self.instanceId + ) # Document intent detection (pre-AI-call) referencedDocumentContents = None - allDocs = interface.getDocuments(contextId, self.userId) if documentSummaries else [] + allDocs = _getPlatformFileList(self.mandateId, self.instanceId) if documentSummaries else [] if allDocs: await emitSessionEvent(sessionId, "status", {"label": "Dokumente werden geprueft..."}) docIntent = await _resolveDocumentIntent(combinedUserPrompt, allDocs, self._callAi) if not docIntent.get("noDocumentAction"): docIdsToLoad = list(set((docIntent.get("read") or []) + (docIntent.get("update") or []))) if docIdsToLoad: - referencedDocumentContents = _loadDocumentContents(docIdsToLoad, interface) + referencedDocumentContents = _loadDocumentContents( + docIdsToLoad, interface, mandateId=self.mandateId, instanceId=self.instanceId + ) systemPrompt = aiPrompts.buildCoachingSystemPrompt( context, @@ -536,7 +603,9 @@ class CommcoachService: session = interface.getSession(sessionId) persona = _resolvePersona(session, interface) - documentSummaries = _getDocumentSummaries(contextId, self.userId, interface) + documentSummaries = _getDocumentSummaries( + contextId, self.userId, interface, mandateId=self.mandateId, instanceId=self.instanceId + ) systemPrompt = aiPrompts.buildCoachingSystemPrompt( context, previousMessages, tasks, diff --git a/modules/features/commcoach/serviceCommcoachContextRetrieval.py b/modules/features/commcoach/serviceCommcoachContextRetrieval.py index d673b04a..f1ccb9a3 100644 --- a/modules/features/commcoach/serviceCommcoachContextRetrieval.py +++ b/modules/features/commcoach/serviceCommcoachContextRetrieval.py @@ -172,20 +172,48 @@ def searchSessionsByTopic( def searchSessionsByTopicRag( - sessions: List[Dict[str, Any]], query: str, - maxResults: int = TOPIC_SEARCH_MAX_RESULTS, - embeddingProvider: Optional[Any] = None, + userId: str, + instanceId: str, + mandateId: str = None, + queryVector: List[float] = None, ) -> List[Dict[str, Any]]: + """Search using platform RAG (semantic search across mandate-wide knowledge data). + + Requires a pre-computed queryVector (embedding). The caller is responsible + for generating the embedding via AiService.callEmbedding before invoking this. """ - Phase 7 RAG: Semantic search via embeddings. - When embeddingProvider is None, falls back to keyword search. - Future: Pass embeddingProvider that has embed(text) -> vector and similarity search. 
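The TODO being deleted here envisaged local cosine scoring over session embeddings; for reference, that similarity measure is simply:

import math

def cosineSimilarity(a, b):
    # cosine of the angle between two equal-length embedding vectors
    dot = sum(x * y for x, y in zip(a, b))
    normA = math.sqrt(sum(x * x for x in a))
    normB = math.sqrt(sum(y * y for y in b))
    return dot / (normA * normB) if normA and normB else 0.0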
- """ - if embeddingProvider is None: - return searchSessionsByTopic(sessions, query, maxResults) - # TODO: When embedding API exists: embed query, embed session summaries, cosine similarity - return searchSessionsByTopic(sessions, query, maxResults) + if not queryVector: + logger.warning("searchSessionsByTopicRag called without queryVector, skipping RAG search") + return [] + try: + from modules.interfaces.interfaceDbKnowledge import getInterface as _getKnowledgeInterface + + knowledgeDb = _getKnowledgeInterface() + + results = knowledgeDb.semanticSearch( + queryVector=queryVector, + userId=userId, + featureInstanceId=instanceId, + mandateId=mandateId, + isSysAdmin=False, + limit=TOPIC_SEARCH_MAX_RESULTS, + ) + + formatted = [] + for r in (results or []): + rData = r if isinstance(r, dict) else r.model_dump() if hasattr(r, "model_dump") else {} + contextRef = rData.get("contextRef") or {} + formatted.append({ + "source": "rag", + "content": rData.get("data") or rData.get("summary") or "", + "fileName": contextRef.get("containerPath") or "RAG-Ergebnis", + "score": rData.get("_score") or 0, + }) + return formatted + except Exception as e: + logger.warning(f"RAG search failed for query '{query[:50]}': {e}") + return [] def buildSessionSummariesForPrompt( diff --git a/modules/features/commcoach/tests/test_datamodel.py b/modules/features/commcoach/tests/test_datamodel.py index fb39ba34..05d174c5 100644 --- a/modules/features/commcoach/tests/test_datamodel.py +++ b/modules/features/commcoach/tests/test_datamodel.py @@ -136,7 +136,6 @@ class TestCoachingUserProfile: profile = CoachingUserProfile( userId="u1", mandateId="m1", instanceId="i1", ) - assert profile.preferredLanguage == "de-DE" assert profile.dailyReminderEnabled is False assert profile.emailSummaryEnabled is True assert profile.streakDays == 0 diff --git a/modules/interfaces/interfaceBootstrap.py b/modules/interfaces/interfaceBootstrap.py index 80607268..e2a0dfa4 100644 --- a/modules/interfaces/interfaceBootstrap.py +++ b/modules/interfaces/interfaceBootstrap.py @@ -108,6 +108,13 @@ def initBootstrap(db: DatabaseConnector) -> None: except Exception as e: logger.error(f"Root user migration failed: {e}") + # Run voice & documents migration (one-time, sets completion flag) + try: + from modules.migration.migrateVoiceAndDocuments import migrateVoiceAndDocuments + migrateVoiceAndDocuments(db) + except Exception as e: + logger.error(f"Voice & documents migration failed: {e}") + # After migration: root mandate is purely technical — no feature instances if not migrationDone and mandateId: initRootMandateFeatures(db, mandateId) diff --git a/modules/interfaces/interfaceVoiceObjects.py b/modules/interfaces/interfaceVoiceObjects.py index dc391bae..38807bac 100644 --- a/modules/interfaces/interfaceVoiceObjects.py +++ b/modules/interfaces/interfaceVoiceObjects.py @@ -11,9 +11,7 @@ import logging from typing import AsyncGenerator, Callable, Dict, Any, Optional, List from modules.connectors.connectorVoiceGoogle import ConnectorGoogleSpeech -from modules.datamodels.datamodelVoice import VoiceSettings from modules.datamodels.datamodelUam import User -from modules.shared.timeUtils import getUtcTimestamp logger = logging.getLogger(__name__) @@ -335,123 +333,6 @@ class VoiceObjects: "error": str(e) } - # Voice Settings Management - - def getVoiceSettings(self, userId: str) -> Optional[VoiceSettings]: - """ - Get voice settings for a user. 
- - Args: - userId: User ID to get settings for - - Returns: - VoiceSettings object or None if not found - """ - try: - # This would typically query the database - # For now, return None as this is handled by the database interface - logger.debug(f"Getting voice settings for user: {userId}") - return None - - except Exception as e: - logger.error(f"❌ Error getting voice settings: {e}") - return None - - def createVoiceSettings(self, settingsData: Dict[str, Any]) -> Optional[VoiceSettings]: - """ - Create new voice settings. - - Args: - settingsData: Dictionary containing voice settings data - - Returns: - Created VoiceSettings object or None if failed - """ - try: - logger.info(f"Creating voice settings: {settingsData}") - - # Ensure mandateId is set from context if not provided - if "mandateId" not in settingsData or not settingsData["mandateId"]: - if not self.mandateId: - raise ValueError("mandateId is required but not provided and context has no mandateId") - settingsData["mandateId"] = self.mandateId - - # Add timestamps - currentTime = getUtcTimestamp() - settingsData["creationDate"] = currentTime - settingsData["lastModified"] = currentTime - - # Create VoiceSettings object - voiceSettings = VoiceSettings(**settingsData) - - logger.info(f"✅ Voice settings created: {voiceSettings.id}") - return voiceSettings - - except Exception as e: - logger.error(f"❌ Error creating voice settings: {e}") - return None - - def updateVoiceSettings(self, userId: str, settingsData: Dict[str, Any]) -> Optional[VoiceSettings]: - """ - Update existing voice settings. - - Args: - userId: User ID to update settings for - settingsData: Dictionary containing updated voice settings data - - Returns: - Updated VoiceSettings object or None if failed - """ - try: - logger.info(f"Updating voice settings for user {userId}: {settingsData}") - - # Add last modified timestamp - settingsData["lastModified"] = getUtcTimestamp() - - # Create updated VoiceSettings object - voiceSettings = VoiceSettings(**settingsData) - - logger.info(f"✅ Voice settings updated: {voiceSettings.id}") - return voiceSettings - - except Exception as e: - logger.error(f"❌ Error updating voice settings: {e}") - return None - - def getOrCreateVoiceSettings(self, userId: str) -> Optional[VoiceSettings]: - """ - Get existing voice settings or create default ones. - - Args: - userId: User ID to get/create settings for - - Returns: - VoiceSettings object - """ - try: - # Try to get existing settings - existingSettings = self.getVoiceSettings(userId) - - if existingSettings: - return existingSettings - - # Create default settings if none exist - defaultSettings = { - "userId": userId, - "mandateId": self.mandateId, - "sttLanguage": "de-DE", - "ttsLanguage": "de-DE", - "ttsVoice": "de-DE-Wavenet-A", - "translationEnabled": True, - "targetLanguage": "en-US" - } - - return self.createVoiceSettings(defaultSettings) - - except Exception as e: - logger.error(f"❌ Error getting or creating voice settings: {e}") - return None - # Language and Voice Information async def getAvailableLanguages(self) -> Dict[str, Any]: diff --git a/modules/migration/migrateVoiceAndDocuments.py b/modules/migration/migrateVoiceAndDocuments.py new file mode 100644 index 00000000..0fc5ee02 --- /dev/null +++ b/modules/migration/migrateVoiceAndDocuments.py @@ -0,0 +1,316 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +Migration: Voice settings consolidation and CoachingDocument scope-tagging. 
+Moves VoiceSettings (workspace DB) and CoachingUserProfile voice fields (commcoach DB) +into the unified UserVoicePreferences model, and tags CoachingDocument files with +featureInstance scope before deleting the legacy records. +Called once from bootstrap, sets a DB flag to prevent re-execution. +""" + +import logging +import uuid +from typing import Dict, List, Optional + +from modules.connectors.connectorDbPostgre import DatabaseConnector +from modules.shared.configuration import APP_CONFIG +from modules.datamodels.datamodelUam import UserVoicePreferences + +logger = logging.getLogger(__name__) + +_MIGRATION_FLAG_KEY = "migration_voice_documents_completed" + + +def _isMigrationCompleted(db) -> bool: + """Check if migration has already been executed.""" + try: + from modules.datamodels.datamodelUam import Mandate + records = db.getRecordset(Mandate, recordFilter={"name": _MIGRATION_FLAG_KEY}) + return len(records) > 0 + except Exception: + return False + + +def _setMigrationCompleted(db) -> None: + """Set flag that migration is completed (uses a settings-like record).""" + if _isMigrationCompleted(db): + return + try: + from modules.datamodels.datamodelUam import Mandate + flag = Mandate(name=_MIGRATION_FLAG_KEY, label="Migration completed", enabled=False, isSystem=True) + db.recordCreate(Mandate, flag) + logger.info("Migration flag set: voice & documents migration completed") + except Exception as e: + logger.error(f"Failed to set migration flag: {e}") + + +def _getRawRows(connector: DatabaseConnector, tableName: str, columns: List[str]) -> List[Dict]: + """Read all rows from a table via raw SQL. Returns empty list if table doesn't exist.""" + try: + connector._ensure_connection() + colList = ", ".join(f'"{c}"' for c in columns) + with connector.connection.cursor() as cur: + cur.execute( + "SELECT COUNT(*) FROM information_schema.tables " + "WHERE LOWER(table_name) = LOWER(%s) AND table_schema = 'public'", + (tableName,), + ) + if cur.fetchone()["count"] == 0: + logger.info(f"Table '{tableName}' does not exist, skipping") + return [] + cur.execute(f'SELECT {colList} FROM "{tableName}"') + return [dict(row) for row in cur.fetchall()] + except Exception as e: + logger.warning(f"Raw query on '{tableName}' failed: {e}") + try: + connector.connection.rollback() + except Exception: + pass + return [] + + +def _deleteRawRow(connector: DatabaseConnector, tableName: str, rowId: str) -> bool: + """Delete a single row by id via raw SQL.""" + try: + connector._ensure_connection() + with connector.connection.cursor() as cur: + cur.execute(f'DELETE FROM "{tableName}" WHERE "id" = %s', (rowId,)) + connector.connection.commit() + return True + except Exception as e: + logger.warning(f"Failed to delete row {rowId} from '{tableName}': {e}") + try: + connector.connection.rollback() + except Exception: + pass + return False + + +def _createDbConnector(dbName: str) -> Optional[DatabaseConnector]: + """Create a DatabaseConnector for a named database, returns None on failure.""" + try: + dbHost = APP_CONFIG.get("DB_HOST") + dbUser = APP_CONFIG.get("DB_USER") + dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET") + dbPort = int(APP_CONFIG.get("DB_PORT", 5432)) + return DatabaseConnector( + dbHost=dbHost, + dbDatabase=dbName, + dbUser=dbUser, + dbPassword=dbPassword, + dbPort=dbPort, + ) + except Exception as e: + logger.warning(f"Could not connect to database '{dbName}': {e}") + return None + + +# ─── Part A ─────────────────────────────────────────────────────────────────── + +def _migrateVoiceSettings(db, 
wsDb: DatabaseConnector, dryRun: bool, stats: Dict) -> None: + """Migrate VoiceSettings records from poweron_workspace into UserVoicePreferences.""" + rows = _getRawRows(wsDb, "VoiceSettings", [ + "id", "userId", "mandateId", "ttsVoiceMap", "sttLanguage", "ttsLanguage", "ttsVoice", + ]) + if not rows: + logger.info("Part A: No VoiceSettings records found, skipping") + return + + for row in rows: + userId = row.get("userId") + if not userId: + continue + + existing = db.getRecordset(UserVoicePreferences, recordFilter={"userId": userId}) + if existing: + stats["voiceSettingsSkipped"] += 1 + if not dryRun: + _deleteRawRow(wsDb, "VoiceSettings", row["id"]) + continue + + if dryRun: + logger.info(f"[DRY RUN] Would create UserVoicePreferences for user {userId} from VoiceSettings") + stats["voiceSettingsCreated"] += 1 + continue + + try: + import json + ttsVoiceMap = row.get("ttsVoiceMap") + if isinstance(ttsVoiceMap, str): + try: + ttsVoiceMap = json.loads(ttsVoiceMap) + except (json.JSONDecodeError, TypeError): + ttsVoiceMap = None + + prefs = UserVoicePreferences( + userId=userId, + mandateId=row.get("mandateId"), + ttsVoiceMap=ttsVoiceMap, + sttLanguage=row.get("sttLanguage", "de-DE"), + ttsLanguage=row.get("ttsLanguage", "de-DE"), + ttsVoice=row.get("ttsVoice"), + ) + db.recordCreate(UserVoicePreferences, prefs) + stats["voiceSettingsCreated"] += 1 + _deleteRawRow(wsDb, "VoiceSettings", row["id"]) + except Exception as e: + logger.error(f"Part A: Failed to migrate VoiceSettings {row['id']}: {e}") + stats["errors"] += 1 + + +# ─── Part B ─────────────────────────────────────────────────────────────────── + +def _migrateCoachingProfileVoice(db, ccDb: DatabaseConnector, dryRun: bool, stats: Dict) -> None: + """Migrate preferredLanguage/preferredVoice from CoachingUserProfile into UserVoicePreferences.""" + rows = _getRawRows(ccDb, "CoachingUserProfile", [ + "id", "userId", "mandateId", "preferredLanguage", "preferredVoice", + ]) + if not rows: + logger.info("Part B: No CoachingUserProfile records with voice data found, skipping") + return + + for row in rows: + userId = row.get("userId") + prefLang = row.get("preferredLanguage") + prefVoice = row.get("preferredVoice") + if not userId or (not prefLang and not prefVoice): + continue + + existing = db.getRecordset(UserVoicePreferences, recordFilter={"userId": userId}) + if existing: + stats["coachingProfileSkipped"] += 1 + continue + + if dryRun: + logger.info(f"[DRY RUN] Would create UserVoicePreferences for user {userId} from CoachingUserProfile") + stats["coachingProfileCreated"] += 1 + continue + + try: + prefs = UserVoicePreferences( + userId=userId, + mandateId=row.get("mandateId"), + sttLanguage=prefLang or "de-DE", + ttsLanguage=prefLang or "de-DE", + ttsVoice=prefVoice, + ) + db.recordCreate(UserVoicePreferences, prefs) + stats["coachingProfileCreated"] += 1 + except Exception as e: + logger.error(f"Part B: Failed to migrate CoachingUserProfile {row['id']}: {e}") + stats["errors"] += 1 + + +# ─── Part C ─────────────────────────────────────────────────────────────────── + +def _migrateCoachingDocuments(ccDb: DatabaseConnector, dryRun: bool, stats: Dict) -> None: + """Tag FileItem/FileContentIndex with featureInstance scope for each CoachingDocument.""" + from modules.datamodels.datamodelFiles import FileItem + from modules.datamodels.datamodelKnowledge import FileContentIndex + + rows = _getRawRows(ccDb, "CoachingDocument", [ + "id", "fileRef", "instanceId", + ]) + if not rows: + logger.info("Part C: No CoachingDocument records found, 
skipping") + return + + mgmtDb = _createDbConnector("poweron_management") + knowledgeDb = _createDbConnector("poweron_knowledge") + if not mgmtDb: + logger.error("Part C: Cannot connect to poweron_management, aborting document migration") + return + + for row in rows: + fileRef = row.get("fileRef") + instanceId = row.get("instanceId") + docId = row.get("id") + if not fileRef: + if not dryRun: + _deleteRawRow(ccDb, "CoachingDocument", docId) + continue + + if dryRun: + logger.info(f"[DRY RUN] Would tag FileItem {fileRef} with featureInstanceId={instanceId}") + stats["documentsTagged"] += 1 + continue + + try: + fileRecords = mgmtDb.getRecordset(FileItem, recordFilter={"id": fileRef}) + if fileRecords: + updateData = {"scope": "featureInstance"} + if instanceId: + updateData["featureInstanceId"] = instanceId + mgmtDb.recordModify(FileItem, fileRef, updateData) + stats["documentsTagged"] += 1 + else: + logger.warning(f"Part C: FileItem {fileRef} not found in management DB") + + if knowledgeDb: + fciRecords = knowledgeDb.getRecordset(FileContentIndex, recordFilter={"id": fileRef}) + if fciRecords: + fciUpdate = {"scope": "featureInstance"} + if instanceId: + fciUpdate["featureInstanceId"] = instanceId + knowledgeDb.recordModify(FileContentIndex, fileRef, fciUpdate) + + _deleteRawRow(ccDb, "CoachingDocument", docId) + except Exception as e: + logger.error(f"Part C: Failed to migrate CoachingDocument {docId}: {e}") + stats["errors"] += 1 + + +# ─── Main entry ─────────────────────────────────────────────────────────────── + +def migrateVoiceAndDocuments(db, dryRun: bool = False) -> dict: + """ + Migrate VoiceSettings + CoachingUserProfile voice fields into UserVoicePreferences, + and tag CoachingDocument files with featureInstance scope. + + Args: + db: Root database connector (poweron_app) + dryRun: If True, log actions without making changes + + Returns: + Summary dict with migration statistics + """ + if _isMigrationCompleted(db): + logger.info("Voice & documents migration already completed, skipping") + return {"status": "already_completed"} + + stats = { + "voiceSettingsCreated": 0, + "voiceSettingsSkipped": 0, + "coachingProfileCreated": 0, + "coachingProfileSkipped": 0, + "documentsTagged": 0, + "errors": 0, + "dryRun": dryRun, + } + + wsDb = _createDbConnector("poweron_workspace") + ccDb = _createDbConnector("poweron_commcoach") + + # Part A + if wsDb: + _migrateVoiceSettings(db, wsDb, dryRun, stats) + else: + logger.warning("Skipping Part A: poweron_workspace DB unavailable") + + # Part B + if ccDb: + _migrateCoachingProfileVoice(db, ccDb, dryRun, stats) + else: + logger.warning("Skipping Part B: poweron_commcoach DB unavailable") + + # Part C + if ccDb: + _migrateCoachingDocuments(ccDb, dryRun, stats) + else: + logger.warning("Skipping Part C: poweron_commcoach DB unavailable") + + if not dryRun: + _setMigrationCompleted(db) + + logger.info(f"Voice & documents migration completed: {stats}") + return {"status": "completed", **stats} diff --git a/modules/routes/routeSecurityLocal.py b/modules/routes/routeSecurityLocal.py index c1afb8ff..8b1d9e8e 100644 --- a/modules/routes/routeSecurityLocal.py +++ b/modules/routes/routeSecurityLocal.py @@ -4,7 +4,7 @@ Routes for local security and authentication. 
""" -from fastapi import APIRouter, HTTPException, status, Depends, Request, Response, Body +from fastapi import APIRouter, HTTPException, status, Depends, Request, Response, Body, Query from fastapi.security import OAuth2PasswordRequestForm import logging from typing import Dict, Any @@ -816,22 +816,19 @@ def getVoicePreferences( currentUser: User = Depends(getCurrentUser), ) -> Dict[str, Any]: """Get user's voice/language preferences (optionally scoped to mandate via header).""" - try: - rootInterface = getRootInterface() - from modules.datamodels.datamodelUam import UserVoicePreferences + rootInterface = getRootInterface() + from modules.datamodels.datamodelUam import UserVoicePreferences - mandateId = request.headers.get("X-Mandate-Id") or None + mandateId = request.headers.get("X-Mandate-Id") or None + userId = str(currentUser.id) - prefs = rootInterface.db.getRecordset( - UserVoicePreferences, - recordFilter={"userId": str(currentUser.id), "mandateId": mandateId} - ) - if prefs: - return prefs[0] if isinstance(prefs[0], dict) else prefs[0].model_dump() - return UserVoicePreferences(userId=str(currentUser.id), mandateId=mandateId).model_dump() - except Exception as e: - logger.error(f"Error getting voice preferences: {e}") - return {"sttLanguage": "de-DE", "ttsLanguage": "de-DE"} + prefs = rootInterface.db.getRecordset( + UserVoicePreferences, + recordFilter={"userId": userId, "mandateId": mandateId} + ) + if prefs: + return prefs[0] if isinstance(prefs[0], dict) else prefs[0].model_dump() + return UserVoicePreferences(userId=userId, mandateId=mandateId).model_dump() @router.put("/voice-preferences") @@ -841,34 +838,87 @@ def updateVoicePreferences( preferences: Dict[str, Any] = Body(...), currentUser: User = Depends(getCurrentUser), ) -> Dict[str, Any]: - """Update user's voice/language preferences.""" - try: - rootInterface = getRootInterface() - from modules.datamodels.datamodelUam import UserVoicePreferences + """Update user's voice/language preferences (upsert).""" + rootInterface = getRootInterface() + from modules.datamodels.datamodelUam import UserVoicePreferences - mandateId = request.headers.get("X-Mandate-Id") or None - userId = str(currentUser.id) + mandateId = request.headers.get("X-Mandate-Id") or None + userId = str(currentUser.id) - existing = rootInterface.db.getRecordset( - UserVoicePreferences, - recordFilter={"userId": userId, "mandateId": mandateId} - ) + existing = rootInterface.db.getRecordset( + UserVoicePreferences, + recordFilter={"userId": userId, "mandateId": mandateId} + ) - allowedFields = { - "sttLanguage", "ttsLanguage", "ttsVoice", "ttsVoiceMap", - "translationSourceLanguage", "translationTargetLanguage", - } - updateData = {k: v for k, v in preferences.items() if k in allowedFields} + allowedFields = { + "sttLanguage", "ttsLanguage", "ttsVoice", "ttsVoiceMap", + "translationSourceLanguage", "translationTargetLanguage", + } + updateData = {k: v for k, v in preferences.items() if k in allowedFields} - if existing: - existingRecord = existing[0] - existingId = existingRecord.get("id") if isinstance(existingRecord, dict) else existingRecord.id - rootInterface.db.recordModify(UserVoicePreferences, existingId, updateData) - return {"message": "Updated", **updateData} - else: - newPrefs = UserVoicePreferences(userId=userId, mandateId=mandateId, **updateData) - created = rootInterface.db.recordCreate(UserVoicePreferences, newPrefs.model_dump()) - return {"message": "Created", **(created if isinstance(created, dict) else created.model_dump())} - except 
Exception as e: - logger.error(f"Error updating voice preferences: {e}") - raise HTTPException(status_code=500, detail=str(e)) + if existing: + existingRecord = existing[0] + existingId = existingRecord.get("id") if isinstance(existingRecord, dict) else existingRecord.id + rootInterface.db.recordModify(UserVoicePreferences, existingId, updateData) + updated = rootInterface.db.getRecordset(UserVoicePreferences, recordFilter={"id": existingId}) + return updated[0] if updated else {"message": "Updated", **updateData} + else: + newPrefs = UserVoicePreferences(userId=userId, mandateId=mandateId, **updateData) + created = rootInterface.db.recordCreate(UserVoicePreferences, newPrefs.model_dump()) + return created if isinstance(created, dict) else created.model_dump() + + +@router.get("/voice/languages") +@limiter.limit("120/minute") +async def getVoiceLanguages( + request: Request, + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + """Return available TTS languages (user-level, no instance context needed).""" + from modules.interfaces.interfaceVoiceObjects import getVoiceInterface + voiceInterface = getVoiceInterface(currentUser) + languagesResult = await voiceInterface.getAvailableLanguages() + languageList = languagesResult.get("languages", []) if isinstance(languagesResult, dict) else languagesResult + return {"languages": languageList} + + +@router.get("/voice/voices") +@limiter.limit("120/minute") +async def getVoiceVoices( + request: Request, + language: str = Query("de-DE"), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + """Return available TTS voices for a given language.""" + from modules.interfaces.interfaceVoiceObjects import getVoiceInterface + voiceInterface = getVoiceInterface(currentUser) + voicesResult = await voiceInterface.getAvailableVoices(language) + voiceList = voicesResult.get("voices", []) if isinstance(voicesResult, dict) else voicesResult + return {"voices": voiceList} + + +@router.post("/voice/test") +@limiter.limit("30/minute") +async def testVoice( + request: Request, + body: Dict[str, Any] = Body(...), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + """Test a specific voice with a sample text.""" + import base64 + from modules.interfaces.interfaceVoiceObjects import getVoiceInterface + + text = body.get("text", "Hallo, das ist ein Stimmtest.") + language = body.get("language", "de-DE") + voiceId = body.get("voiceId") + + voiceInterface = getVoiceInterface(currentUser) + result = await voiceInterface.textToSpeech(text=text, languageCode=language, voiceName=voiceId) + if result and isinstance(result, dict): + audioContent = result.get("audioContent") + if audioContent: + audioB64 = base64.b64encode( + audioContent if isinstance(audioContent, bytes) else audioContent.encode() + ).decode() + return {"success": True, "audio": audioB64, "format": "mp3", "text": text} + return {"success": False, "error": "TTS returned no audio"} diff --git a/modules/routes/routeVoiceGoogle.py b/modules/routes/routeVoiceGoogle.py index af4db355..dc0c7a85 100644 --- a/modules/routes/routeVoiceGoogle.py +++ b/modules/routes/routeVoiceGoogle.py @@ -442,113 +442,50 @@ async def health_check(currentUser: User = Depends(getCurrentUser)): @router.get("/settings") async def get_voice_settings(currentUser: User = Depends(getCurrentUser)): - """Get voice settings for the current user.""" - try: - logger.info(f"Getting voice settings for user: {currentUser.id}") - - # Get voice interface - voiceInterface = 
_getVoiceInterface(currentUser) - - # Get or create voice settings for the user - voice_settings = voiceInterface.getOrCreateVoiceSettings(currentUser.id) - - if voice_settings: - # Return user settings - return { - "success": True, - "data": { - "user_settings": voice_settings.model_dump(), - "default_settings": { - "sttLanguage": "de-DE", - "ttsLanguage": "de-DE", - "ttsVoice": "de-DE-Wavenet-A", - "translationEnabled": True, - "targetLanguage": "en-US" - } - } - } - else: - # Fallback to default settings if database fails - logger.warning("Failed to get voice settings from database, using defaults") - return { - "success": True, - "data": { - "user_settings": None, - "default_settings": { - "sttLanguage": "de-DE", - "ttsLanguage": "de-DE", - "ttsVoice": "de-DE-Wavenet-A", - "translationEnabled": True, - "targetLanguage": "en-US" - } - } - } - - except Exception as e: - logger.error(f"Error getting voice settings: {e}") - raise HTTPException( - status_code=500, - detail=f"Failed to get voice settings: {str(e)}" - ) + """Get voice settings for the current user (reads from UserVoicePreferences).""" + from modules.datamodels.datamodelUam import UserVoicePreferences + from modules.security.rootAccess import getRootInterface + rootInterface = getRootInterface() + userId = str(currentUser.id) + + prefs = rootInterface.db.getRecordset( + UserVoicePreferences, recordFilter={"userId": userId} + ) + if prefs: + data = prefs[0] if isinstance(prefs[0], dict) else prefs[0].model_dump() + return {"success": True, "data": {"user_settings": data}} + return {"success": True, "data": {"user_settings": UserVoicePreferences(userId=userId).model_dump()}} + @router.post("/settings") async def save_voice_settings( settings: Dict[str, Any] = Body(...), currentUser: User = Depends(getCurrentUser) ): - """Save voice settings for the current user.""" - try: - logger.info(f"Saving voice settings for user: {currentUser.id}") - logger.info(f"Settings: {settings}") - - # Validate required settings - requiredFields = ["sttLanguage", "ttsLanguage", "ttsVoice"] - for field in requiredFields: - if field not in settings: - raise HTTPException( - status_code=400, - detail=f"Missing required field: {field}" - ) - - # Set default values for optional fields if not provided - if "translationEnabled" not in settings: - settings["translationEnabled"] = True - if "targetLanguage" not in settings: - settings["targetLanguage"] = "en-US" - - # Get voice interface - voiceInterface = _getVoiceInterface(currentUser) - - # Check if settings already exist for this user - existing_settings = voiceInterface.getVoiceSettings(currentUser.id) - - if existing_settings: - # Update existing settings - logger.info(f"Updating existing voice settings for user {currentUser.id}") - updated_settings = voiceInterface.updateVoiceSettings(currentUser.id, settings) - logger.info(f"Voice settings updated for user {currentUser.id}: {updated_settings}") - else: - # Create new settings - logger.info(f"Creating new voice settings for user {currentUser.id}") - # Add userId to settings - settings["userId"] = currentUser.id - created_settings = voiceInterface.createVoiceSettings(settings) - logger.info(f"Voice settings created for user {currentUser.id}: {created_settings}") - - return { - "success": True, - "message": "Voice settings saved successfully", - "data": settings - } - - except HTTPException: - raise - except Exception as e: - logger.error(f"Error saving voice settings: {e}") - raise HTTPException( - status_code=500, - detail=f"Failed to save voice 
settings: {str(e)}" - ) + """Save voice settings for the current user (writes to UserVoicePreferences).""" + from modules.datamodels.datamodelUam import UserVoicePreferences + from modules.security.rootAccess import getRootInterface + rootInterface = getRootInterface() + userId = str(currentUser.id) + + allowedFields = { + "sttLanguage", "ttsLanguage", "ttsVoice", "ttsVoiceMap", + "translationSourceLanguage", "translationTargetLanguage", + } + updateData = {k: v for k, v in settings.items() if k in allowedFields} + + existing = rootInterface.db.getRecordset( + UserVoicePreferences, recordFilter={"userId": userId} + ) + if existing: + existingRecord = existing[0] + existingId = existingRecord.get("id") if isinstance(existingRecord, dict) else existingRecord.id + rootInterface.db.recordModify(UserVoicePreferences, existingId, updateData) + else: + newPrefs = UserVoicePreferences(userId=userId, **updateData) + rootInterface.db.recordCreate(UserVoicePreferences, newPrefs.model_dump()) + + return {"success": True, "message": "Voice settings saved successfully", "data": updateData} # ========================================================================= # STT Streaming WebSocket — generic, used by all features diff --git a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py index cbea5631..f9e72ea6 100644 --- a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py +++ b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py @@ -2517,55 +2517,55 @@ def _registerCoreTools(registry: ToolRegistry, services): if not voiceName: try: - from modules.features.workspace import interfaceFeatureWorkspace - featureInstanceId = context.get("featureInstanceId", "") + from modules.datamodels.datamodelUam import UserVoicePreferences + from modules.security.rootAccess import getRootInterface userId = context.get("userId", "") if userId: - wsIf = interfaceFeatureWorkspace.getInterface( - services.user, - mandateId=mandateId or None, - featureInstanceId=featureInstanceId or None, + rootIf = getRootInterface() + prefRecords = rootIf.db.getRecordset( + UserVoicePreferences, + recordFilter={"userId": userId, "mandateId": mandateId} ) - vs = wsIf.getVoiceSettings(userId) if wsIf else None - if vs: - voiceMap = {} - if hasattr(vs, "ttsVoiceMap") and vs.ttsVoiceMap: - voiceMap = vs.ttsVoiceMap if isinstance(vs.ttsVoiceMap, dict) else {} + if not prefRecords and mandateId: + prefRecords = rootIf.db.getRecordset( + UserVoicePreferences, + recordFilter={"userId": userId} + ) + if prefRecords: + vs = prefRecords[0] if isinstance(prefRecords[0], dict) else prefRecords[0].model_dump() if hasattr(prefRecords[0], "model_dump") else prefRecords[0] + voiceMap = vs.get("ttsVoiceMap", {}) or {} + if isinstance(voiceMap, dict) and voiceMap: + selectedKey = None + selectedVoiceEntry = None + baseLanguage = language.split("-")[0].lower() if isinstance(language, str) and language else "" - selectedKey = None - selectedVoiceEntry = None - baseLanguage = language.split("-")[0].lower() if isinstance(language, str) and language else "" + if isinstance(language, str) and language in voiceMap: + selectedKey = language + selectedVoiceEntry = voiceMap[language] - # 1) Exact match first (e.g. 
de-DE) - if isinstance(language, str) and language in voiceMap: - selectedKey = language - selectedVoiceEntry = voiceMap[language] + if selectedVoiceEntry is None and baseLanguage and baseLanguage in voiceMap: + selectedKey = baseLanguage + selectedVoiceEntry = voiceMap[baseLanguage] - # 2) Match short language key (e.g. de) - if selectedVoiceEntry is None and baseLanguage and baseLanguage in voiceMap: - selectedKey = baseLanguage - selectedVoiceEntry = voiceMap[baseLanguage] + if selectedVoiceEntry is None and baseLanguage: + for mapKey, mapValue in voiceMap.items(): + mapKeyNorm = str(mapKey).lower() + if mapKeyNorm == baseLanguage or mapKeyNorm.startswith(f"{baseLanguage}-"): + selectedKey = str(mapKey) + selectedVoiceEntry = mapValue + break - # 3) Match by same language family (e.g. de-CH -> de-DE mapping) - if selectedVoiceEntry is None and baseLanguage: - for mapKey, mapValue in voiceMap.items(): - mapKeyNorm = str(mapKey).lower() - if mapKeyNorm == baseLanguage or mapKeyNorm.startswith(f"{baseLanguage}-"): - selectedKey = str(mapKey) - selectedVoiceEntry = mapValue - break - - if selectedVoiceEntry is not None: - voiceName = ( - selectedVoiceEntry.get("voiceName") - if isinstance(selectedVoiceEntry, dict) - else selectedVoiceEntry - ) - logger.info( - f"textToSpeech: using configured voice '{voiceName}' for requested language '{language}' (matched key '{selectedKey}')" - ) - elif hasattr(vs, "ttsVoice") and vs.ttsVoice and hasattr(vs, "ttsLanguage") and vs.ttsLanguage == language: - voiceName = vs.ttsVoice + if selectedVoiceEntry is not None: + voiceName = ( + selectedVoiceEntry.get("voiceName") + if isinstance(selectedVoiceEntry, dict) + else selectedVoiceEntry + ) + logger.info( + f"textToSpeech: using configured voice '{voiceName}' for requested language '{language}' (matched key '{selectedKey}')" + ) + if not voiceName and vs.get("ttsVoice") and vs.get("ttsLanguage") == language: + voiceName = vs["ttsVoice"] except Exception as prefErr: logger.debug(f"textToSpeech: could not load voice preferences: {prefErr}") diff --git a/modules/serviceCenter/services/serviceAi/mainServiceAi.py b/modules/serviceCenter/services/serviceAi/mainServiceAi.py index 494389ff..37b8b0ba 100644 --- a/modules/serviceCenter/services/serviceAi/mainServiceAi.py +++ b/modules/serviceCenter/services/serviceAi/mainServiceAi.py @@ -557,23 +557,24 @@ detectedIntent-Werte: def _neutralizeRequest(self, request: AiCallRequest) -> Tuple[AiCallRequest, bool]: """Neutralize the prompt text in an AiCallRequest. - Returns (modifiedRequest, wasNeutralized).""" - try: - neutralSvc = self._get_service("neutralization") - if not neutralSvc or not hasattr(neutralSvc, 'processText'): - return request, False + Returns (modifiedRequest, wasNeutralized). 
+ Raises RuntimeError if neutralization is required but fails (fail-safe).""" + neutralSvc = self._get_service("neutralization") + if not neutralSvc or not hasattr(neutralSvc, 'processText'): + raise RuntimeError("Neutralization required but neutralization service is unavailable") - if request.prompt: - result = neutralSvc.processText(request.prompt) - if result and result.get("neutralized_text"): - request.prompt = result["neutralized_text"] - logger.debug("Neutralized prompt in AiCallRequest") - return request, True + if request.prompt: + result = neutralSvc.processText(request.prompt) + if result and result.get("neutralized_text"): + request.prompt = result["neutralized_text"] + logger.debug("Neutralized prompt in AiCallRequest") + return request, True + raise RuntimeError( + "Neutralization required but processText returned no neutralized_text — " + "AI call blocked to protect sensitive data" + ) - return request, False - except Exception as e: - logger.warning(f"Request neutralization failed: {e}") - return request, False + return request, False def _rehydrateResponse(self, responseText: str) -> str: """Replace neutralization placeholders with original values in AI response.""" From d0f8444bacfdb26aafb2bf9044cb0b07c0907938 Mon Sep 17 00:00:00 2001 From: idittrich-valueon Date: Wed, 25 Mar 2026 09:38:06 +0100 Subject: [PATCH 04/33] next version of visual workflow editor with ClickUp Connections --- app.py | 16 + env_dev.env | 5 + env_int.env | 5 + env_prod.env | 5 + modules/auth/csrf.py | 2 + modules/connectors/connectorDbPostgre.py | 3 + modules/connectors/connectorResolver.py | 6 + modules/connectors/connectorTicketsClickup.py | 3 +- .../connectors/providerClickup/__init__.py | 7 + .../providerClickup/connectorClickup.py | 268 ++++++++++++++ modules/datamodels/datamodelDataSource.py | 4 +- modules/datamodels/datamodelUam.py | 8 +- .../datamodelFeatureAutomation2.py | 6 + modules/features/automation2/entryPoints.py | 96 +++++ .../interfaceFeatureAutomation2.py | 191 ++++++++-- .../features/automation2/mainAutomation2.py | 4 + .../automation2/nodeDefinitions/__init__.py | 6 +- .../automation2/nodeDefinitions/ai.py | 2 - .../automation2/nodeDefinitions/clickup.py | 227 ++++++++++++ .../automation2/nodeDefinitions/data.py | 58 --- .../automation2/nodeDefinitions/file.py | 60 +++ .../automation2/nodeDefinitions/flow.py | 38 +- .../automation2/nodeDefinitions/input.py | 9 +- .../automation2/nodeDefinitions/triggers.py | 62 +++- modules/features/automation2/nodeRegistry.py | 7 + .../automation2/routeFeatureAutomation2.py | 254 ++++++++++++- .../workspace/routeFeatureWorkspace.py | 1 + modules/routes/routeClickup.py | 288 +++++++++++++++ modules/routes/routeDataConnections.py | 8 +- modules/routes/routeSecurityClickup.py | 280 ++++++++++++++ modules/serviceCenter/registry.py | 7 + .../services/serviceAgent/mainServiceAgent.py | 1 + .../services/serviceClickup/__init__.py | 7 + .../serviceClickup/mainServiceClickup.py | 223 +++++++++++ .../renderers/rendererDocx.py | 5 +- .../renderers/rendererText.py | 36 +- .../serviceGeneration/subDocumentUtility.py | 145 ++++++++ modules/shared/eventManagement.py | 31 +- .../automation2/clickupTaskUpdateMerge.py | 174 +++++++++ .../workflows/automation2/executionEngine.py | 158 +++++++- .../automation2/executors/__init__.py | 2 - .../executors/actionNodeExecutor.py | 338 +++++++++++++++-- .../automation2/executors/dataExecutor.py | 120 ------ .../automation2/executors/flowExecutor.py | 256 +++++++++---- .../automation2/executors/ioExecutor.py | 10 +- 
.../automation2/executors/triggerExecutor.py | 41 +- modules/workflows/automation2/graphUtils.py | 72 +++- modules/workflows/automation2/runEnvelope.py | 109 ++++++ modules/workflows/automation2/scheduleCron.py | 34 ++ .../automation2/subAutomation2Schedule.py | 304 +++++++++++++++ .../methods/methodClickup/__init__.py | 6 + .../methods/methodClickup/actions/__init__.py | 3 + .../methodClickup/actions/create_task.py | 213 +++++++++++ .../methods/methodClickup/actions/get_task.py | 40 ++ .../methodClickup/actions/list_tasks.py | 51 +++ .../methodClickup/actions/search_tasks.py | 221 +++++++++++ .../methodClickup/actions/update_task.py | 57 +++ .../actions/upload_attachment.py | 88 +++++ .../methods/methodClickup/helpers/__init__.py | 2 + .../methodClickup/helpers/connection.py | 50 +++ .../methodClickup/helpers/pathparse.py | 26 ++ .../methods/methodClickup/methodClickup.py | 349 ++++++++++++++++++ .../workflows/methods/methodFile/__init__.py | 6 + .../methods/methodFile/actions/__init__.py | 6 + .../methods/methodFile/actions/create.py | 147 ++++++++ .../methods/methodFile/methodFile.py | 81 ++++ .../composeAndDraftEmailWithContext.py | 77 +++- .../methodSharepoint/actions/uploadFile.py | 12 +- modules/workflows/workflowManager.py | 2 +- .../workflows/test_automation2_graphUtils.py | 62 ++++ 70 files changed, 5058 insertions(+), 443 deletions(-) create mode 100644 modules/connectors/providerClickup/__init__.py create mode 100644 modules/connectors/providerClickup/connectorClickup.py create mode 100644 modules/features/automation2/entryPoints.py create mode 100644 modules/features/automation2/nodeDefinitions/clickup.py delete mode 100644 modules/features/automation2/nodeDefinitions/data.py create mode 100644 modules/features/automation2/nodeDefinitions/file.py create mode 100644 modules/routes/routeClickup.py create mode 100644 modules/routes/routeSecurityClickup.py create mode 100644 modules/serviceCenter/services/serviceClickup/__init__.py create mode 100644 modules/serviceCenter/services/serviceClickup/mainServiceClickup.py create mode 100644 modules/workflows/automation2/clickupTaskUpdateMerge.py delete mode 100644 modules/workflows/automation2/executors/dataExecutor.py create mode 100644 modules/workflows/automation2/runEnvelope.py create mode 100644 modules/workflows/automation2/scheduleCron.py create mode 100644 modules/workflows/automation2/subAutomation2Schedule.py create mode 100644 modules/workflows/methods/methodClickup/__init__.py create mode 100644 modules/workflows/methods/methodClickup/actions/__init__.py create mode 100644 modules/workflows/methods/methodClickup/actions/create_task.py create mode 100644 modules/workflows/methods/methodClickup/actions/get_task.py create mode 100644 modules/workflows/methods/methodClickup/actions/list_tasks.py create mode 100644 modules/workflows/methods/methodClickup/actions/search_tasks.py create mode 100644 modules/workflows/methods/methodClickup/actions/update_task.py create mode 100644 modules/workflows/methods/methodClickup/actions/upload_attachment.py create mode 100644 modules/workflows/methods/methodClickup/helpers/__init__.py create mode 100644 modules/workflows/methods/methodClickup/helpers/connection.py create mode 100644 modules/workflows/methods/methodClickup/helpers/pathparse.py create mode 100644 modules/workflows/methods/methodClickup/methodClickup.py create mode 100644 modules/workflows/methods/methodFile/__init__.py create mode 100644 modules/workflows/methods/methodFile/actions/__init__.py create mode 100644 
modules/workflows/methods/methodFile/actions/create.py create mode 100644 modules/workflows/methods/methodFile/methodFile.py create mode 100644 tests/unit/workflows/test_automation2_graphUtils.py diff --git a/app.py b/app.py index 8268377a..b88e6a15 100644 --- a/app.py +++ b/app.py @@ -21,6 +21,7 @@ from datetime import datetime from modules.shared.configuration import APP_CONFIG from modules.shared.eventManagement import eventManager from modules.workflows.automation import subAutomationSchedule +from modules.workflows.automation2 import subAutomation2Schedule from modules.features.automation2.emailPoller import start as startAutomation2EmailPoller from modules.features.automation2.emailPoller import stop as stopAutomation2EmailPoller from modules.interfaces.interfaceDbApp import getRootInterface @@ -355,7 +356,15 @@ async def lifespan(app: FastAPI): logger.warning(f"Could not initialize feature containers: {e}") # --- Init Managers --- + import asyncio + try: + main_loop = asyncio.get_running_loop() + eventManager.set_event_loop(main_loop) + subAutomation2Schedule.set_main_loop(main_loop) + except RuntimeError: + pass subAutomationSchedule.start(eventUser) # Automation scheduler + subAutomation2Schedule.start(eventUser) # Automation2 schedule trigger (cron) # Automation2 email poller: started on-demand when a run pauses for email.checkEmail eventManager.start() @@ -386,6 +395,7 @@ async def lifespan(app: FastAPI): # --- Stop Managers --- stopAutomation2EmailPoller(eventUser) # Automation2 email poller (no-op if not running) + subAutomation2Schedule.stop(eventUser) # Automation2 schedule eventManager.stop() subAutomationSchedule.stop(eventUser) # Automation scheduler @@ -560,6 +570,12 @@ app.include_router(msftRouter) from modules.routes.routeSecurityGoogle import router as googleRouter app.include_router(googleRouter) +from modules.routes.routeSecurityClickup import router as clickupRouter +app.include_router(clickupRouter) + +from modules.routes.routeClickup import router as clickupApiRouter +app.include_router(clickupApiRouter) + from modules.routes.routeVoiceGoogle import router as voiceGoogleRouter app.include_router(voiceGoogleRouter) diff --git a/env_dev.env b/env_dev.env index e6643ca9..2a6d715c 100644 --- a/env_dev.env +++ b/env_dev.env @@ -46,6 +46,11 @@ Service_GOOGLE_DATA_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.ap Service_GOOGLE_DATA_CLIENT_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpETDJhbGVQMHlFQzNPVFI1ZzBMa3pNMGlQUHhaQm10eVl1bFlSeTBybzlTOWE2MURXQ0hkRlo0NlNGbHQxWEl1OVkxQnVKYlhhOXR1cUF4T3k0WDdscktkY1oyYllRTmdDTWpfbUdwWGtSd1JvNlYxeTBJdEtaaS1vYnItcW0yaFM= Service_GOOGLE_DATA_REDIRECT_URI = http://localhost:8000/api/google/auth/connect/callback +# ClickUp OAuth (Verbindungen / automation). Create an app in ClickUp: Settings → Apps → API; set redirect URL to Service_CLICKUP_OAUTH_REDIRECT_URI exactly. 
+Service_CLICKUP_CLIENT_ID = O3FX3H602A30MQN4I4SBNGJLIDBD5SL4 +Service_CLICKUP_CLIENT_SECRET = CZECD706WLSX6UV13YI4ACNW50ADZHHXDAJALHE0YE030QFSI6Y9HP4Y61JT7CF0 +Service_CLICKUP_OAUTH_REDIRECT_URI = http://localhost:8000/api/clickup/auth/connect/callback + # Stripe Billing (both end with _SECRET for encryption script) STRIPE_SECRET_KEY_SECRET = DEV_ENC:Z0FBQUFBQnBudkpGWDkxSldfM0NCZ3dmbHY5cS1nQlI3UWZ4ZWRrNVdUdEFKa25RckRiQWY0c1E5MjVsZzlfRkZEU0VFU2tNQ01qZnRNQ0pZVU9hVFN6OEU0RXhwdTl3algzLWJlSXRhYmZlMHltSC1XejlGWEU5TDF1LUlYNEh1aG9tRFI4YmlCYzUyei02U1dabWoyb0N2dVFSb1RhWTNnQjBCZkFjV0FfOWdYdDVpX1k5R2pYM1R6SHRiaE10V1l1dnQybjVHWDRiQUJLM0UxRDZnczhJZGFsc3JhOU82QT09 STRIPE_WEBHOOK_SECRET = DEV_ENC:Z0FBQUFBQnBudkpGcHNWTWpBWkFHRExtdU01N3RyZzNsMjhUS3NiVTNCZmMwN2NEcFZ6UkQ1a2I0aUkyNU4wR2dUdHJXYmtkaEFRUnFpcThObHBEQmJkdEFnT1FXeUxOTlU3UDFNRzl6LWdpRFpYdExvY3FTTG9MTkswdEhrVkNKQVFucnBjSnhLNm4= diff --git a/env_int.env b/env_int.env index d7105469..5f331e5c 100644 --- a/env_int.env +++ b/env_int.env @@ -46,6 +46,11 @@ Service_GOOGLE_DATA_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.ap Service_GOOGLE_DATA_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjNThGeVRNd3hacThtRnE0bzlDa0JPUWQyaEd6QjlFckdsMGZjRlRfUks2bXV3aDdVRTF3LVRlZVY5WjVzSXV4ZGNnX002RDl3dkNYdGFzZkxVUW01My1wTHRCanVCLUozZEx4TlduQlB5MnpvNTR2SGlvbFl1YkhzTEtsSi1SOEo= Service_GOOGLE_DATA_REDIRECT_URI = https://gateway-int.poweron-center.net/api/google/auth/connect/callback +# ClickUp OAuth (Verbindungen / automation). Create an app in ClickUp: Settings → Apps → API; set redirect URL to Service_CLICKUP_OAUTH_REDIRECT_URI exactly. +Service_CLICKUP_CLIENT_ID = O3FX3H602A30MQN4I4SBNGJLIDBD5SL4 +Service_CLICKUP_CLIENT_SECRET = CZECD706WLSX6UV13YI4ACNW50ADZHHXDAJALHE0YE030QFSI6Y9HP4Y61JT7CF0 +Service_CLICKUP_OAUTH_REDIRECT_URI = https://gateway-int.poweron-center.net/api/clickup/auth/connect/callback + # Stripe Billing (both end with _SECRET for encryption script) STRIPE_SECRET_KEY_SECRET = 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 STRIPE_WEBHOOK_SECRET = INT_ENC:Z0FBQUFBQnBudkpGamJBNW91VUdEaThWRTFiTWpyb3NqSDJJcGtjNkhUVVZqVElxUWExY05KcllSYVk1SkRuS1NjYWpZUk1uU29nb2pzdXUxRzBsOEgyRWtmUEw3dUF4ejFIXzNwTVZRM1R1bVVhTUs4ZHJMT0V4Xy1pcHVfWlBaQV9wVXo5MGlQYXA= diff --git a/env_prod.env b/env_prod.env index f10b996e..a4bdea05 100644 --- a/env_prod.env +++ b/env_prod.env @@ -46,6 +46,11 @@ Service_GOOGLE_DATA_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.ap Service_GOOGLE_DATA_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3eWFwSEZ4YnRJcjU1OW5kcXZKdkt1Z3gzWDFhVW5Eelh3VnpnNlppcWxweHY5UUQzeDIyVk83cW1XNVE4bllVWnR2MjlSQzFrV1UyUVV6OUt5b3Vqa3QzMUIwNFBqc2FVSXRxTlQ1OHVJZVFibnhBQ2puXzBwSXp5NUZhZjM1d1o= Service_GOOGLE_DATA_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/google/auth/connect/callback +# ClickUp OAuth (Verbindungen / automation). Create an app in ClickUp: Settings → Apps → API; set redirect URL to Service_CLICKUP_OAUTH_REDIRECT_URI exactly.
+Service_CLICKUP_CLIENT_ID = O3FX3H602A30MQN4I4SBNGJLIDBD5SL4 +Service_CLICKUP_CLIENT_SECRET = CZECD706WLSX6UV13YI4ACNW50ADZHHXDAJALHE0YE030QFSI6Y9HP4Y61JT7CF0 +Service_CLICKUP_OAUTH_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/clickup/auth/connect/callback + # Stripe Billing (both end with _SECRET for encryption script) STRIPE_SECRET_KEY_SECRET = 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 STRIPE_WEBHOOK_SECRET = PROD_ENC:Z0FBQUFBQnBudkpGNUpTWldsakYydFhFelBrR1lSaWxYT3kyMENOMUljZTJUZHBWcEhhdWVCMzYxZXQ5b3VlTFVRalFiTVdsbGxrdUx0RDFwSEpsOC1sTDJRTEJNQlA3S3ZaQzBtV1h6bWp5VnlMZUgwUlF3cXYxcnljZVE5SWdzLVg3V0syOWRYS08= diff --git a/modules/auth/csrf.py b/modules/auth/csrf.py index ba21435b..7cc0c07c 100644 --- a/modules/auth/csrf.py +++ b/modules/auth/csrf.py @@ -35,6 +35,8 @@ class CSRFMiddleware(BaseHTTPMiddleware): "/api/google/auth/login/callback", "/api/google/auth/connect", "/api/google/auth/connect/callback", + "/api/clickup/auth/connect", + "/api/clickup/auth/connect/callback", "/api/billing/webhook/stripe", # Stripe webhook (auth via Stripe-Signature) } diff --git a/modules/connectors/connectorDbPostgre.py b/modules/connectors/connectorDbPostgre.py index 67cceb45..f0e38fdd 100644 --- a/modules/connectors/connectorDbPostgre.py +++ b/modules/connectors/connectorDbPostgre.py @@ -1026,6 +1026,9 @@ class DatabaseConnector: continue colType = fields.get(key, "TEXT") logger.debug(f"_buildPaginationClauses: filter key='{key}' val={val!r} type(val)={type(val).__name__} colType={colType}") + if val is None: + where_parts.append(f'"{key}" IS NULL') + continue if isinstance(val, dict): op = val.get("operator", "equals") v = val.get("value", "") diff --git a/modules/connectors/connectorResolver.py b/modules/connectors/connectorResolver.py index 4304378e..8ffdd73f 100644 --- a/modules/connectors/connectorResolver.py +++ b/modules/connectors/connectorResolver.py @@ -52,6 +52,12 @@ class ConnectorResolver: except ImportError: logger.debug("FtpConnector not available (stub)") + try: + from modules.connectors.providerClickup.connectorClickup import ClickupConnector + ConnectorResolver._providerRegistry["clickup"] = ClickupConnector + except ImportError: + logger.warning("ClickupConnector not available") + async def resolve(self, connectionId: str) -> ProviderConnector: """Resolve connectionId to a ProviderConnector with a fresh access token.""" connection = await self._loadConnection(connectionId) diff --git a/modules/connectors/connectorTicketsClickup.py b/modules/connectors/connectorTicketsClickup.py index 37480aa9..af02b44a 100644 --- a/modules/connectors/connectorTicketsClickup.py +++ b/modules/connectors/connectorTicketsClickup.py @@ -9,6 +9,7 @@ from typing import Optional import logging import aiohttp from modules.datamodels.datamodelTickets import TicketBase, TicketFieldAttribute +from modules.serviceCenter.services.serviceClickup.mainServiceClickup import clickup_authorization_header logger = logging.getLogger(__name__) @@ -30,7 +31,7 @@ class ConnectorTicketClickup(TicketBase): def _headers(self) -> dict: return { - "Authorization": self.apiToken, + "Authorization": clickup_authorization_header(self.apiToken), "Content-Type": "application/json", } diff --git a/modules/connectors/providerClickup/__init__.py b/modules/connectors/providerClickup/__init__.py new file mode 100644 index 00000000..12439593 --- /dev/null +++ b/modules/connectors/providerClickup/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved.
+"""ClickUp provider connector.""" + +from .connectorClickup import ClickupConnector + +__all__ = ["ClickupConnector"] diff --git a/modules/connectors/providerClickup/connectorClickup.py b/modules/connectors/providerClickup/connectorClickup.py new file mode 100644 index 00000000..cd49570e --- /dev/null +++ b/modules/connectors/providerClickup/connectorClickup.py @@ -0,0 +1,268 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +"""ClickUp ProviderConnector — virtual paths for teams → lists → tasks (table rows). + +Path convention (leading slash, no trailing slash except root): + / — authorized workspaces (teams) + /team/{teamId} — spaces in the workspace + /team/{teamId}/space/{spaceId} — folders + folderless lists + /team/{teamId}/space/{spaceId}/folder/{folderId} — lists in folder + /team/{teamId}/list/{listId} — tasks in list (rows) + /team/{teamId}/list/{listId}/task/{taskId} — single task (download = JSON) +""" + +from __future__ import annotations + +import json +import logging +import re +from typing import Any, Dict, List, Optional + +from modules.connectors.connectorProviderBase import ( + ProviderConnector, + ServiceAdapter, + DownloadResult, +) +from modules.datamodels.datamodelDataSource import ExternalEntry +from modules.serviceCenter.services.serviceClickup.mainServiceClickup import ClickupService + +logger = logging.getLogger(__name__) + +# type metadata for ExternalEntry.metadata["cuType"] +_CU_TEAM = "team" +_CU_SPACE = "space" +_CU_FOLDER = "folder" +_CU_LIST = "list" +_CU_TASK = "task" + + +def _norm(path: str) -> str: + p = (path or "").strip() or "/" + if not p.startswith("/"): + p = "/" + p + if p != "/" and p.endswith("/"): + p = p.rstrip("/") + return p + + +class ClickupListsAdapter(ServiceAdapter): + """Maps ClickUp hierarchy + list tasks to browse/download/upload/search.""" + + def __init__(self, access_token: str): + self._token = access_token + # Minimal service instance for API calls (no ServiceCenter context) + self._svc = ClickupService(context=None, get_service=lambda _: None) + self._svc.setAccessToken(access_token) + + async def browse(self, path: str, filter: Optional[str] = None) -> List[ExternalEntry]: + p = _norm(path) + out: List[ExternalEntry] = [] + + if p == "/": + data = await self._svc.getAuthorizedTeams() + if isinstance(data, dict) and data.get("error"): + logger.warning(f"ClickUp browse root: {data.get('error')}") + return [] + teams = data.get("teams", []) if isinstance(data, dict) else [] + for t in teams: + tid = str(t.get("id", "")) + name = t.get("name") or tid + out.append( + ExternalEntry( + name=name, + path=f"/team/{tid}", + isFolder=True, + metadata={"cuType": _CU_TEAM, "id": tid, "raw": t}, + ) + ) + return out + + m = re.match(r"^/team/([^/]+)$", p) + if m: + team_id = m.group(1) + data = await self._svc.getSpaces(team_id) + if isinstance(data, dict) and data.get("error"): + return [] + spaces = data.get("spaces", []) if isinstance(data, dict) else [] + for s in spaces: + sid = str(s.get("id", "")) + name = s.get("name") or sid + out.append( + ExternalEntry( + name=name, + path=f"/team/{team_id}/space/{sid}", + isFolder=True, + metadata={"cuType": _CU_SPACE, "id": sid, "raw": s}, + ) + ) + return out + + m = re.match(r"^/team/([^/]+)/space/([^/]+)$", p) + if m: + team_id, space_id = m.group(1), m.group(2) + folders_r = await self._svc.getFolders(space_id) + lists_r = await self._svc.getFolderlessLists(space_id) + if isinstance(folders_r, dict) and not folders_r.get("error"): + for f in folders_r.get("folders", []) or 
[]: + fid = str(f.get("id", "")) + name = f.get("name") or fid + out.append( + ExternalEntry( + name=name, + path=f"/team/{team_id}/space/{space_id}/folder/{fid}", + isFolder=True, + metadata={"cuType": _CU_FOLDER, "id": fid, "raw": f}, + ) + ) + if isinstance(lists_r, dict) and not lists_r.get("error"): + for lst in lists_r.get("lists", []) or []: + lid = str(lst.get("id", "")) + name = lst.get("name") or lid + out.append( + ExternalEntry( + name=name, + path=f"/team/{team_id}/list/{lid}", + isFolder=True, + metadata={"cuType": _CU_LIST, "id": lid, "raw": lst}, + ) + ) + return out + + m = re.match(r"^/team/([^/]+)/space/([^/]+)/folder/([^/]+)$", p) + if m: + team_id, _space_id, folder_id = m.group(1), m.group(2), m.group(3) + data = await self._svc.getListsInFolder(folder_id) + if isinstance(data, dict) and data.get("error"): + return [] + for lst in data.get("lists", []) or []: + lid = str(lst.get("id", "")) + name = lst.get("name") or lid + out.append( + ExternalEntry( + name=name, + path=f"/team/{team_id}/list/{lid}", + isFolder=True, + metadata={"cuType": _CU_LIST, "id": lid, "raw": lst}, + ) + ) + return out + + m = re.match(r"^/team/([^/]+)/list/([^/]+)$", p) + if m: + team_id, list_id = m.group(1), m.group(2) + page = 0 + while True: + data = await self._svc.getTasksInList(list_id, page=page) + if isinstance(data, dict) and data.get("error"): + break + tasks = data.get("tasks", []) if isinstance(data, dict) else [] + for task in tasks: + tid = str(task.get("id", "")) + name = task.get("name") or tid + out.append( + ExternalEntry( + name=name, + path=f"/team/{team_id}/list/{list_id}/task/{tid}", + isFolder=False, + metadata={ + "cuType": _CU_TASK, + "id": tid, + "task": task, + }, + ) + ) + if len(tasks) < 100: + break + page += 1 + return out + + m = re.match(r"^/team/([^/]+)/list/([^/]+)/task/([^/]+)$", p) + if m: + team_id, list_id, task_id = m.group(1), m.group(2), m.group(3) + out.append( + ExternalEntry( + name=f"task-{task_id}.json", + path=p, + isFolder=False, + metadata={"cuType": _CU_TASK, "id": task_id, "listId": list_id, "teamId": team_id}, + ) + ) + return out + + logger.warning(f"ClickUp browse: unsupported path {p}") + return [] + + async def download(self, path: str) -> Any: + p = _norm(path) + m = re.match(r"^/team/([^/]+)/list/([^/]+)/task/([^/]+)$", p) + if not m: + return b"" + task_id = m.group(3) + data = await self._svc.getTask(task_id) + if isinstance(data, dict) and data.get("error"): + return json.dumps(data).encode("utf-8") + payload = json.dumps(data, indent=2).encode("utf-8") + return DownloadResult(data=payload, fileName=f"task-{task_id}.json", mimeType="application/json") + + async def upload(self, path: str, data: bytes, fileName: str) -> dict: + """Upload attachment to a task. 
Path must be .../list/{listId}/task/{taskId}.""" + p = _norm(path) + m = re.match(r"^/team/([^/]+)/list/([^/]+)/task/([^/]+)$", p) + if not m: + return {"error": "Path must be /team/{teamId}/list/{listId}/task/{taskId} for upload"} + task_id = m.group(3) + return await self._svc.uploadTaskAttachment(task_id, data, fileName) + + async def search(self, query: str, path: Optional[str] = None) -> List[ExternalEntry]: + base = _norm(path or "/") + team_id: Optional[str] = None + mt = re.match(r"^/team/([^/]+)", base) + if mt: + team_id = mt.group(1) + if not team_id: + teams = await self._svc.getAuthorizedTeams() + if not isinstance(teams, dict) or teams.get("error"): + return [] + tl = teams.get("teams") or [] + if not tl: + return [] + team_id = str(tl[0].get("id", "")) + + out: List[ExternalEntry] = [] + page = 0 + while True: + data = await self._svc.searchTeamTasks(team_id, query=query, page=page) + if isinstance(data, dict) and data.get("error"): + break + tasks = data.get("tasks", []) if isinstance(data, dict) else [] + for task in tasks: + tid = str(task.get("id", "")) + name = task.get("name") or tid + list_obj = task.get("list") or {} + lid = str(list_obj.get("id", "")) if list_obj else "" + if not lid: + continue + out.append( + ExternalEntry( + name=name, + path=f"/team/{team_id}/list/{lid}/task/{tid}", + isFolder=False, + metadata={"cuType": _CU_TASK, "id": tid, "task": task}, + ) + ) + if len(tasks) < 25: + break + page += 1 + return out + + +class ClickupConnector(ProviderConnector): + """One ClickUp connection → clickup virtual file service.""" + + def getAvailableServices(self) -> List[str]: + return ["clickup"] + + def getServiceAdapter(self, service: str) -> ServiceAdapter: + if service != "clickup": + raise ValueError(f"ClickUp only supports 'clickup' service, got '{service}'") + return ClickupListsAdapter(self.accessToken) diff --git a/modules/datamodels/datamodelDataSource.py b/modules/datamodels/datamodelDataSource.py index f8238fab..baeac5ae 100644 --- a/modules/datamodels/datamodelDataSource.py +++ b/modules/datamodels/datamodelDataSource.py @@ -17,7 +17,9 @@ class DataSource(BaseModel): """Configured external data source linked to a UserConnection.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") connectionId: str = Field(description="FK to UserConnection") - sourceType: str = Field(description="sharepointFolder, googleDriveFolder, outlookFolder, ftpFolder") + sourceType: str = Field( + description="sharepointFolder, googleDriveFolder, outlookFolder, ftpFolder, clickupList (path under /team/...)" + ) path: str = Field(description="External path (e.g. 
'/sites/MySite/Documents/Reports')") label: str = Field(description="User-visible label (often the last path segment)") displayPath: Optional[str] = Field( diff --git a/modules/datamodels/datamodelUam.py b/modules/datamodels/datamodelUam.py index 22d94ebe..78858c8d 100644 --- a/modules/datamodels/datamodelUam.py +++ b/modules/datamodels/datamodelUam.py @@ -21,6 +21,7 @@ class AuthAuthority(str, Enum): LOCAL = "local" GOOGLE = "google" MSFT = "msft" + CLICKUP = "clickup" class ConnectionStatus(str, Enum): ACTIVE = "active" @@ -141,7 +142,12 @@ class UserConnection(BaseModel): @property def displayLabel(self) -> str: """Human-readable label for display in dropdowns""" - authorityLabels = {"msft": "Microsoft", "google": "Google", "local": "Local"} + authorityLabels = { + "msft": "Microsoft", + "google": "Google", + "local": "Local", + "clickup": "ClickUp", + } return f"{authorityLabels.get(self.authority.value, self.authority.value)}: {self.externalUsername}" diff --git a/modules/features/automation2/datamodelFeatureAutomation2.py b/modules/features/automation2/datamodelFeatureAutomation2.py index f505c7d0..4ccab460 100644 --- a/modules/features/automation2/datamodelFeatureAutomation2.py +++ b/modules/features/automation2/datamodelFeatureAutomation2.py @@ -36,6 +36,11 @@ class Automation2Workflow(BaseModel): description="Whether workflow is active", json_schema_extra={"frontend_type": "checkbox", "frontend_required": False}, ) + invocations: List[Dict[str, Any]] = Field( + default_factory=list, + description="Entry points / starts (manual, form, schedule, webhook, …) configured outside the canvas", + json_schema_extra={"frontend_type": "textarea", "frontend_required": False}, + ) registerModelLabels( @@ -48,6 +53,7 @@ registerModelLabels( "label": {"en": "Label", "de": "Bezeichnung", "fr": "Libellé"}, "graph": {"en": "Graph", "de": "Graph", "fr": "Graphe"}, "active": {"en": "Active", "de": "Aktiv", "fr": "Actif"}, + "invocations": {"en": "Starts / Entry points", "de": "Starts / Einstiegspunkte", "fr": "Points d'entrée"}, }, ) diff --git a/modules/features/automation2/entryPoints.py b/modules/features/automation2/entryPoints.py new file mode 100644 index 00000000..2bcc74ce --- /dev/null +++ b/modules/features/automation2/entryPoints.py @@ -0,0 +1,96 @@ +# Copyright (c) 2025 Patrick Motsch +""" +Workflow entry points (Starts) — configuration outside the flow editor. + +Kinds align with run envelope trigger.type where applicable. 
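+
+Illustrative entry-point shape, as produced by normalize_invocation_entry below (note that
+the cron expression for schedule starts lives on the graph's trigger.schedule node, not in
+the entry point's config):
+
+    {"id": "<uuid>", "kind": "schedule", "category": "always_on", "enabled": True,
+     "title": {"de": "Zeitplan", "en": "Schedule", "fr": "Planification"},
+     "description": {}, "config": {}}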
+""" + +import uuid +from typing import Any, Dict, List, Optional + +# On-demand (gear: Manueller Trigger, Formular) +KINDS_ON_DEMAND = frozenset({"manual", "form", "api"}) + +# Always-on (gear: Zeitplan, Immer aktiv, plus legacy listener kinds) +KINDS_ALWAYS_ON = frozenset({"schedule", "always_on", "email", "webhook", "event"}) + +ALL_KINDS = KINDS_ON_DEMAND | KINDS_ALWAYS_ON + + +def category_for_kind(kind: str) -> str: + if kind in KINDS_ALWAYS_ON: + return "always_on" + return "on_demand" + + +def default_manual_entry_point() -> Dict[str, Any]: + """Single default manual start when a workflow has no invocations yet.""" + return { + "id": str(uuid.uuid4()), + "kind": "manual", + "category": "on_demand", + "enabled": True, + "title": { + "de": "Jetzt ausführen", + "en": "Run now", + "fr": "Exécuter", + }, + "description": {}, + "config": {}, + } + + +def _normalize_title(title: Any) -> Dict[str, str]: + if isinstance(title, dict): + return {k: str(v) for k, v in title.items() if v is not None} + if isinstance(title, str) and title.strip(): + return {"de": title, "en": title, "fr": title} + return {"de": "Start", "en": "Start", "fr": "Départ"} + + +def normalize_invocation_entry(raw: Dict[str, Any]) -> Dict[str, Any]: + """Validate and normalize a single entry point dict.""" + kind = (raw.get("kind") or "manual").strip() + if kind not in ALL_KINDS: + kind = "manual" + cat = raw.get("category") + if cat not in ("on_demand", "always_on"): + cat = category_for_kind(kind) + eid = raw.get("id") or str(uuid.uuid4()) + enabled = raw.get("enabled", True) + if not isinstance(enabled, bool): + enabled = bool(enabled) + config = raw.get("config") if isinstance(raw.get("config"), dict) else {} + desc = raw.get("description") if isinstance(raw.get("description"), dict) else {} + return { + "id": str(eid), + "kind": kind, + "category": cat, + "enabled": enabled, + "title": _normalize_title(raw.get("title")), + "description": desc, + "config": config, + } + + +def normalize_invocations_list(items: Optional[List[Any]]) -> List[Dict[str, Any]]: + if not items: + return [default_manual_entry_point()] + out: List[Dict[str, Any]] = [] + for raw in items: + if isinstance(raw, dict): + out.append(normalize_invocation_entry(raw)) + if not out: + return [default_manual_entry_point()] + return out + + +# Schedule / cron: wire an external job runner (APScheduler, Celery, system cron) to call +# POST .../execute with entryPointId set to a schedule entry — no separate in-process scheduler here yet. 
+ + +def find_invocation(workflow: Dict[str, Any], entry_point_id: str) -> Optional[Dict[str, Any]]: + for inv in workflow.get("invocations") or []: + if isinstance(inv, dict) and inv.get("id") == entry_point_id: + return inv + return None diff --git a/modules/features/automation2/interfaceFeatureAutomation2.py b/modules/features/automation2/interfaceFeatureAutomation2.py index cdc9bccf..b38b21db 100644 --- a/modules/features/automation2/interfaceFeatureAutomation2.py +++ b/modules/features/automation2/interfaceFeatureAutomation2.py @@ -30,6 +30,7 @@ from modules.features.automation2.datamodelFeatureAutomation2 import ( Automation2WorkflowRun, Automation2HumanTask, ) +from modules.features.automation2.entryPoints import normalize_invocations_list from modules.connectors.connectorDbPostgre import DatabaseConnector from modules.shared.configuration import APP_CONFIG @@ -49,6 +50,83 @@ def getAutomation2Interface( ) + +def getAllWorkflowsForScheduling() -> List[Dict[str, Any]]: + """ + Get all schedulable Automation2 workflows: those not explicitly inactive (active=True or NULL) + whose graph contains a trigger.schedule node with a cron expression. + Used by the scheduler to register cron jobs. Does not filter by mandate/instance. + """ + dbHost = APP_CONFIG.get("DB_HOST", "localhost") + dbDatabase = "poweron_automation2" + dbUser = APP_CONFIG.get("DB_USER") + dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD") + dbPort = int(APP_CONFIG.get("DB_PORT", 5432)) + connector = DatabaseConnector( + dbHost=dbHost, + dbDatabase=dbDatabase, + dbUser=dbUser, + dbPassword=dbPassword, + dbPort=dbPort, + userId=None, + ) + if not connector._ensureTableExists(Automation2Workflow): + logger.warning("Automation2 schedule: table Automation2Workflow does not exist") + return [] + # Don't filter by active in SQL: existing workflows may have active=NULL. + # Treat NULL as active; skip only when active is explicitly False. 
+ records = connector.getRecordset( + Automation2Workflow, + recordFilter=None, + ) + raw_count = len(records) if records else 0 + result = [] + for r in records or []: + if r.get("active") is False: + continue + wf = dict(r) + wf["invocations"] = normalize_invocations_list(wf.get("invocations")) + invocations = wf.get("invocations") or [] + primary = invocations[0] if invocations else {} + if not isinstance(primary, dict): + primary = {} + + # Cron comes from graph start node params (trigger.schedule) + graph = wf.get("graph") or {} + nodes = graph.get("nodes") or [] + cron = None + for n in nodes: + if n.get("type") == "trigger.schedule": + params = n.get("parameters") or {} + cron = params.get("cron") + if cron: + break + + if not cron or not isinstance(cron, str) or not cron.strip(): + continue + + # Prefer invocations; if graph has trigger.schedule but invocations say manual, still schedule + if primary.get("kind") == "schedule" and primary.get("enabled", True): + entry_point_id = primary.get("id") + elif invocations and isinstance(invocations[0], dict) and invocations[0].get("id"): + entry_point_id = invocations[0].get("id") + else: + entry_point_id = str(uuid.uuid4()) + + result.append({ + "workflowId": wf.get("id"), + "mandateId": wf.get("mandateId"), + "featureInstanceId": wf.get("featureInstanceId"), + "entryPointId": entry_point_id, + "cron": cron.strip(), + "workflow": wf, + }) + logger.info( + "Automation2 schedule: DB has %d workflow(s), %d active with trigger.schedule+cron", + raw_count, + len(result), + ) + return result + + class Automation2Objects: """Interface for Automation2 database operations.""" @@ -87,18 +165,26 @@ class Automation2Objects: # Workflow CRUD # ------------------------------------------------------------------------- - def getWorkflows(self) -> List[Dict[str, Any]]: - """Get all workflows for this mandate and feature instance.""" + def getWorkflows(self, active: Optional[bool] = None) -> List[Dict[str, Any]]: + """Get all workflows for this mandate and feature instance. + Optional active filter: True=only active, False=only inactive, None=all. 
+ """ if not self.db._ensureTableExists(Automation2Workflow): return [] + rf: Dict[str, Any] = { + "mandateId": self.mandateId, + "featureInstanceId": self.featureInstanceId, + } + if active is not None: + rf["active"] = active records = self.db.getRecordset( Automation2Workflow, - recordFilter={ - "mandateId": self.mandateId, - "featureInstanceId": self.featureInstanceId, - }, + recordFilter=rf, ) - return [dict(r) for r in records] if records else [] + rows = [dict(r) for r in records] if records else [] + for wf in rows: + wf["invocations"] = normalize_invocations_list(wf.get("invocations")) + return rows def getWorkflow(self, workflowId: str) -> Optional[Dict[str, Any]]: """Get a single workflow by ID.""" @@ -114,7 +200,9 @@ class Automation2Objects: ) if not records: return None - return dict(records[0]) + wf = dict(records[0]) + wf["invocations"] = normalize_invocations_list(wf.get("invocations")) + return wf def createWorkflow(self, data: Dict[str, Any]) -> Dict[str, Any]: """Create a new workflow.""" @@ -122,8 +210,18 @@ class Automation2Objects: data["id"] = str(uuid.uuid4()) data["mandateId"] = self.mandateId data["featureInstanceId"] = self.featureInstanceId + if "active" not in data or data.get("active") is None: + data["active"] = True + data["invocations"] = normalize_invocations_list(data.get("invocations")) created = self.db.recordCreate(Automation2Workflow, data) - return dict(created) + out = dict(created) + out["invocations"] = normalize_invocations_list(out.get("invocations")) + try: + from modules.shared.callbackRegistry import callbackRegistry + callbackRegistry.trigger("automation2.workflow.changed") + except Exception: + pass + return out def updateWorkflow(self, workflowId: str, data: Dict[str, Any]) -> Optional[Dict[str, Any]]: """Update an existing workflow.""" @@ -133,8 +231,17 @@ class Automation2Objects: # Don't overwrite mandateId/featureInstanceId data.pop("mandateId", None) data.pop("featureInstanceId", None) + if "invocations" in data: + data["invocations"] = normalize_invocations_list(data.get("invocations")) updated = self.db.recordModify(Automation2Workflow, workflowId, data) - return dict(updated) + out = dict(updated) + out["invocations"] = normalize_invocations_list(out.get("invocations")) + try: + from modules.shared.callbackRegistry import callbackRegistry + callbackRegistry.trigger("automation2.workflow.changed") + except Exception: + pass + return out def deleteWorkflow(self, workflowId: str) -> bool: """Delete a workflow.""" @@ -142,6 +249,11 @@ class Automation2Objects: if not existing: return False self.db.recordDelete(Automation2Workflow, workflowId) + try: + from modules.shared.callbackRegistry import callbackRegistry + callbackRegistry.trigger("automation2.workflow.changed") + except Exception: + pass return True # ------------------------------------------------------------------------- @@ -209,6 +321,28 @@ class Automation2Objects: ) return [dict(r) for r in records] if records else [] + def getRecentCompletedRuns(self, limit: int = 20) -> List[Dict[str, Any]]: + """Get recently completed runs for workflows in this instance (for output display).""" + if not self.db._ensureTableExists(Automation2WorkflowRun): + return [] + workflows = self.getWorkflows() + wf_ids = [w["id"] for w in workflows if w.get("id")] + if not wf_ids: + return [] + records = self.db.getRecordset( + Automation2WorkflowRun, + recordFilter={"status": "completed"}, + ) + if not records: + return [] + runs = [dict(r) for r in records if r.get("workflowId") in wf_ids] + 
wf_by_id = {w["id"]: w for w in workflows} + for r in runs: + wf = wf_by_id.get(r.get("workflowId"), {}) + r["workflowLabel"] = wf.get("label") or r.get("workflowId", "") + runs.sort(key=lambda x: (x.get("_modifiedAt") or x.get("_createdAt") or 0), reverse=True) + return runs[:limit] + def getRunsWaitingForEmail(self) -> List[Dict[str, Any]]: """Get all paused runs waiting for a new email (for background poller).""" if not self.db._ensureTableExists(Automation2WorkflowRun): @@ -289,23 +423,38 @@ class Automation2Objects: status: str = None, assigneeId: str = None, ) -> List[Dict[str, Any]]: - """Get tasks with optional filters. AssigneeId filters to that user; None returns all.""" + """Get tasks with optional filters. + When assigneeId is set: returns tasks assigned to that user OR unassigned (so schedule tasks show up). + When assigneeId is None: returns all tasks. + """ if not self.db._ensureTableExists(Automation2HumanTask): return [] - rf = {} + base_rf: Dict[str, Any] = {} if workflowId: - rf["workflowId"] = workflowId + base_rf["workflowId"] = workflowId if runId: - rf["runId"] = runId + base_rf["runId"] = runId if status: - rf["status"] = status + base_rf["status"] = status if assigneeId: - rf["assigneeId"] = assigneeId - records = self.db.getRecordset( - Automation2HumanTask, - recordFilter=rf if rf else None, - ) - items = [dict(r) for r in records] if records else [] + rf_assigned = {**base_rf, "assigneeId": assigneeId} + rf_unassigned = {**base_rf, "assigneeId": None} + records1 = self.db.getRecordset(Automation2HumanTask, recordFilter=rf_assigned) + records2 = self.db.getRecordset(Automation2HumanTask, recordFilter=rf_unassigned) + seen = set() + items = [] + for r in (records1 or []) + (records2 or []): + rec = dict(r) + tid = rec.get("id") + if tid and tid not in seen: + seen.add(tid) + items.append(rec) + else: + records = self.db.getRecordset( + Automation2HumanTask, + recordFilter=base_rf if base_rf else None, + ) + items = [dict(r) for r in records] if records else [] workflows = {w["id"]: w for w in self.getWorkflows()} filtered = [t for t in items if t.get("workflowId") in workflows] return filtered diff --git a/modules/features/automation2/mainAutomation2.py b/modules/features/automation2/mainAutomation2.py index 9ec97eca..4f52e158 100644 --- a/modules/features/automation2/mainAutomation2.py +++ b/modules/features/automation2/mainAutomation2.py @@ -19,6 +19,8 @@ REQUIRED_SERVICES = [ {"serviceKey": "ai", "meta": {"usage": "AI nodes"}}, {"serviceKey": "extraction", "meta": {"usage": "Workflow method actions"}}, {"serviceKey": "sharepoint", "meta": {"usage": "SharePoint actions"}}, + {"serviceKey": "clickup", "meta": {"usage": "ClickUp actions"}}, + {"serviceKey": "generation", "meta": {"usage": "file.create document rendering"}}, ] FEATURE_LABEL = {"en": "Automation 2", "de": "Automatisierung 2", "fr": "Automatisation 2"} FEATURE_ICON = "mdi-sitemap" @@ -157,6 +159,8 @@ class _Automation2ServiceHub: utils = None extraction = None sharepoint = None + clickup = None + generation = None async def onStart(eventUser) -> None: diff --git a/modules/features/automation2/nodeDefinitions/__init__.py b/modules/features/automation2/nodeDefinitions/__init__.py index 61eec51a..2f4920c8 100644 --- a/modules/features/automation2/nodeDefinitions/__init__.py +++ b/modules/features/automation2/nodeDefinitions/__init__.py @@ -3,18 +3,20 @@ from .triggers import TRIGGER_NODES from .flow import FLOW_NODES -from .data import DATA_NODES from .input import INPUT_NODES from .ai import AI_NODES 
from .email import EMAIL_NODES from .sharepoint import SHAREPOINT_NODES +from .clickup import CLICKUP_NODES +from .file import FILE_NODES STATIC_NODE_TYPES = ( TRIGGER_NODES + FLOW_NODES - + DATA_NODES + INPUT_NODES + AI_NODES + EMAIL_NODES + SHAREPOINT_NODES + + CLICKUP_NODES + + FILE_NODES ) diff --git a/modules/features/automation2/nodeDefinitions/ai.py b/modules/features/automation2/nodeDefinitions/ai.py index 4fdf0db9..bb85e809 100644 --- a/modules/features/automation2/nodeDefinitions/ai.py +++ b/modules/features/automation2/nodeDefinitions/ai.py @@ -9,7 +9,6 @@ AI_NODES = [ "description": {"en": "Enter a prompt and AI does something", "de": "Prompt eingeben und KI führt aus", "fr": "Entrer une invite et l'IA exécute"}, "parameters": [ {"name": "prompt", "type": "string", "required": True, "description": {"en": "AI prompt", "de": "KI-Prompt", "fr": "Invite IA"}}, - {"name": "resultType", "type": "string", "required": False, "description": {"en": "Output format (txt, json, md, etc.)", "de": "Ausgabeformat", "fr": "Format de sortie"}, "default": "txt"}, ], "inputs": 1, "outputs": 1, @@ -85,7 +84,6 @@ AI_NODES = [ "description": {"en": "Generate document from prompt", "de": "Dokument aus Prompt generieren", "fr": "Générer un document"}, "parameters": [ {"name": "prompt", "type": "string", "required": True, "description": {"en": "Generation prompt", "de": "Generierungs-Prompt", "fr": "Invite de génération"}}, - {"name": "format", "type": "string", "required": False, "description": {"en": "Output format", "de": "Ausgabeformat", "fr": "Format de sortie"}, "default": "docx"}, ], "inputs": 1, "outputs": 1, diff --git a/modules/features/automation2/nodeDefinitions/clickup.py b/modules/features/automation2/nodeDefinitions/clickup.py new file mode 100644 index 00000000..4acb0db9 --- /dev/null +++ b/modules/features/automation2/nodeDefinitions/clickup.py @@ -0,0 +1,227 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. 
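# Since STATIC_NODE_TYPES above is plain list concatenation, category filtering needs no
# registry support. A one-line sketch (helper name is illustrative):
def nodes_in_category(nodes, category):
    """Return node definitions whose category field matches."""
    return [n for n in nodes if n.get("category") == category]

# nodes_in_category(STATIC_NODE_TYPES, "clickup") yields the CLICKUP_NODES entries.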
+"""ClickUp nodes — map to MethodClickup actions.""" + +CLICKUP_NODES = [ + { + "id": "clickup.searchTasks", + "category": "clickup", + "label": {"en": "Search tasks", "de": "Aufgaben suchen", "fr": "Rechercher tâches"}, + "description": { + "en": "Search tasks in a workspace (team)", + "de": "Aufgaben in einem Workspace suchen", + "fr": "Rechercher des tâches dans un espace", + }, + "parameters": [ + {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "teamId", "type": "string", "required": True, "description": {"en": "Workspace (team) ID", "de": "Team-/Workspace-ID", "fr": "ID équipe"}}, + {"name": "query", "type": "string", "required": True, "description": {"en": "Search query", "de": "Suchbegriff", "fr": "Requête"}}, + {"name": "page", "type": "number", "required": False, "description": {"en": "Page", "de": "Seite", "fr": "Page"}, "default": 0}, + { + "name": "listId", + "type": "string", + "required": False, + "description": { + "en": "If set, search this list via list API (not team search).", + "de": "Wenn gesetzt: Suche in dieser Liste (Listen-API, nicht Team-Suche).", + "fr": "Si défini : recherche dans cette liste (API liste).", + }, + }, + { + "name": "includeClosed", + "type": "boolean", + "required": False, + "default": False, + "description": { + "en": "With listId: include closed tasks.", + "de": "Mit Liste: erledigte Aufgaben einbeziehen.", + "fr": "Avec liste : inclure les tâches terminées.", + }, + }, + { + "name": "fullTaskData", + "type": "boolean", + "required": False, + "default": False, + "description": { + "en": "Return full ClickUp API JSON per task (very large). Default: slim fields only.", + "de": "Vollständige ClickUp-Rohdaten pro Task (sehr groß). Standard: nur schlanke Felder.", + "fr": "Réponse brute complète (très volumineuse). 
Par défaut : champs réduits.", + }, + }, + { + "name": "matchNameOnly", + "type": "boolean", + "required": False, + "default": True, + "description": { + "en": "Keep only tasks whose title contains the search query (default: on).", + "de": "Nur Aufgaben, deren Titel den Suchbegriff enthält (Standard: an).", + "fr": "Ne garder que les tâches dont le titre contient la requête (défaut : oui).", + }, + }, + ], + "inputs": 1, + "outputs": 1, + "meta": {"icon": "mdi-magnify", "color": "#7B68EE"}, + "_method": "clickup", + "_action": "searchTasks", + "_paramMap": { + "connectionId": "connectionReference", + "teamId": "teamId", + "query": "query", + "page": "page", + "listId": "listId", + "fullTaskData": "fullTaskData", + "matchNameOnly": "matchNameOnly", + "includeClosed": "includeClosed", + }, + }, + { + "id": "clickup.listTasks", + "category": "clickup", + "label": {"en": "List tasks", "de": "Aufgaben auflisten", "fr": "Lister les tâches"}, + "description": { + "en": "List tasks in a list (pick list path from browse)", + "de": "Aufgaben einer Liste auflisten (Pfad aus Browse)", + "fr": "Lister les tâches d'une liste", + }, + "parameters": [ + {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "path", "type": "string", "required": True, "description": {"en": "Virtual path to list /team/.../list/...", "de": "Pfad zur Liste", "fr": "Chemin vers la liste"}}, + {"name": "page", "type": "number", "required": False, "description": {"en": "Page", "de": "Seite", "fr": "Page"}, "default": 0}, + {"name": "includeClosed", "type": "boolean", "required": False, "description": {"en": "Include closed", "de": "Erledigte einbeziehen", "fr": "Inclure terminées"}, "default": False}, + ], + "inputs": 1, + "outputs": 1, + "meta": {"icon": "mdi-format-list-bulleted", "color": "#7B68EE"}, + "_method": "clickup", + "_action": "listTasks", + "_paramMap": { + "connectionId": "connectionReference", + "path": "pathQuery", + "page": "page", + "includeClosed": "includeClosed", + }, + }, + { + "id": "clickup.getTask", + "category": "clickup", + "label": {"en": "Get task", "de": "Aufgabe abrufen", "fr": "Obtenir la tâche"}, + "description": {"en": "Get one task by ID or path", "de": "Eine Aufgabe abrufen", "fr": "Obtenir une tâche"}, + "parameters": [ + {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "taskId", "type": "string", "required": False, "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}}, + {"name": "path", "type": "string", "required": False, "description": {"en": "Or path .../task/{id}", "de": "Oder Pfad .../task/{id}", "fr": "Ou chemin .../task/{id}"}}, + ], + "inputs": 1, + "outputs": 1, + "meta": {"icon": "mdi-file-document-outline", "color": "#7B68EE"}, + "_method": "clickup", + "_action": "getTask", + "_paramMap": {"connectionId": "connectionReference", "taskId": "taskId", "path": "pathQuery"}, + }, + { + "id": "clickup.createTask", + "category": "clickup", + "label": {"en": "Create task", "de": "Aufgabe erstellen", "fr": "Créer une tâche"}, + "description": {"en": "Create a task in a list", "de": "Aufgabe in einer Liste erstellen", "fr": "Créer une tâche dans une liste"}, + "parameters": [ + {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, 
+ {"name": "teamId", "type": "string", "required": False, "description": {"en": "Workspace (team) for list picker", "de": "Workspace für Listen-Auswahl", "fr": "Équipe"}}, + {"name": "path", "type": "string", "required": False, "description": {"en": "Optional path /team/.../list/...", "de": "Optional: Pfad zur Liste", "fr": "Chemin optionnel"}}, + {"name": "listId", "type": "string", "required": False, "description": {"en": "List ID", "de": "Listen-ID", "fr": "ID liste"}}, + {"name": "name", "type": "string", "required": True, "description": {"en": "Task name", "de": "Name", "fr": "Nom"}}, + {"name": "description", "type": "string", "required": False, "description": {"en": "Description", "de": "Beschreibung", "fr": "Description"}}, + {"name": "taskStatus", "type": "string", "required": False, "description": {"en": "Status (list status name)", "de": "Status (wie in der Liste)", "fr": "Statut"}}, + {"name": "taskPriority", "type": "string", "required": False, "description": {"en": "1–4 or empty", "de": "1–4 oder leer", "fr": "1–4"}}, + {"name": "taskDueDateMs", "type": "string", "required": False, "description": {"en": "Due date (Unix ms)", "de": "Fälligkeit (ms)", "fr": "Échéance (ms)"}}, + {"name": "taskAssigneeIds", "type": "object", "required": False, "description": {"en": "Assignee user ids", "de": "Zugewiesene (User-IDs)", "fr": "Assignés"}}, + {"name": "taskTimeEstimateMs", "type": "string", "required": False, "description": {"en": "Time estimate (ms)", "de": "Zeitschätzung (ms)", "fr": "Estimation (ms)"}}, + {"name": "taskTimeEstimateHours", "type": "string", "required": False, "description": {"en": "Time estimate (hours)", "de": "Zeitschätzung (Stunden)", "fr": "Heures"}}, + {"name": "customFieldValues", "type": "object", "required": False, "description": {"en": "Custom field id → value", "de": "Benutzerdefinierte Felder", "fr": "Champs personnalisés"}}, + {"name": "taskFields", "type": "string", "required": False, "description": {"en": "Extra JSON (advanced)", "de": "Zusätzliches JSON (fortgeschritten)", "fr": "JSON avancé"}}, + ], + "inputs": 1, + "outputs": 1, + "meta": {"icon": "mdi-plus-circle-outline", "color": "#7B68EE"}, + "_method": "clickup", + "_action": "createTask", + "_paramMap": { + "connectionId": "connectionReference", + "teamId": "teamId", + "path": "pathQuery", + "listId": "listId", + "name": "name", + "description": "description", + "taskStatus": "taskStatus", + "taskPriority": "taskPriority", + "taskDueDateMs": "taskDueDateMs", + "taskAssigneeIds": "taskAssigneeIds", + "taskTimeEstimateMs": "taskTimeEstimateMs", + "taskTimeEstimateHours": "taskTimeEstimateHours", + "customFieldValues": "customFieldValues", + "taskFields": "taskFields", + }, + }, + { + "id": "clickup.updateTask", + "category": "clickup", + "label": {"en": "Update task", "de": "Aufgabe aktualisieren", "fr": "Mettre à jour la tâche"}, + "description": { + "en": "Update task fields (rows or JSON)", + "de": "Felder der Aufgabe ändern (Zeilen oder JSON)", + "fr": "Mettre à jour les champs (lignes ou JSON)", + }, + "parameters": [ + {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "taskId", "type": "string", "required": False, "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}}, + {"name": "path", "type": "string", "required": False, "description": {"en": "Or path to task", "de": "Oder Pfad", "fr": "Ou chemin"}}, + { + "name": "taskUpdateEntries", + "type": 
"object", + "required": False, + "description": { + "en": "List of {fieldKey, value, customFieldId?}", + "de": "Liste der zu ändernden Felder (fieldKey, value, optional customFieldId)", + "fr": "Liste de champs à mettre à jour", + }, + }, + {"name": "taskUpdate", "type": "string", "required": False, "description": {"en": "JSON body for API (optional if rows set)", "de": "JSON für API (optional wenn Zeilen gesetzt)", "fr": "Corps JSON"}}, + ], + "inputs": 1, + "outputs": 1, + "meta": {"icon": "mdi-pencil-outline", "color": "#7B68EE"}, + "_method": "clickup", + "_action": "updateTask", + "_paramMap": { + "connectionId": "connectionReference", + "taskId": "taskId", + "path": "path", + "taskUpdate": "taskUpdate", + }, + }, + { + "id": "clickup.uploadAttachment", + "category": "clickup", + "label": {"en": "Upload attachment", "de": "Anhang hochladen", "fr": "Téléverser pièce jointe"}, + "description": {"en": "Upload file to a task (upstream file)", "de": "Datei an Task anhängen", "fr": "Joindre un fichier à la tâche"}, + "parameters": [ + {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "taskId", "type": "string", "required": False, "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}}, + {"name": "path", "type": "string", "required": False, "description": {"en": "Or path to task", "de": "Oder Pfad", "fr": "Ou chemin"}}, + {"name": "fileName", "type": "string", "required": False, "description": {"en": "File name", "de": "Dateiname", "fr": "Nom du fichier"}}, + ], + "inputs": 1, + "outputs": 1, + "meta": {"icon": "mdi-attachment", "color": "#7B68EE"}, + "_method": "clickup", + "_action": "uploadAttachment", + "_paramMap": { + "connectionId": "connectionReference", + "taskId": "taskId", + "path": "path", + "fileName": "fileName", + }, + }, +] diff --git a/modules/features/automation2/nodeDefinitions/data.py b/modules/features/automation2/nodeDefinitions/data.py deleted file mode 100644 index b44618d1..00000000 --- a/modules/features/automation2/nodeDefinitions/data.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2025 Patrick Motsch -# Data transformation node definitions. - -DATA_NODES = [ - { - "id": "data.setFields", - "category": "data", - "label": {"en": "Set Fields", "de": "Felder setzen", "fr": "Définir champs"}, - "description": {"en": "Set or override fields on payload", "de": "Felder setzen oder überschreiben", "fr": "Définir ou écraser des champs"}, - "parameters": [ - {"name": "fields", "type": "object", "required": True, "description": {"en": "Key-value pairs", "de": "Schlüssel-Wert-Paare", "fr": "Paires clé-valeur"}}, - ], - "inputs": 1, - "outputs": 1, - "executor": "data", - "meta": {"icon": "mdi-pencil", "color": "#673AB7"}, - }, - { - "id": "data.filter", - "category": "data", - "label": {"en": "Filter", "de": "Filtern", "fr": "Filtrer"}, - "description": {"en": "Filter array by condition", "de": "Array nach Bedingung filtern", "fr": "Filtrer tableau par condition"}, - "parameters": [ - {"name": "condition", "type": "string", "required": True, "description": {"en": "Expression (e.g. 
item.active == true)", "de": "Bedingung", "fr": "Condition"}}, - {"name": "itemsPath", "type": "string", "required": False, "description": {"en": "Path to array", "de": "Pfad zum Array", "fr": "Chemin vers le tableau"}}, - ], - "inputs": 1, - "outputs": 1, - "executor": "data", - "meta": {"icon": "mdi-filter", "color": "#673AB7"}, - }, - { - "id": "data.parseJson", - "category": "data", - "label": {"en": "Parse JSON", "de": "JSON parsen", "fr": "Parser JSON"}, - "description": {"en": "Parse JSON string to object", "de": "JSON-String in Objekt parsen", "fr": "Parser chaîne JSON en objet"}, - "parameters": [ - {"name": "jsonPath", "type": "string", "required": False, "description": {"en": "Path to JSON string (default: input)", "de": "Pfad zum JSON", "fr": "Chemin vers JSON"}}, - ], - "inputs": 1, - "outputs": 1, - "executor": "data", - "meta": {"icon": "mdi-code-json", "color": "#673AB7"}, - }, - { - "id": "data.template", - "category": "data", - "label": {"en": "Template / Interpolation", "de": "Vorlage / Interpolation", "fr": "Modèle / Interpolation"}, - "description": {"en": "Text with {{placeholder}} substitution", "de": "Text mit {{platzhalter}}-Ersetzung", "fr": "Texte avec substitution {{placeholder}}"}, - "parameters": [ - {"name": "template", "type": "string", "required": True, "description": {"en": "Template (use {{path}} for values)", "de": "Vorlage", "fr": "Modèle"}}, - ], - "inputs": 1, - "outputs": 1, - "executor": "data", - "meta": {"icon": "mdi-format-text", "color": "#673AB7"}, - }, -] diff --git a/modules/features/automation2/nodeDefinitions/file.py b/modules/features/automation2/nodeDefinitions/file.py new file mode 100644 index 00000000..bb168218 --- /dev/null +++ b/modules/features/automation2/nodeDefinitions/file.py @@ -0,0 +1,60 @@ +# Copyright (c) 2025 Patrick Motsch +# File node definitions - create files from context (e.g. from AI nodes). + +FILE_NODES = [ + { + "id": "file.create", + "category": "file", + "label": {"en": "Create File", "de": "Datei erstellen", "fr": "Créer fichier"}, + "description": { + "en": "Create a file from context (text/markdown from AI). Configurable format and style.", + "de": "Erstellt eine Datei aus Kontext (Text/Markdown von KI). Format und Stil konfigurierbar.", + "fr": "Crée un fichier à partir du contexte. Format et style configurables.", + }, + "parameters": [ + { + "name": "contentSources", + "type": "json", + "required": False, + "description": { + "en": "Array of context refs (e.g. AI, form). Concatenated in order. Empty = from connected node.", + "de": "Liste von Kontext-Quellen (z.B. KI, Formular). Werden nacheinander zusammengefügt. Leer = vom verbundenen Node.", + "fr": "Liste de sources de contexte. Concaténées dans l'ordre. 
Vide = du noeud connecté.", + }, + "default": [], + }, + { + "name": "outputFormat", + "type": "string", + "required": True, + "description": {"en": "Output format", "de": "Ausgabeformat", "fr": "Format de sortie"}, + "default": "docx", + }, + { + "name": "title", + "type": "string", + "required": False, + "description": {"en": "Document title", "de": "Dokumenttitel", "fr": "Titre du document"}, + }, + { + "name": "templateName", + "type": "string", + "required": False, + "description": {"en": "Style preset: default, corporate, minimal", "de": "Stil-Vorlage", "fr": "Prését style"}, + }, + { + "name": "language", + "type": "string", + "required": False, + "description": {"en": "Language code (de, en, fr)", "de": "Sprachcode", "fr": "Code langue"}, + "default": "de", + }, + ], + "inputs": 1, + "outputs": 1, + "meta": {"icon": "mdi-file-plus-outline", "color": "#2196F3"}, + "_method": "file", + "_action": "create", + "_paramMap": {}, + }, +] diff --git a/modules/features/automation2/nodeDefinitions/flow.py b/modules/features/automation2/nodeDefinitions/flow.py index 573a83ad..02e25764 100644 --- a/modules/features/automation2/nodeDefinitions/flow.py +++ b/modules/features/automation2/nodeDefinitions/flow.py @@ -12,6 +12,7 @@ FLOW_NODES = [ ], "inputs": 1, "outputs": 2, + "outputLabels": {"en": ["Yes", "No"], "de": ["Ja", "Nein"], "fr": ["Oui", "Non"]}, "executor": "flow", "meta": {"icon": "mdi-source-branch", "color": "#FF9800"}, }, @@ -29,19 +30,6 @@ FLOW_NODES = [ "executor": "flow", "meta": {"icon": "mdi-swap-horizontal", "color": "#FF9800"}, }, - { - "id": "flow.merge", - "category": "flow", - "label": {"en": "Merge", "de": "Zusammenführen", "fr": "Fusionner"}, - "description": {"en": "Merge multiple inputs", "de": "Mehrere Eingaben zusammenführen", "fr": "Fusionner plusieurs entrées"}, - "parameters": [ - {"name": "mode", "type": "string", "required": False, "description": {"en": "append | combine", "de": "Modus", "fr": "Mode"}}, - ], - "inputs": 2, - "outputs": 1, - "executor": "flow", - "meta": {"icon": "mdi-merge", "color": "#FF9800"}, - }, { "id": "flow.loop", "category": "flow", @@ -55,28 +43,4 @@ FLOW_NODES = [ "executor": "flow", "meta": {"icon": "mdi-repeat", "color": "#FF9800"}, }, - { - "id": "flow.wait", - "category": "flow", - "label": {"en": "Wait / Delay", "de": "Warten / Verzögerung", "fr": "Attendre / Délai"}, - "description": {"en": "Pause for duration", "de": "Pause für Dauer", "fr": "Pause pour durée"}, - "parameters": [ - {"name": "seconds", "type": "number", "required": True, "description": {"en": "Seconds to wait", "de": "Sekunden", "fr": "Secondes"}}, - ], - "inputs": 1, - "outputs": 1, - "executor": "flow", - "meta": {"icon": "mdi-timer", "color": "#FF9800"}, - }, - { - "id": "flow.stop", - "category": "flow", - "label": {"en": "Stop / Terminate", "de": "Stopp / Beenden", "fr": "Arrêter / Terminer"}, - "description": {"en": "Stop workflow execution", "de": "Workflow-Ausführung beenden", "fr": "Arrêter l'exécution"}, - "parameters": [], - "inputs": 1, - "outputs": 0, - "executor": "flow", - "meta": {"icon": "mdi-stop", "color": "#F44336"}, - }, ] diff --git a/modules/features/automation2/nodeDefinitions/input.py b/modules/features/automation2/nodeDefinitions/input.py index 8eb43e63..d9c56c78 100644 --- a/modules/features/automation2/nodeDefinitions/input.py +++ b/modules/features/automation2/nodeDefinitions/input.py @@ -12,7 +12,11 @@ INPUT_NODES = [ "name": "fields", "type": "json", "required": True, - "description": {"en": "Form fields: [{name, type, label, 
required, options?}]", "de": "Formularfelder", "fr": "Champs du formulaire"}, + "description": { + "en": "Form fields: [{name, type, label, required, options?}]. type may include clickup_tasks with clickupConnectionId + clickupListId for a ClickUp task dropdown (value {add, rem}).", + "de": "Formularfelder. type: u. a. clickup_tasks mit clickupConnectionId und clickupListId für ClickUp-Aufgaben-Dropdown (Wert wie Relationship-Feld).", + "fr": "Champs du formulaire", + }, "default": [], }, ], @@ -42,7 +46,8 @@ INPUT_NODES = [ "label": {"en": "Upload", "de": "Upload", "fr": "Téléversement"}, "description": {"en": "User uploads file(s)", "de": "Benutzer lädt Datei(en) hoch", "fr": "L'utilisateur téléverse des fichiers"}, "parameters": [ - {"name": "accept", "type": "string", "required": False, "description": {"en": "MIME types (e.g. .pdf,image/*)", "de": "MIME-Typen", "fr": "Types MIME"}, "default": ""}, + {"name": "accept", "type": "string", "required": False, "description": {"en": "Accept string for file input (e.g. .pdf,image/*)", "de": "Accept-String für Dateiauswahl", "fr": "Chaîne accept"}, "default": ""}, + {"name": "allowedTypes", "type": "json", "required": False, "description": {"en": "Selected file types (from UI multi-select)", "de": "Ausgewählte Dateitypen", "fr": "Types sélectionnés"}, "default": []}, {"name": "maxSize", "type": "number", "required": False, "description": {"en": "Max file size in MB", "de": "Max. Dateigröße in MB", "fr": "Taille max en Mo"}, "default": 10}, {"name": "multiple", "type": "boolean", "required": False, "description": {"en": "Allow multiple files", "de": "Mehrere Dateien erlauben", "fr": "Autoriser plusieurs fichiers"}, "default": False}, ], diff --git a/modules/features/automation2/nodeDefinitions/triggers.py b/modules/features/automation2/nodeDefinitions/triggers.py index 0e206dc0..5071a762 100644 --- a/modules/features/automation2/nodeDefinitions/triggers.py +++ b/modules/features/automation2/nodeDefinitions/triggers.py @@ -1,12 +1,16 @@ # Copyright (c) 2025 Patrick Motsch -# Trigger node definitions - workflow entry points. +# Canvas start nodes — variant reflects workflow configuration (gear in editor). 
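# The upload node above now carries both a raw `accept` string and an `allowedTypes`
# multi-select. One plausible way to derive the effective accept attribute; the helper
# is a sketch, not code from this patch:
def effective_accept(accept, allowed_types):
    """Prefer the explicit accept string; otherwise join the selected types."""
    if accept:
        return accept
    return ",".join(str(t) for t in (allowed_types or []))

# effective_accept("", [".pdf", "image/*"]) -> ".pdf,image/*"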
TRIGGER_NODES = [ { "id": "trigger.manual", "category": "trigger", - "label": {"en": "Manual Trigger", "de": "Manueller Trigger", "fr": "Déclencheur manuel"}, - "description": {"en": "Start workflow on button press", "de": "Startet den Workflow bei Knopfdruck", "fr": "Démarre le workflow sur clic"}, + "label": {"en": "Start", "de": "Start", "fr": "Départ"}, + "description": { + "en": "Manual, API, or background triggers (webhook, email, …).", + "de": "Manuell, API oder Hintergrund-Starts (Webhook, E-Mail, …).", + "fr": "Manuel, API ou déclencheurs en arrière-plan.", + }, "parameters": [], "inputs": 0, "outputs": 1, @@ -14,29 +18,47 @@ TRIGGER_NODES = [ "meta": {"icon": "mdi-play", "color": "#4CAF50"}, }, { - "id": "trigger.schedule", + "id": "trigger.form", "category": "trigger", - "label": {"en": "Schedule", "de": "Zeitplan", "fr": "Planification"}, - "description": {"en": "Run on a cron schedule", "de": "Läuft nach Cron-Zeitplan", "fr": "S'exécute selon un cron"}, + "label": {"en": "Start (form)", "de": "Start (Formular)", "fr": "Départ (formulaire)"}, + "description": { + "en": "Form fields are filled at run time; configure fields on this node.", + "de": "Felder werden beim Start befüllt; konfigurieren Sie die Felder auf dieser Node.", + "fr": "Les champs sont remplis au démarrage.", + }, "parameters": [ - {"name": "cron", "type": "string", "required": True, "description": {"en": "Cron expression (e.g. 0 9 * * * for daily at 9)", "de": "Cron-Ausdruck", "fr": "Expression cron"}}, - ], - "inputs": 0, - "outputs": 1, - "executor": "trigger", - "meta": {"icon": "mdi-clock", "color": "#2196F3"}, - }, - { - "id": "trigger.formSubmit", - "category": "trigger", - "label": {"en": "Form Submit", "de": "Formular-Absendung", "fr": "Soumission formulaire"}, - "description": {"en": "Start when form is submitted", "de": "Startet bei Formular-Absendung", "fr": "Démarre à la soumission du formulaire"}, - "parameters": [ - {"name": "formId", "type": "string", "required": True, "description": {"en": "Form identifier", "de": "Formular-ID", "fr": "Identifiant du formulaire"}}, + { + "name": "formFields", + "type": "json", + "required": False, + "description": {"en": "Field definitions", "de": "Felddefinitionen", "fr": "Définitions"}, + }, ], "inputs": 0, "outputs": 1, "executor": "trigger", "meta": {"icon": "mdi-form-select", "color": "#9C27B0"}, }, + { + "id": "trigger.schedule", + "category": "trigger", + "label": {"en": "Start (schedule)", "de": "Start (Zeitplan)", "fr": "Départ (planification)"}, + "description": { + "en": "Cron expression for scheduled runs (configure on this node).", + "de": "Cron-Ausdruck für geplante Läufe.", + "fr": "Expression cron pour les exécutions planifiées.", + }, + "parameters": [ + { + "name": "cron", + "type": "string", + "required": False, + "description": {"en": "Cron expression", "de": "Cron-Ausdruck", "fr": "Expression cron"}, + }, + ], + "inputs": 0, + "outputs": 1, + "executor": "trigger", + "meta": {"icon": "mdi-clock", "color": "#2196F3"}, + }, ] diff --git a/modules/features/automation2/nodeRegistry.py b/modules/features/automation2/nodeRegistry.py index 39c3e2c9..4bcc9ba5 100644 --- a/modules/features/automation2/nodeRegistry.py +++ b/modules/features/automation2/nodeRegistry.py @@ -36,6 +36,11 @@ def _localizeNode(node: Dict[str, Any], language: str) -> Dict[str, Any]: out["label"] = node["label"].get(lang, node["label"].get("en", str(node["label"]))) if isinstance(node.get("description"), dict): out["description"] = node["description"].get(lang, 
node["description"].get("en", str(node["description"]))) + ol = node.get("outputLabels") + if isinstance(ol, dict) and ol: + first = next(iter(ol.values()), None) + if isinstance(first, (list, tuple)): + out["outputLabels"] = ol.get(lang, ol.get("en", list(first))) params = [] for p in node.get("parameters", []): pc = dict(p) @@ -61,8 +66,10 @@ def getNodeTypesForApi( {"id": "flow", "label": {"en": "Flow", "de": "Ablauf", "fr": "Flux"}}, {"id": "data", "label": {"en": "Data", "de": "Daten", "fr": "Données"}}, {"id": "ai", "label": {"en": "AI", "de": "KI", "fr": "IA"}}, + {"id": "file", "label": {"en": "File", "de": "Datei", "fr": "Fichier"}}, {"id": "email", "label": {"en": "Email", "de": "E-Mail", "fr": "Email"}}, {"id": "sharepoint", "label": {"en": "SharePoint", "de": "SharePoint", "fr": "SharePoint"}}, + {"id": "clickup", "label": {"en": "ClickUp", "de": "ClickUp", "fr": "ClickUp"}}, ] return {"nodeTypes": localized, "categories": categories} diff --git a/modules/features/automation2/routeFeatureAutomation2.py b/modules/features/automation2/routeFeatureAutomation2.py index 996c3cb6..eaa49370 100644 --- a/modules/features/automation2/routeFeatureAutomation2.py +++ b/modules/features/automation2/routeFeatureAutomation2.py @@ -5,6 +5,8 @@ Automation2 routes - node-types, execute, workflows, runs, tasks, connections, b """ import logging +from typing import Any, Dict, Optional + from fastapi import APIRouter, Depends, Path, Query, Body, Request, HTTPException from fastapi.responses import JSONResponse from modules.auth import limiter, getRequestContext, RequestContext @@ -13,9 +15,75 @@ from modules.features.automation2.mainAutomation2 import getAutomation2Services from modules.features.automation2.nodeRegistry import getNodeTypesForApi from modules.features.automation2.interfaceFeatureAutomation2 import getAutomation2Interface from modules.workflows.automation2.executionEngine import executeGraph +from modules.workflows.automation2.runEnvelope import ( + default_run_envelope, + merge_run_envelope, + normalize_run_envelope, +) +from modules.features.automation2.entryPoints import find_invocation logger = logging.getLogger(__name__) + +def _build_execute_run_envelope( + body: Dict[str, Any], + workflow: Optional[Dict[str, Any]], + user_id: Optional[str], +) -> Dict[str, Any]: + """Build normalized run envelope from POST /execute body.""" + if isinstance(body.get("runEnvelope"), dict): + env = normalize_run_envelope(body["runEnvelope"], user_id=user_id) + pl = body.get("payload") + if isinstance(pl, dict): + env = merge_run_envelope(env, {"payload": pl}) + return env + + entry_point_id = body.get("entryPointId") + if entry_point_id: + if not workflow: + raise HTTPException( + status_code=400, + detail="entryPointId requires a saved workflow (workflowId must refer to a stored workflow)", + ) + inv = find_invocation(workflow, entry_point_id) + if not inv: + raise HTTPException(status_code=400, detail="entryPointId not found on workflow") + if not inv.get("enabled", True): + raise HTTPException(status_code=400, detail="entry point is disabled") + kind = inv.get("kind", "manual") + trig_map = { + "manual": "manual", + "form": "form", + "schedule": "schedule", + "always_on": "event", + "email": "email", + "webhook": "webhook", + "api": "api", + "event": "event", + } + trig = trig_map.get(kind, "manual") + title = inv.get("title") or {} + label = "" + if isinstance(title, dict): + label = title.get("en") or title.get("de") or "" + elif isinstance(title, str): + label = title + base = 
default_run_envelope( + trig, + entry_point_id=inv.get("id"), + entry_point_label=label or None, + ) + pl = body.get("payload") + if isinstance(pl, dict): + base = merge_run_envelope(base, {"payload": pl}) + return normalize_run_envelope(base, user_id=user_id) + + env = normalize_run_envelope(None, user_id=user_id) + pl = body.get("payload") + if isinstance(pl, dict): + env = merge_run_envelope(env, {"payload": pl}) + return env + router = APIRouter( prefix="/api/automation2", tags=["Automation2"], @@ -55,6 +123,26 @@ def get_automation2_info( } +@router.post("/{instanceId}/schedule-sync") +@limiter.limit("10/minute") +def post_schedule_sync( + request: Request, + instanceId: str = Path(..., description="Feature instance ID"), + context: RequestContext = Depends(getRequestContext), +) -> dict: + """Manually trigger schedule sync (re-register cron jobs for all schedule workflows).""" + _validateInstanceAccess(instanceId, context) + from modules.interfaces.interfaceDbApp import getRootInterface + from modules.workflows.automation2.subAutomation2Schedule import sync_automation2_schedule_events + + root = getRootInterface() + event_user = root.getUserByUsername("event") + if not event_user: + return {"success": False, "error": "Event user not available", "synced": 0} + result = sync_automation2_schedule_events(event_user) + return {"success": True, **result} + + @router.get("/{instanceId}/node-types") @limiter.limit("60/minute") def get_node_types( @@ -109,6 +197,10 @@ async def post_execute( graph = body.get("graph") or body workflowId = body.get("workflowId") req_nodes = graph.get("nodes") or [] + workflow_for_envelope: Optional[Dict[str, Any]] = None + if workflowId and not str(workflowId).startswith("transient-"): + a2_pre = getAutomation2Interface(context.user, mandateId, instanceId) + workflow_for_envelope = a2_pre.getWorkflow(workflowId) # When workflowId is set: prefer graph from request (current editor state) if it has nodes. # Only fall back to stored workflow graph when request graph is empty (e.g. resume from email). if workflowId and len(req_nodes) == 0: @@ -117,6 +209,7 @@ async def post_execute( if wf and wf.get("graph"): graph = wf["graph"] logger.info("automation2 execute: loaded graph from workflow %s", workflowId) + workflow_for_envelope = wf # Use transient workflowId when none provided (e.g. 
execute from editor without save) # Required for email.checkEmail pause/resume - run must be created if not workflowId: @@ -132,6 +225,8 @@ async def post_execute( workflowId, mandateId, ) + run_env = _build_execute_run_envelope(body, workflow_for_envelope, userId) + a2_interface = getAutomation2Interface(context.user, mandateId, instanceId) result = await executeGraph( graph=graph, @@ -141,6 +236,7 @@ async def post_execute( userId=userId, mandateId=mandateId, automation2_interface=a2_interface, + run_envelope=run_env, ) logger.info( "automation2 execute result: success=%s error=%s nodeOutputs_keys=%s failedNode=%s paused=%s", @@ -239,6 +335,7 @@ async def list_connection_services( services = provider.getAvailableServices() _serviceLabels = { "sharepoint": "SharePoint", + "clickup": "ClickUp", "outlook": "Outlook", "teams": "Teams", "onedrive": "OneDrive", @@ -248,6 +345,7 @@ async def list_connection_services( } _serviceIcons = { "sharepoint": "sharepoint", + "clickup": "folder", "outlook": "mail", "teams": "chat", "onedrive": "cloud", @@ -342,15 +440,17 @@ def _get_node_label_from_graph(graph: dict, nodeId: str) -> str: def get_workflows( request: Request, instanceId: str = Path(..., description="Feature instance ID"), + active: Optional[bool] = Query(None, description="Filter by active: true|false"), context: RequestContext = Depends(getRequestContext), ) -> dict: """List all workflows for this feature instance. Enriches each workflow with runCount, isRunning, stuckAtNodeId, stuckAtNodeLabel, createdAt, lastStartedAt. + Query param active: filter by active status (true|false). """ mandateId = _validateInstanceAccess(instanceId, context) a2 = getAutomation2Interface(context.user, mandateId, instanceId) - items = a2.getWorkflows() + items = a2.getWorkflows(active=active) enriched = [] for wf in items: wf_id = wf.get("id") @@ -447,11 +547,163 @@ def delete_workflow( return {"success": True} +@router.post("/{instanceId}/workflows/{workflowId}/webhooks/{entryPointId}") +@limiter.limit("60/minute") +async def post_workflow_webhook( + request: Request, + instanceId: str = Path(..., description="Feature instance ID"), + workflowId: str = Path(..., description="Workflow ID"), + entryPointId: str = Path(..., description="Entry point ID (kind must be webhook)"), + body: dict = Body(default_factory=dict), + context: RequestContext = Depends(getRequestContext), +) -> dict: + """ + Invoke a workflow via a webhook entry point. Optional shared secret in + X-Automation2-Webhook-Secret or X-Webhook-Secret when config.webhookSecret is set. 
+ """ + mandateId = _validateInstanceAccess(instanceId, context) + userId = str(context.user.id) if context.user else None + a2 = getAutomation2Interface(context.user, mandateId, instanceId) + wf = a2.getWorkflow(workflowId) + if not wf or not wf.get("graph"): + raise HTTPException(status_code=404, detail="Workflow not found") + inv = find_invocation(wf, entryPointId) + if not inv: + raise HTTPException(status_code=404, detail="Entry point not found") + if inv.get("kind") != "webhook": + raise HTTPException(status_code=400, detail="Entry point is not a webhook") + if not inv.get("enabled", True): + raise HTTPException(status_code=400, detail="Entry point is disabled") + cfg = inv.get("config") or {} + secret = cfg.get("webhookSecret") + if secret: + hdr = request.headers.get("X-Automation2-Webhook-Secret") or request.headers.get( + "X-Webhook-Secret" + ) + if hdr != str(secret): + raise HTTPException(status_code=403, detail="Invalid webhook secret") + + services = getAutomation2Services( + context.user, + mandateId=mandateId, + featureInstanceId=instanceId, + ) + from modules.workflows.processing.shared.methodDiscovery import discoverMethods + + discoverMethods(services) + + title = inv.get("title") or {} + label = "" + if isinstance(title, dict): + label = title.get("en") or title.get("de") or "" + elif isinstance(title, str): + label = title + pl = body if isinstance(body, dict) else {} + base = default_run_envelope( + "webhook", + entry_point_id=inv.get("id"), + entry_point_label=label or None, + payload=pl, + raw={"httpBody": body}, + ) + run_env = normalize_run_envelope(base, user_id=userId) + + result = await executeGraph( + graph=wf["graph"], + services=services, + workflowId=workflowId, + instanceId=instanceId, + userId=userId, + mandateId=mandateId, + automation2_interface=a2, + run_envelope=run_env, + ) + return result + + +@router.post("/{instanceId}/workflows/{workflowId}/forms/{entryPointId}/submit") +@limiter.limit("60/minute") +async def post_workflow_form_submit( + request: Request, + instanceId: str = Path(..., description="Feature instance ID"), + workflowId: str = Path(..., description="Workflow ID"), + entryPointId: str = Path(..., description="Entry point ID (kind must be form)"), + body: dict = Body(default_factory=dict), + context: RequestContext = Depends(getRequestContext), +) -> dict: + """Form-style submit: same as execute with trigger.type form and payload from body.""" + mandateId = _validateInstanceAccess(instanceId, context) + userId = str(context.user.id) if context.user else None + a2 = getAutomation2Interface(context.user, mandateId, instanceId) + wf = a2.getWorkflow(workflowId) + if not wf or not wf.get("graph"): + raise HTTPException(status_code=404, detail="Workflow not found") + inv = find_invocation(wf, entryPointId) + if not inv: + raise HTTPException(status_code=404, detail="Entry point not found") + if inv.get("kind") != "form": + raise HTTPException(status_code=400, detail="Entry point is not a form") + if not inv.get("enabled", True): + raise HTTPException(status_code=400, detail="Entry point is disabled") + + services = getAutomation2Services( + context.user, + mandateId=mandateId, + featureInstanceId=instanceId, + ) + from modules.workflows.processing.shared.methodDiscovery import discoverMethods + + discoverMethods(services) + + title = inv.get("title") or {} + label = "" + if isinstance(title, dict): + label = title.get("en") or title.get("de") or "" + elif isinstance(title, str): + label = title + pl = body if isinstance(body, dict) else {} 
+ base = default_run_envelope( + "form", + entry_point_id=inv.get("id"), + entry_point_label=label or None, + payload=pl, + raw={"formBody": body}, + ) + run_env = normalize_run_envelope(base, user_id=userId) + + result = await executeGraph( + graph=wf["graph"], + services=services, + workflowId=workflowId, + instanceId=instanceId, + userId=userId, + mandateId=mandateId, + automation2_interface=a2, + run_envelope=run_env, + ) + return result + + # ------------------------------------------------------------------------- # Runs and Resume # ------------------------------------------------------------------------- +@router.get("/{instanceId}/runs/completed") +@limiter.limit("60/minute") +def get_completed_runs( + request: Request, + instanceId: str = Path(..., description="Feature instance ID"), + limit: int = Query(20, ge=1, le=50), + context: RequestContext = Depends(getRequestContext), +) -> dict: + """Get recently completed runs with output (for Tasks page output section).""" + mandateId = _validateInstanceAccess(instanceId, context) + a2 = getAutomation2Interface(context.user, mandateId, instanceId) + runs = a2.getRecentCompletedRuns(limit=limit) + return {"runs": runs} + + @router.get("/{instanceId}/workflows/{workflowId}/runs") @limiter.limit("60/minute") def get_workflow_runs( diff --git a/modules/features/workspace/routeFeatureWorkspace.py b/modules/features/workspace/routeFeatureWorkspace.py index 6b8c529b..6a3500ce 100644 --- a/modules/features/workspace/routeFeatureWorkspace.py +++ b/modules/features/workspace/routeFeatureWorkspace.py @@ -162,6 +162,7 @@ _SOURCE_TYPE_TO_SERVICE = { "googleDriveFolder": "drive", "gmailFolder": "gmail", "ftpFolder": "files", + "clickupList": "clickup", } diff --git a/modules/routes/routeClickup.py b/modules/routes/routeClickup.py new file mode 100644 index 00000000..1603fa23 --- /dev/null +++ b/modules/routes/routeClickup.py @@ -0,0 +1,288 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. 
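# The envelope helpers are composed the same way for each entry-point kind. The sketch
# below mirrors the form-submit handler; signatures are inferred from the call sites in
# this patch, not from the runEnvelope module itself.
from modules.workflows.automation2.runEnvelope import (
    default_run_envelope,
    normalize_run_envelope,
)

def build_form_envelope(entry_point_id, label, body, user_id):
    """Compose a normalized run envelope for a form entry point."""
    base = default_run_envelope(
        "form",
        entry_point_id=entry_point_id,
        entry_point_label=label or None,
        payload=body,
        raw={"formBody": body},
    )
    return normalize_run_envelope(base, user_id=user_id)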
+"""ClickUp API routes — teams, hierarchy, lists, tasks (connection-scoped).""" + +import logging +from typing import Any, Dict, Optional + +from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request, status +from pydantic import BaseModel + +from modules.auth import getCurrentUser, limiter +from modules.datamodels.datamodelUam import AuthAuthority, User, UserConnection +from modules.interfaces.interfaceDbApp import getInterface +from modules.serviceHub import getInterface as getServices + +logger = logging.getLogger(__name__) + +router = APIRouter( + prefix="/api/clickup", + tags=["ClickUp"], + responses={ + 404: {"description": "Not found"}, + 400: {"description": "Bad request"}, + 401: {"description": "Unauthorized"}, + 500: {"description": "Internal server error"}, + }, +) + + +def _getUserConnection(interface, connection_id: str, user_id: str) -> Optional[UserConnection]: + try: + connections = interface.getUserConnections(user_id) + for conn in connections: + if conn.id == connection_id: + return conn + return None + except Exception as e: + logger.error(f"Error getting user connection: {e}") + return None + + +def _clickup_connection_or_404(interface, connection_id: str, user_id: str) -> UserConnection: + connection = _getUserConnection(interface, connection_id, user_id) + if not connection: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Connection not found") + authority = connection.authority.value if hasattr(connection.authority, "value") else str(connection.authority) + if authority.lower() != AuthAuthority.CLICKUP.value: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Connection is not a ClickUp connection", + ) + return connection + + +def _svc_for_connection(current_user: User, connection: UserConnection): + services = getServices(current_user, None) + if not services.clickup.setAccessTokenFromConnection(connection): + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Failed to set ClickUp access token", + ) + return services.clickup + + +# --- Routes (prefix is /api/clickup; OAuth lives under /api/clickup/auth/* in routeSecurityClickup) --- + + +@router.get("/{connectionId}/teams", response_model=Dict[str, Any]) +@limiter.limit("30/minute") +async def get_teams( + request: Request, + connectionId: str = Path(..., description="ClickUp UserConnection id"), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + interface = getInterface(currentUser) + conn = _clickup_connection_or_404(interface, connectionId, currentUser.id) + cu = _svc_for_connection(currentUser, conn) + return await cu.getAuthorizedTeams() + + +@router.get("/{connectionId}/teams/{teamId}", response_model=Dict[str, Any]) +@limiter.limit("60/minute") +async def get_team( + request: Request, + connectionId: str = Path(...), + teamId: str = Path(...), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + """Workspace/team details including members (for assignee pickers).""" + interface = getInterface(currentUser) + conn = _clickup_connection_or_404(interface, connectionId, currentUser.id) + cu = _svc_for_connection(currentUser, conn) + return await cu.getTeam(teamId) + + +@router.get("/{connectionId}/teams/{teamId}/spaces", response_model=Dict[str, Any]) +@limiter.limit("60/minute") +async def get_spaces( + request: Request, + connectionId: str = Path(...), + teamId: str = Path(...), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + interface = getInterface(currentUser) 
+ conn = _clickup_connection_or_404(interface, connectionId, currentUser.id) + cu = _svc_for_connection(currentUser, conn) + return await cu.getSpaces(teamId) + + +@router.get("/{connectionId}/spaces/{spaceId}/folders", response_model=Dict[str, Any]) +@limiter.limit("60/minute") +async def get_folders( + request: Request, + connectionId: str = Path(...), + spaceId: str = Path(...), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + interface = getInterface(currentUser) + conn = _clickup_connection_or_404(interface, connectionId, currentUser.id) + cu = _svc_for_connection(currentUser, conn) + return await cu.getFolders(spaceId) + + +@router.get("/{connectionId}/spaces/{spaceId}/lists", response_model=Dict[str, Any]) +@limiter.limit("60/minute") +async def get_folderless_lists( + request: Request, + connectionId: str = Path(...), + spaceId: str = Path(...), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + interface = getInterface(currentUser) + conn = _clickup_connection_or_404(interface, connectionId, currentUser.id) + cu = _svc_for_connection(currentUser, conn) + return await cu.getFolderlessLists(spaceId) + + +@router.get("/{connectionId}/folders/{folderId}/lists", response_model=Dict[str, Any]) +@limiter.limit("60/minute") +async def get_lists_in_folder( + request: Request, + connectionId: str = Path(...), + folderId: str = Path(...), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + interface = getInterface(currentUser) + conn = _clickup_connection_or_404(interface, connectionId, currentUser.id) + cu = _svc_for_connection(currentUser, conn) + return await cu.getListsInFolder(folderId) + + +@router.get("/{connectionId}/lists/{listId}", response_model=Dict[str, Any]) +@limiter.limit("60/minute") +async def get_list( + request: Request, + connectionId: str = Path(...), + listId: str = Path(...), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + interface = getInterface(currentUser) + conn = _clickup_connection_or_404(interface, connectionId, currentUser.id) + cu = _svc_for_connection(currentUser, conn) + return await cu.getList(listId) + + +@router.get("/{connectionId}/lists/{listId}/fields", response_model=Dict[str, Any]) +@limiter.limit("60/minute") +async def get_list_fields( + request: Request, + connectionId: str = Path(...), + listId: str = Path(...), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + interface = getInterface(currentUser) + conn = _clickup_connection_or_404(interface, connectionId, currentUser.id) + cu = _svc_for_connection(currentUser, conn) + return await cu.getListFields(listId) + + +@router.get("/{connectionId}/lists/{listId}/tasks", response_model=Dict[str, Any]) +@limiter.limit("60/minute") +async def get_list_tasks( + request: Request, + connectionId: str = Path(...), + listId: str = Path(...), + page: int = Query(0), + include_closed: bool = Query(False), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + interface = getInterface(currentUser) + conn = _clickup_connection_or_404(interface, connectionId, currentUser.id) + cu = _svc_for_connection(currentUser, conn) + return await cu.getTasksInList(listId, page=page, include_closed=include_closed) + + +class TaskCreateBody(BaseModel): + body: Dict[str, Any] + + +@router.post("/{connectionId}/lists/{listId}/tasks", response_model=Dict[str, Any]) +@limiter.limit("30/minute") +async def create_list_task( + request: Request, + payload: TaskCreateBody, + connectionId: str = Path(...), + listId: 
str = Path(...), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + interface = getInterface(currentUser) + conn = _clickup_connection_or_404(interface, connectionId, currentUser.id) + cu = _svc_for_connection(currentUser, conn) + return await cu.createTask(listId, payload.body) + + +class TaskUpdateBody(BaseModel): + body: Dict[str, Any] + + +@router.get("/{connectionId}/tasks/{taskId}", response_model=Dict[str, Any]) +@limiter.limit("60/minute") +async def get_task( + request: Request, + connectionId: str = Path(...), + taskId: str = Path(...), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + interface = getInterface(currentUser) + conn = _clickup_connection_or_404(interface, connectionId, currentUser.id) + cu = _svc_for_connection(currentUser, conn) + return await cu.getTask(taskId) + + +@router.put("/{connectionId}/tasks/{taskId}", response_model=Dict[str, Any]) +@limiter.limit("30/minute") +async def update_task( + request: Request, + payload: TaskUpdateBody, + connectionId: str = Path(...), + taskId: str = Path(...), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + interface = getInterface(currentUser) + conn = _clickup_connection_or_404(interface, connectionId, currentUser.id) + cu = _svc_for_connection(currentUser, conn) + return await cu.updateTask(taskId, payload.body) + + +@router.delete("/{connectionId}/tasks/{taskId}", response_model=Dict[str, Any]) +@limiter.limit("30/minute") +async def delete_task( + request: Request, + connectionId: str = Path(...), + taskId: str = Path(...), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + interface = getInterface(currentUser) + conn = _clickup_connection_or_404(interface, connectionId, currentUser.id) + cu = _svc_for_connection(currentUser, conn) + return await cu.deleteTask(taskId) + + +@router.get("/{connectionId}/teams/{teamId}/tasks/search", response_model=Dict[str, Any]) +@limiter.limit("30/minute") +async def search_team_tasks( + request: Request, + connectionId: str = Path(...), + teamId: str = Path(...), + query: str = Query(..., description="Search query"), + page: int = Query(0), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + interface = getInterface(currentUser) + conn = _clickup_connection_or_404(interface, connectionId, currentUser.id) + cu = _svc_for_connection(currentUser, conn) + return await cu.searchTeamTasks(teamId, query=query, page=page) + + +@router.get("/{connectionId}/user", response_model=Dict[str, Any]) +@limiter.limit("30/minute") +async def get_authorized_user( + request: Request, + connectionId: str = Path(...), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + interface = getInterface(currentUser) + conn = _clickup_connection_or_404(interface, connectionId, currentUser.id) + cu = _svc_for_connection(currentUser, conn) + return await cu.getAuthorizedUser() diff --git a/modules/routes/routeDataConnections.py b/modules/routes/routeDataConnections.py index 17ef0115..d01992c5 100644 --- a/modules/routes/routeDataConnections.py +++ b/modules/routes/routeDataConnections.py @@ -112,7 +112,8 @@ def get_auth_authority_options( authorityLabels = { "local": "Local", "google": "Google", - "msft": "Microsoft" + "msft": "Microsoft", + "clickup": "ClickUp", } return [ {"value": auth.value, "label": authorityLabels.get(auth.value, auth.value)} @@ -347,7 +348,8 @@ def create_connection( # Map type to authority authority_map = { 'msft': AuthAuthority.MSFT, - 'google': AuthAuthority.GOOGLE + 
'google': AuthAuthority.GOOGLE, + 'clickup': AuthAuthority.CLICKUP, } authority = authority_map.get(connection_data.get('type')) @@ -493,6 +495,8 @@ def connect_service( auth_url = f"/api/msft/auth/connect?connectionId={quote(connectionId, safe='')}" elif connection.authority == AuthAuthority.GOOGLE: auth_url = f"/api/google/auth/connect?connectionId={quote(connectionId, safe='')}" + elif connection.authority == AuthAuthority.CLICKUP: + auth_url = f"/api/clickup/auth/connect?connectionId={quote(connectionId, safe='')}" else: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, diff --git a/modules/routes/routeSecurityClickup.py b/modules/routes/routeSecurityClickup.py new file mode 100644 index 00000000..3d1aeed5 --- /dev/null +++ b/modules/routes/routeSecurityClickup.py @@ -0,0 +1,280 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +"""ClickUp OAuth for data connections (UserConnection + Token).""" + +from fastapi import APIRouter, HTTPException, Request, status, Depends, Query +from fastapi.responses import HTMLResponse, RedirectResponse +import logging +import json +import time +from typing import Dict, Any +from urllib.parse import urlencode +import httpx +from jose import jwt as jose_jwt +from jose import JWTError + +from modules.shared.configuration import APP_CONFIG +from modules.interfaces.interfaceDbApp import getInterface, getRootInterface +from modules.datamodels.datamodelUam import AuthAuthority, User, ConnectionStatus, UserConnection +from modules.datamodels.datamodelSecurity import Token, TokenPurpose +from modules.auth import getCurrentUser, limiter, SECRET_KEY, ALGORITHM +from modules.shared.timeUtils import createExpirationTimestamp, getUtcTimestamp + +logger = logging.getLogger(__name__) + +_FLOW_CONNECT = "clickup_connect" + +CLICKUP_AUTH_BASE = "https://app.clickup.com/api" +CLICKUP_API_BASE = "https://api.clickup.com/api/v2" + +CLIENT_ID = APP_CONFIG.get("Service_CLICKUP_CLIENT_ID") +CLIENT_SECRET = APP_CONFIG.get("Service_CLICKUP_CLIENT_SECRET") +REDIRECT_URI = APP_CONFIG.get("Service_CLICKUP_OAUTH_REDIRECT_URI") + +# ClickUp states OAuth access tokens do not expire today; store a long horizon for DB status. 
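# Walking the browse hierarchy the routes above expose (teams -> spaces -> folders ->
# lists), assuming the ClickUp v2 response shapes ({"spaces": [...]}, {"folders": [...]},
# {"lists": [...]}) are passed through unchanged. Base URL, connection id, and auth
# handling are placeholders.
import httpx

def collect_lists(client, base, conn_id, team_id):
    """Gather folderless lists plus lists inside each folder of every space."""
    lists = []
    spaces = client.get(f"{base}/api/clickup/{conn_id}/teams/{team_id}/spaces").json()
    for space in spaces.get("spaces", []):
        sid = space["id"]
        lists += client.get(
            f"{base}/api/clickup/{conn_id}/spaces/{sid}/lists").json().get("lists", [])
        folders = client.get(
            f"{base}/api/clickup/{conn_id}/spaces/{sid}/folders").json()
        for folder in folders.get("folders", []):
            lists += client.get(
                f"{base}/api/clickup/{conn_id}/folders/{folder['id']}/lists"
            ).json().get("lists", [])
    return lists

# with httpx.Client(base_url="") as client: collect_lists(client, "https://host", "c-1", "team-1")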
+_CLICKUP_TOKEN_EXPIRES_IN_SEC = 10 * 365 * 24 * 3600 + + +def _issue_oauth_state(claims: Dict[str, Any]) -> str: + body = {**claims, "exp": int(time.time()) + 600} + return jose_jwt.encode(body, SECRET_KEY, algorithm=ALGORITHM) + + +def _parse_oauth_state(state: str) -> Dict[str, Any]: + try: + return jose_jwt.decode(state, SECRET_KEY, algorithms=[ALGORITHM]) + except JWTError as e: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, detail=f"Invalid OAuth state: {e}" + ) from e + + +def _require_clickup_config(): + if not CLIENT_ID or not CLIENT_SECRET or not REDIRECT_URI: + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="ClickUp OAuth is not configured (Service_CLICKUP_CLIENT_ID, Service_CLICKUP_CLIENT_SECRET, Service_CLICKUP_OAUTH_REDIRECT_URI)", + ) + + +router = APIRouter( + prefix="/api/clickup", + tags=["Security ClickUp"], + responses={ + 404: {"description": "Not found"}, + 400: {"description": "Bad request"}, + 401: {"description": "Unauthorized"}, + 500: {"description": "Internal server error"}, + }, +) + + +@router.get("/auth/connect") +@limiter.limit("5/minute") +def auth_connect( + request: Request, + connectionId: str = Query(..., description="UserConnection id"), + currentUser: User = Depends(getCurrentUser), +) -> RedirectResponse: + """Start ClickUp OAuth for an existing connection (requires gateway session).""" + try: + _require_clickup_config() + interface = getInterface(currentUser) + connections = interface.getUserConnections(currentUser.id) + connection = None + for conn in connections: + if conn.id == connectionId and conn.authority == AuthAuthority.CLICKUP: + connection = conn + break + if not connection: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="ClickUp connection not found") + + state_jwt = _issue_oauth_state( + { + "flow": _FLOW_CONNECT, + "connectionId": connectionId, + "userId": str(currentUser.id), + } + ) + query = urlencode( + { + "client_id": CLIENT_ID, + "redirect_uri": REDIRECT_URI, + "state": state_jwt, + } + ) + auth_url = f"{CLICKUP_AUTH_BASE}?{query}" + return RedirectResponse(auth_url) + except HTTPException: + raise + except Exception as e: + logger.error(f"Error initiating ClickUp connect: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Failed to initiate ClickUp connect: {str(e)}", + ) + + +@router.get("/auth/connect/callback") +async def auth_connect_callback( + code: str = Query(...), + state: str = Query(...), +) -> HTMLResponse: + """OAuth callback for ClickUp data connection.""" + state_data = _parse_oauth_state(state) + if state_data.get("flow") != _FLOW_CONNECT: + raise HTTPException(status_code=400, detail="Invalid OAuth flow for this callback") + connection_id = state_data.get("connectionId") + user_id = state_data.get("userId") + if not connection_id or not user_id: + raise HTTPException(status_code=400, detail="Missing connection or user in OAuth state") + + _require_clickup_config() + + async with httpx.AsyncClient() as client: + token_resp = await client.post( + f"{CLICKUP_API_BASE}/oauth/token", + json={ + "client_id": CLIENT_ID, + "client_secret": CLIENT_SECRET, + "code": code, + }, + headers={"Content-Type": "application/json"}, + timeout=30.0, + ) + if token_resp.status_code != 200: + logger.error(f"ClickUp token exchange failed: {token_resp.status_code} {token_resp.text}") + return HTMLResponse( + content=f"

+ <html><body>
+ <h3>Connection Failed</h3>
+ <p>{token_resp.text}</p>
+ </body></html>
", + status_code=400, + ) + token_json = token_resp.json() + access_token = token_json.get("access_token") + if not access_token: + return HTMLResponse( + content="

+ <html><body>
+ <h3>Connection Failed</h3>
+ <p>No access token.</p>
+ </body></html>
", + status_code=400, + ) + + async with httpx.AsyncClient() as client: + user_resp = await client.get( + f"{CLICKUP_API_BASE}/user", + headers={ + "Authorization": f"Bearer {access_token}", + "Content-Type": "application/json", + }, + timeout=30.0, + ) + if user_resp.status_code != 200: + logger.error(f"ClickUp user failed: {user_resp.status_code} {user_resp.text}") + return HTMLResponse( + content="

Connection Failed

Could not load ClickUp user.

", + status_code=400, + ) + user_payload = user_resp.json() + cu_user = user_payload.get("user") or {} + + rootInterface = getRootInterface() + user = rootInterface.getUser(user_id) + if not user: + return HTMLResponse( + content=""" + + """, + status_code=404, + ) + + interface = getInterface(user) + connections = interface.getUserConnections(user_id) + connection = None + for conn in connections: + if conn.id == connection_id: + connection = conn + break + if not connection: + return HTMLResponse( + content=""" + + """, + status_code=404, + ) + + ext_id = str(cu_user.get("id", "")) if cu_user.get("id") is not None else "" + username = cu_user.get("username") or cu_user.get("email") or ext_id + email = cu_user.get("email") + + expires_at = createExpirationTimestamp(_CLICKUP_TOKEN_EXPIRES_IN_SEC) + + try: + connection.status = ConnectionStatus.ACTIVE + connection.lastChecked = getUtcTimestamp() + connection.expiresAt = expires_at + connection.externalId = ext_id + connection.externalUsername = username + if email: + connection.externalEmail = email + connection.grantedScopes = None + rootInterface.db.recordModify(UserConnection, connection_id, connection.model_dump()) + + token = Token( + userId=user.id, + authority=AuthAuthority.CLICKUP, + connectionId=connection_id, + tokenPurpose=TokenPurpose.DATA_CONNECTION, + tokenAccess=access_token, + tokenRefresh=None, + tokenType="bearer", + expiresAt=expires_at, + createdAt=getUtcTimestamp(), + ) + interface.saveConnectionToken(token) + + return HTMLResponse( + content=f""" + + Connection Successful + + + + + """ + ) + except Exception as e: + logger.error(f"Error updating ClickUp connection: {str(e)}", exc_info=True) + return HTMLResponse( + content=f""" + + """, + status_code=500, + ) diff --git a/modules/serviceCenter/registry.py b/modules/serviceCenter/registry.py index cdf57304..851e4894 100644 --- a/modules/serviceCenter/registry.py +++ b/modules/serviceCenter/registry.py @@ -63,6 +63,13 @@ IMPORTABLE_SERVICES: Dict[str, Dict[str, Any]] = { "objectKey": "service.sharepoint", "label": {"en": "SharePoint", "de": "SharePoint", "fr": "SharePoint"}, }, + "clickup": { + "module": "modules.serviceCenter.services.serviceClickup.mainServiceClickup", + "class": "ClickupService", + "dependencies": ["security"], + "objectKey": "service.clickup", + "label": {"en": "ClickUp", "de": "ClickUp", "fr": "ClickUp"}, + }, "chat": { "module": "modules.serviceCenter.services.serviceChat.mainServiceChat", "class": "ChatService", diff --git a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py index 78c69ff3..d65009e6 100644 --- a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py +++ b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py @@ -1552,6 +1552,7 @@ def _registerCoreTools(registry: ToolRegistry, services): "googleDriveFolder": "drive", "gmailFolder": "gmail", "ftpFolder": "files", + "clickupList": "clickup", } async def _resolveDataSource(dsId: str): diff --git a/modules/serviceCenter/services/serviceClickup/__init__.py b/modules/serviceCenter/services/serviceClickup/__init__.py new file mode 100644 index 00000000..6b3bb1f3 --- /dev/null +++ b/modules/serviceCenter/services/serviceClickup/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. 
+"""ClickUp service.""" + +from .mainServiceClickup import ClickupService, clickup_authorization_header + +__all__ = ["ClickupService", "clickup_authorization_header"] diff --git a/modules/serviceCenter/services/serviceClickup/mainServiceClickup.py b/modules/serviceCenter/services/serviceClickup/mainServiceClickup.py new file mode 100644 index 00000000..6093e1bd --- /dev/null +++ b/modules/serviceCenter/services/serviceClickup/mainServiceClickup.py @@ -0,0 +1,223 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +"""ClickUp API service (OAuth or personal token via UserConnection).""" + +import json +import logging +import asyncio +from typing import Any, Callable, Dict, List, Optional, Union + +import aiohttp + +logger = logging.getLogger(__name__) + +_CLICKUP_API_BASE = "https://api.clickup.com/api/v2" + + +def clickup_authorization_header(token: str) -> str: + """ClickUp: personal tokens are `pk_...` without Bearer; OAuth uses Bearer.""" + t = (token or "").strip() + if t.startswith("pk_"): + return t + return f"Bearer {t}" + + +class ClickupService: + """ClickUp REST API v2 — teams, hierarchy, lists as tables (tasks + custom fields).""" + + def __init__(self, context, get_service: Callable[[str], Any]): + self._context = context + self._get_service = get_service + self.accessToken: Optional[str] = None + + def setAccessTokenFromConnection(self, userConnection) -> bool: + """Load OAuth/personal token from SecurityService for this UserConnection.""" + try: + if not userConnection: + logger.error("UserConnection is required to set access token") + return False + if isinstance(userConnection, dict): + connection_id = userConnection.get("id") + else: + connection_id = getattr(userConnection, "id", None) + if not connection_id: + logger.error("UserConnection must have an 'id' field") + return False + security = self._get_service("security") + if not security: + logger.error("Security service not available for token access") + return False + token = security.getFreshToken(connection_id) + if not token: + logger.error(f"No token found for connection {connection_id}") + return False + self.accessToken = token.tokenAccess + return True + except Exception as e: + logger.error(f"Error setting ClickUp access token: {e}") + return False + + def setAccessToken(self, token: str) -> None: + """Set token directly (e.g. connector adapter).""" + self.accessToken = token + + async def _request( + self, + method: str, + path: str, + *, + params: Optional[Dict[str, Any]] = None, + json_body: Optional[Dict[str, Any]] = None, + data: Optional[aiohttp.FormData] = None, + ) -> Union[Dict[str, Any], List[Any], bytes, None]: + if not self.accessToken: + return {"error": "Access token is not set. 
Call setAccessTokenFromConnection first."} + url = f"{_CLICKUP_API_BASE}/{path.lstrip('/')}" + headers: Dict[str, str] = { + "Authorization": clickup_authorization_header(self.accessToken), + } + if json_body is not None: + headers["Content-Type"] = "application/json" + + timeout = aiohttp.ClientTimeout(total=60) + try: + async with aiohttp.ClientSession(timeout=timeout) as session: + kwargs: Dict[str, Any] = {"headers": headers, "params": params} + if json_body is not None: + kwargs["json"] = json_body + if data is not None: + kwargs["data"] = data + + async with session.request(method.upper(), url, **kwargs) as resp: + if resp.status == 204: + return {} + text = await resp.text() + if resp.status >= 400: + # 404 on GET is common (wrong id / preview) — avoid ERROR noise in logs + log = logger.warning if resp.status == 404 else logger.error + log(f"ClickUp API {method} {url} -> {resp.status}: {text[:500]}") + return {"error": f"HTTP {resp.status}", "body": text} + if not text: + return {} + try: + return json.loads(text) + except Exception: + return {"raw": text} + except asyncio.TimeoutError: + return {"error": f"ClickUp API timeout: {path}"} + except Exception as e: + logger.error(f"ClickUp API error: {e}") + return {"error": str(e)} + + async def requestRaw( + self, + method: str, + path: str, + *, + params: Optional[Dict[str, Any]] = None, + json_body: Optional[Dict[str, Any]] = None, + ) -> Union[Dict[str, Any], List[Any], None]: + """Escape hatch: call any v2 path under /api/v2 (path without leading /api/v2).""" + return await self._request(method, path, params=params, json_body=json_body) + + # --- Teams / user --- + + async def getAuthorizedUser(self) -> Dict[str, Any]: + return await self._request("GET", "/user") + + async def getAuthorizedTeams(self) -> Dict[str, Any]: + return await self._request("GET", "/team") + + async def getTeam(self, team_id: str) -> Dict[str, Any]: + return await self._request("GET", f"/team/{team_id}") + + # --- Hierarchy --- + + async def getSpaces(self, team_id: str) -> Dict[str, Any]: + return await self._request("GET", f"/team/{team_id}/space") + + async def getSpace(self, space_id: str) -> Dict[str, Any]: + return await self._request("GET", f"/space/{space_id}") + + async def getFolders(self, space_id: str) -> Dict[str, Any]: + return await self._request("GET", f"/space/{space_id}/folder") + + async def getFolder(self, folder_id: str) -> Dict[str, Any]: + return await self._request("GET", f"/folder/{folder_id}") + + async def getListsInFolder(self, folder_id: str) -> Dict[str, Any]: + return await self._request("GET", f"/folder/{folder_id}/list") + + async def getFolderlessLists(self, space_id: str) -> Dict[str, Any]: + return await self._request("GET", f"/space/{space_id}/list") + + async def getList(self, list_id: str) -> Dict[str, Any]: + return await self._request("GET", f"/list/{list_id}") + + async def getListFields(self, list_id: str) -> Dict[str, Any]: + return await self._request("GET", f"/list/{list_id}/field") + + # --- Tasks (rows) --- + + async def getTasksInList( + self, + list_id: str, + *, + page: int = 0, + include_closed: bool = False, + subtasks: bool = True, + ) -> Dict[str, Any]: + params: Dict[str, Any] = { + "page": page, + "subtasks": str(subtasks).lower(), + "include_closed": str(include_closed).lower(), + } + return await self._request("GET", f"/list/{list_id}/task", params=params) + + async def getTask(self, task_id: str, *, include_subtasks: bool = True) -> Dict[str, Any]: + params = {"include_subtasks": 
str(include_subtasks).lower()} + return await self._request("GET", f"/task/{task_id}", params=params) + + async def createTask(self, list_id: str, body: Dict[str, Any]) -> Dict[str, Any]: + return await self._request("POST", f"/list/{list_id}/task", json_body=body) + + async def updateTask(self, task_id: str, body: Dict[str, Any]) -> Dict[str, Any]: + return await self._request("PUT", f"/task/{task_id}", json_body=body) + + async def deleteTask(self, task_id: str) -> Dict[str, Any]: + return await self._request("DELETE", f"/task/{task_id}") + + async def searchTeamTasks( + self, + team_id: str, + *, + query: str, + page: int = 0, + ) -> Dict[str, Any]: + """Search tasks in a workspace (team).""" + params = {"query": query, "page": page} + return await self._request("GET", f"/team/{team_id}/task", params=params) + + async def uploadTaskAttachment(self, task_id: str, file_bytes: bytes, file_name: str) -> Dict[str, Any]: + """Upload a file attachment to a task (multipart).""" + if not self.accessToken: + return {"error": "Access token is not set."} + url = f"{_CLICKUP_API_BASE}/task/{task_id}/attachment" + headers = {"Authorization": clickup_authorization_header(self.accessToken)} + data = aiohttp.FormData() + data.add_field( + "attachment", + file_bytes, + filename=file_name, + content_type="application/octet-stream", + ) + timeout = aiohttp.ClientTimeout(total=120) + try: + async with aiohttp.ClientSession(timeout=timeout) as session: + async with session.post(url, headers=headers, data=data) as resp: + text = await resp.text() + if resp.status >= 400: + return {"error": f"HTTP {resp.status}", "body": text} + return json.loads(text) if text else {} + except Exception as e: + return {"error": str(e)} diff --git a/modules/serviceCenter/services/serviceGeneration/renderers/rendererDocx.py b/modules/serviceCenter/services/serviceGeneration/renderers/rendererDocx.py index 733b9ade..7a1277ca 100644 --- a/modules/serviceCenter/services/serviceGeneration/renderers/rendererDocx.py +++ b/modules/serviceCenter/services/serviceGeneration/renderers/rendererDocx.py @@ -125,9 +125,12 @@ class RendererDocx(BaseRenderer): self.logger.debug(f"_generateDocxFromJson: Document created in {time.time() - start_time:.2f}s") # Get style set: use styles from metadata if available, otherwise enhance with AI + template_from_metadata = None + if json_content and isinstance(json_content.get("metadata"), dict): + template_from_metadata = json_content["metadata"].get("templateName") style_start = time.time() self.logger.debug("_generateDocxFromJson: About to get style set") - styleSet = await self._getStyleSet(json_content, userPrompt, aiService) + styleSet = await self._getStyleSet(json_content, userPrompt, aiService, templateName=template_from_metadata) self.logger.debug(f"_generateDocxFromJson: Style set retrieved in {time.time() - style_start:.2f}s") # Setup basic document styles and create all styles from style set diff --git a/modules/serviceCenter/services/serviceGeneration/renderers/rendererText.py b/modules/serviceCenter/services/serviceGeneration/renderers/rendererText.py index 2d0cc8d2..15a7161c 100644 --- a/modules/serviceCenter/services/serviceGeneration/renderers/rendererText.py +++ b/modules/serviceCenter/services/serviceGeneration/renderers/rendererText.py @@ -4,6 +4,8 @@ Text renderer for report generation. 
""" +import re + from .documentRendererBaseTemplate import BaseRenderer from modules.datamodels.datamodelDocument import RenderedDocument from typing import Dict, Any, List, Optional @@ -93,9 +95,13 @@ class RendererText(BaseRenderer): metadata = extractedContent.get("metadata", {}) if extractedContent else {} documentType = metadata.get("documentType") if isinstance(metadata, dict) else None + # UTF-8 BOM helps editors/browsers recognize encoding (fixes grössten → grössten) + text_bytes = textContent.encode('utf-8') + if not text_bytes.startswith(b'\xef\xbb\xbf'): + text_bytes = b'\xef\xbb\xbf' + text_bytes return [ RenderedDocument( - documentData=textContent.encode('utf-8'), + documentData=text_bytes, mimeType="text/plain", filename=filename, documentType=documentType, @@ -276,7 +282,7 @@ class RendererText(BaseRenderer): return "" def _renderJsonBulletList(self, listData: Dict[str, Any]) -> str: - """Render a JSON bullet list to text.""" + """Render a JSON bullet list to text. Strips markdown from item text.""" try: # Extract from nested content structure: element.content.{items} content = listData.get("content", {}) @@ -290,9 +296,9 @@ class RendererText(BaseRenderer): textParts = [] for item in items: if isinstance(item, str): - textParts.append(f"- {item}") + textParts.append(f"- {self._stripMarkdownForPlainText(item)}") elif isinstance(item, dict) and "text" in item: - textParts.append(f"- {item['text']}") + textParts.append(f"- {self._stripMarkdownForPlainText(item['text'])}") return '\n'.join(textParts) @@ -301,13 +307,13 @@ class RendererText(BaseRenderer): return "" def _renderJsonHeading(self, headingData: Dict[str, Any]) -> str: - """Render a JSON heading to text.""" + """Render a JSON heading to text. Strips markdown from heading text.""" try: # Extract from nested content structure: element.content.{text, level} content = headingData.get("content", {}) if not isinstance(content, dict): return "" - text = content.get("text", "") + text = self._stripMarkdownForPlainText(content.get("text", "")) level = content.get("level", 1) if text: @@ -325,8 +331,22 @@ class RendererText(BaseRenderer): self.logger.warning(f"Error rendering heading: {str(e)}") return "" + def _stripMarkdownForPlainText(self, text: str) -> str: + """Strip markdown formatting for plain text output (**bold** -> bold, *italic* -> italic).""" + if not text: + return "" + # **bold** and __bold__ -> plain + text = re.sub(r'\*\*(.+?)\*\*', r'\1', text) + text = re.sub(r'__(.+?)__', r'\1', text) + # *italic* and _italic_ -> plain + text = re.sub(r'(? plain + text = re.sub(r'`([^`]+)`', r'\1', text) + return text.strip() + def _renderJsonParagraph(self, paragraphData: Dict[str, Any]) -> str: - """Render a JSON paragraph to text.""" + """Render a JSON paragraph to text. 
Strips markdown for plain text output.""" try: # Extract from nested content structure content = paragraphData.get("content", {}) @@ -336,7 +356,7 @@ class RendererText(BaseRenderer): text = content else: text = "" - return text if text else "" + return self._stripMarkdownForPlainText(text) if text else "" except Exception as e: self.logger.warning(f"Error rendering paragraph: {str(e)}") diff --git a/modules/serviceCenter/services/serviceGeneration/subDocumentUtility.py b/modules/serviceCenter/services/serviceGeneration/subDocumentUtility.py index 329f09f6..8a3e7cea 100644 --- a/modules/serviceCenter/services/serviceGeneration/subDocumentUtility.py +++ b/modules/serviceCenter/services/serviceGeneration/subDocumentUtility.py @@ -3,10 +3,155 @@ import json import logging import os +import re from typing import Any, Dict logger = logging.getLogger(__name__) + +def markdownToDocumentJson(markdown: str, title: str, language: str = "de") -> Dict[str, Any]: + """ + Convert markdown content to the standard document JSON format expected by renderReport. + Supports headings, code blocks, tables, lists, images (file: refs), paragraphs. + For plain text: wraps entire content in a single paragraph section. + """ + if not isinstance(markdown, str): + markdown = str(markdown) if markdown else "" + + sections = [] + order = 0 + lines = markdown.split("\n") + i = 0 + + def _nextId(): + nonlocal order + order += 1 + return f"s_{order}" + + while i < len(lines): + line = lines[i] + + # Headings + headingMatch = re.match(r"^(#{1,6})\s+(.+)", line) + if headingMatch: + level = len(headingMatch.group(1)) + text = headingMatch.group(2).strip() + sections.append({ + "id": _nextId(), "content_type": "heading", "order": order, + "elements": [{"content": {"text": text, "level": level}}], + }) + i += 1 + continue + + # Fenced code blocks + codeMatch = re.match(r"^```(\w*)", line) + if codeMatch: + lang = codeMatch.group(1) or "text" + codeLines = [] + i += 1 + while i < len(lines) and not lines[i].startswith("```"): + codeLines.append(lines[i]) + i += 1 + i += 1 + sections.append({ + "id": _nextId(), "content_type": "code_block", "order": order, + "elements": [{"content": {"code": "\n".join(codeLines), "language": lang}}], + }) + continue + + # Tables + tableMatch = re.match(r"^\|(.+)\|$", line) + if tableMatch and (i + 1) < len(lines) and re.match(r"^\|[\s\-:|]+\|$", lines[i + 1]): + headerCells = [c.strip() for c in tableMatch.group(1).split("|")] + i += 2 + rows = [] + while i < len(lines) and re.match(r"^\|(.+)\|$", lines[i]): + rowCells = [c.strip() for c in lines[i][1:-1].split("|")] + rows.append(rowCells) + i += 1 + sections.append({ + "id": _nextId(), "content_type": "table", "order": order, + "elements": [{"content": {"headers": headerCells, "rows": rows}}], + }) + continue + + # Bullet / numbered lists + listMatch = re.match(r"^(\s*)([-*+]|\d+[.)]) (.+)", line) + if listMatch: + isNumbered = bool(re.match(r"\d+[.)]", listMatch.group(2))) + items = [] + while i < len(lines) and re.match(r"^(\s*)([-*+]|\d+[.)]) (.+)", lines[i]): + m = re.match(r"^(\s*)([-*+]|\d+[.)]) (.+)", lines[i]) + items.append({"text": m.group(3).strip()}) + i += 1 + sections.append({ + "id": _nextId(), "content_type": "bullet_list", "order": order, + "elements": [{"content": {"items": items, "list_type": "numbered" if isNumbered else "bullet"}}], + }) + continue + + # Empty lines + if not line.strip(): + i += 1 + continue + + # Images (simplified: store as paragraph with ref for now - full resolution needs Knowledge Store) + imgMatch = 
re.match(r"^!\[([^\]]*)\]\(([^)]+)\)", line) + if imgMatch: + altText = imgMatch.group(1).strip() or "Image" + src = imgMatch.group(2).strip() + fileId = src[5:] if src.startswith("file:") else "" + sections.append({ + "id": _nextId(), "content_type": "image", "order": order, + "elements": [{ + "content": { + "altText": altText, + "base64Data": "", + "_fileRef": fileId, + "_srcUrl": src if not fileId else "", + } + }], + }) + i += 1 + continue + + # Paragraph + paraLines = [] + while i < len(lines) and lines[i].strip() and not re.match( + r"^(#{1,6}\s|```|\|.+\||!\[|(\s*)([-*+]|\d+[.)]) )", lines[i] + ): + paraLines.append(lines[i]) + i += 1 + if paraLines: + sections.append({ + "id": _nextId(), "content_type": "paragraph", "order": order, + "elements": [{"content": {"text": " ".join(paraLines)}}], + }) + continue + + i += 1 + + if not sections: + sections.append({ + "id": _nextId(), "content_type": "paragraph", "order": order, + "elements": [{"content": {"text": markdown.strip() or "(empty)"}}], + }) + + return { + "metadata": { + "split_strategy": "single_document", + "source_documents": [], + "extraction_method": "file_create_rendering", + "title": title, + "language": language, + }, + "documents": [{ + "id": "doc_1", + "title": title, + "sections": sections, + }], + } + def getFileExtension(fileName: str) -> str: """Extract file extension from fileName (without dot, lowercased).""" if '.' in fileName: diff --git a/modules/shared/eventManagement.py b/modules/shared/eventManagement.py index 3bb45af8..ebbf2131 100644 --- a/modules/shared/eventManagement.py +++ b/modules/shared/eventManagement.py @@ -1,5 +1,6 @@ # Copyright (c) 2025 Patrick Motsch # All rights reserved. +import asyncio import logging from typing import Callable, Optional, Dict, Any @@ -25,14 +26,28 @@ class EventManagement: def __init__(self, timezone: str = "Europe/Zurich"): self._timezone = ZoneInfo(timezone) self._scheduler: Optional[AsyncIOScheduler] = None + self._event_loop: Optional[asyncio.AbstractEventLoop] = None + + def set_event_loop(self, loop: asyncio.AbstractEventLoop) -> None: + """Set the event loop for the scheduler (call from lifespan before start).""" + self._event_loop = loop @property def scheduler(self) -> AsyncIOScheduler: if self._scheduler is None: - self._scheduler = AsyncIOScheduler(timezone=self._timezone) + kwargs = {"timezone": self._timezone} + if self._event_loop is not None: + kwargs["event_loop"] = self._event_loop + self._scheduler = AsyncIOScheduler(**kwargs) return self._scheduler def start(self) -> None: + if self._event_loop is None: + try: + self._event_loop = asyncio.get_running_loop() + logger.debug("EventManagement: using get_running_loop()") + except RuntimeError: + pass if not self.scheduler.running: self.scheduler.start() logger.info("EventManagement scheduler started") @@ -90,10 +105,18 @@ class EventManagement: ) -> None: """ Register a job using IntervalTrigger. + Only passes non-None interval components (IntervalTrigger fails on None). 
""" - trigger = IntervalTrigger( - seconds=seconds, minutes=minutes, hours=hours, timezone=self._timezone - ) + trigger_kwargs: Dict[str, Any] = {"timezone": self._timezone} + if seconds is not None: + trigger_kwargs["seconds"] = seconds + if minutes is not None: + trigger_kwargs["minutes"] = minutes + if hours is not None: + trigger_kwargs["hours"] = hours + if len(trigger_kwargs) <= 1: + raise ValueError("At least one of seconds, minutes, hours must be provided") + trigger = IntervalTrigger(**trigger_kwargs) self.scheduler.add_job( func, trigger, diff --git a/modules/workflows/automation2/clickupTaskUpdateMerge.py b/modules/workflows/automation2/clickupTaskUpdateMerge.py new file mode 100644 index 00000000..a74cdaef --- /dev/null +++ b/modules/workflows/automation2/clickupTaskUpdateMerge.py @@ -0,0 +1,174 @@ +# Copyright (c) 2025 Patrick Motsch +# Merge clickup.updateTask node parameter taskUpdateEntries into taskUpdate JSON. + +import json +import logging +from datetime import datetime, timezone +from typing import Any, Dict, List, Optional + +logger = logging.getLogger(__name__) + + +def _unwrap_value(v: Any) -> Any: + if isinstance(v, dict) and v.get("type") == "value" and "value" in v: + return v.get("value") + return v + + +def _unwrap_dynamic(v: Any) -> Any: + return _unwrap_value(v) + + +def _parse_int_list(val: Any) -> List[int]: + if val is None: + return [] + val = _unwrap_value(val) + if isinstance(val, str) and val.strip(): + try: + parsed = json.loads(val) + if isinstance(parsed, list): + return [int(x) for x in parsed if x is not None and str(x).strip() != ""] + except (json.JSONDecodeError, ValueError, TypeError): + return [] + if isinstance(val, list): + out: List[int] = [] + for x in val: + if x is None or (isinstance(x, str) and not x.strip()): + continue + try: + out.append(int(x)) + except (ValueError, TypeError): + continue + return out + return [] + + +def _parse_due_date_ms(v: Any) -> Optional[int]: + v = _unwrap_value(v) + if v is None or v == "": + return None + if isinstance(v, str) and len(v) >= 10 and v[4] == "-" and v[7] == "-": + try: + dt = datetime.strptime(v[:10], "%Y-%m-%d").replace(tzinfo=timezone.utc) + return int(dt.timestamp() * 1000) + except ValueError: + pass + try: + i = int(float(v)) + return i if i > 0 else None + except (ValueError, TypeError): + return None + + +def _parse_time_estimate_hours_to_ms(v: Any) -> Optional[int]: + v = _unwrap_value(v) + if v is None or v == "": + return None + try: + h = float(v) + if h < 0: + return None + return int(round(h * 3600 * 1000)) + except (ValueError, TypeError): + return None + + +def merge_clickup_task_update_entries(resolved_params: Dict[str, Any]) -> None: + """ + Pop taskUpdateEntries from resolved_params and merge into taskUpdate (dict or JSON string). + Existing taskUpdate (advanced JSON) is the base; entry rows override by key. 
+ """ + entries = resolved_params.pop("taskUpdateEntries", None) + json_raw = resolved_params.get("taskUpdate") + base: Dict[str, Any] = {} + if isinstance(json_raw, str) and json_raw.strip(): + try: + parsed = json.loads(json_raw) + if isinstance(parsed, dict): + base = dict(parsed) + except json.JSONDecodeError: + logger.warning("clickup.updateTask: taskUpdate is not valid JSON, ignoring base") + elif isinstance(json_raw, dict): + base = dict(json_raw) + + if not isinstance(entries, list) or not entries: + if not base and json_raw not in (None, "", {}): + resolved_params["taskUpdate"] = json_raw + elif base: + resolved_params["taskUpdate"] = json.dumps(base, ensure_ascii=False) + return + + overlay: Dict[str, Any] = {} + custom_rows: List[Dict[str, Any]] = [] + + for row in entries: + if not isinstance(row, dict): + continue + fk = row.get("fieldKey") or row.get("field") + if fk is None: + continue + fk = str(fk).strip() + val = _unwrap_dynamic(row.get("value")) + + if fk == "custom_field": + cfid = _unwrap_dynamic(row.get("customFieldId")) + if not cfid or not str(cfid).strip(): + continue + if val is None or val == "": + continue + custom_rows.append({"id": str(cfid).strip(), "value": val}) + continue + + if fk == "name" and val is not None and str(val).strip(): + overlay["name"] = str(val).strip() + elif fk == "description": + overlay["description"] = "" if val is None else str(val) + elif fk == "status" and val is not None and str(val).strip(): + overlay["status"] = str(val).strip() + elif fk == "priority": + if val is None or val == "": + continue + try: + pi = int(float(val)) + if 1 <= pi <= 4: + overlay["priority"] = pi + except (ValueError, TypeError): + pass + elif fk == "due_date": + dms = _parse_due_date_ms(val) + if dms is not None: + overlay["due_date"] = dms + elif fk == "time_estimate_h": + tms = _parse_time_estimate_hours_to_ms(val) + if tms is not None: + overlay["time_estimate"] = tms + elif fk == "time_estimate_ms": + if val is None or val == "": + continue + try: + tms = int(float(val)) + if tms > 0: + overlay["time_estimate"] = tms + except (ValueError, TypeError): + pass + elif fk == "assignees": + ids = _parse_int_list(val) + if ids: + overlay["assignees"] = ids + else: + logger.debug("clickup.updateTask: unknown fieldKey %s", fk) + + merged = {**base, **overlay} + + if custom_rows: + by_id: Dict[str, Dict[str, Any]] = {} + existing = merged.get("custom_fields") + if isinstance(existing, list): + for x in existing: + if isinstance(x, dict) and x.get("id") is not None: + by_id[str(x["id"])] = x + for x in custom_rows: + by_id[str(x["id"])] = x + merged["custom_fields"] = list(by_id.values()) + + resolved_params["taskUpdate"] = json.dumps(merged, ensure_ascii=False) if merged else "" diff --git a/modules/workflows/automation2/executionEngine.py b/modules/workflows/automation2/executionEngine.py index 2e799707..3ab08992 100644 --- a/modules/workflows/automation2/executionEngine.py +++ b/modules/workflows/automation2/executionEngine.py @@ -11,18 +11,19 @@ from modules.workflows.automation2.graphUtils import ( validateGraph, topoSort, getInputSources, + getLoopBodyNodeIds, ) from modules.workflows.automation2.executors import ( TriggerExecutor, FlowExecutor, - DataExecutor, ActionNodeExecutor, InputExecutor, PauseForHumanTaskError, PauseForEmailWaitError, ) from modules.features.automation2.nodeDefinitions import STATIC_NODE_TYPES +from modules.workflows.automation2.runEnvelope import normalize_run_envelope logger = logging.getLogger(__name__) @@ -32,6 +33,38 @@ def 
_getNodeTypeIds(services: Any = None) -> Set[str]: return {n["id"] for n in STATIC_NODE_TYPES} +def _is_node_on_active_path( + nodeId: str, + connectionMap: Dict[str, List], + nodeOutputs: Dict[str, Any], +) -> bool: + """ + Return True if this node receives input only from active branches. + - flow.ifElse: only one output (0=yes, 1=no) is active; uses "branch". + - flow.switch: only one output (0, 1, 2, ...) is active; uses "match". + Nodes connected to inactive outputs must be skipped. + Also skip when a predecessor was skipped (not in nodeOutputs). + """ + for src, source_output, _ in connectionMap.get(nodeId, []): + out = nodeOutputs.get(src) + if out is None: + return False + if not isinstance(out, dict): + continue + branch = out.get("branch") + match = out.get("match") + active_output = None + if branch is not None: + active_output = branch + elif match is not None: + if match < 0: + return False # switch: no case matched, skip all downstream + active_output = match + if active_output is not None and source_output != active_output: + return False + return True + + def _getExecutor( nodeType: str, services: Any, @@ -42,9 +75,7 @@ def _getExecutor( return TriggerExecutor() if nodeType.startswith("flow."): return FlowExecutor() - if nodeType.startswith("data."): - return DataExecutor() - if nodeType.startswith("ai.") or nodeType.startswith("email.") or nodeType.startswith("sharepoint."): + if nodeType.startswith("ai.") or nodeType.startswith("email.") or nodeType.startswith("sharepoint.") or nodeType.startswith("clickup.") or nodeType.startswith("file."): return ActionNodeExecutor(services) if nodeType.startswith("input.") and automation2_interface: return InputExecutor(automation2_interface) @@ -62,12 +93,14 @@ async def executeGraph( initialNodeOutputs: Optional[Dict[str, Any]] = None, startAfterNodeId: Optional[str] = None, runId: Optional[str] = None, + run_envelope: Optional[Dict[str, Any]] = None, ) -> Dict[str, Any]: """ Execute automation2 graph. Returns { success, nodeOutputs, error?, stopped? }. When an input node is reached and automation2_interface is provided, creates a task, pauses the run, and returns { success: False, paused: True, taskId, runId }. For resume: pass initialNodeOutputs (with result for the human node) and startAfterNodeId. + For fresh runs: pass run_envelope (unified start payload for the start node); normalized with userId into context.runEnvelope. 
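    Illustrative resume call (values assumed; leading arguments elided):

        result = await executeGraph(
            ...,
            initialNodeOutputs={"human_1": {"result": "approved"}},
            startAfterNodeId="human_1",
            runId=paused_run_id,
        )
        # Branch pruning: if an upstream flow.ifElse returned {"branch": 0}, only
        # successors wired to output 0 execute; _is_node_on_active_path skips the rest.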
""" logger.info( "executeGraph start: instanceId=%s workflowId=%s userId=%s mandateId=%s resume=%s", @@ -122,6 +155,8 @@ async def executeGraph( runId = run.get("id") if run else None logger.info("executeGraph created run %s", runId) + env_for_run = normalize_run_envelope(run_envelope, user_id=userId) + context = { "workflowId": workflowId, "instanceId": instanceId, @@ -133,19 +168,76 @@ async def executeGraph( "services": services, "_runId": runId, "_orderedNodes": ordered, + "runEnvelope": env_for_run, } skip_until_passed = bool(startAfterNodeId) + processed_in_loop: Set[str] = set() + + # Check for loop resume: run was paused inside a loop, we're resuming for next iteration + run = automation2_interface.getRun(runId) if (runId and automation2_interface) else None + loop_resume_state = (run.get("context") or {}).get("_loopState") if run else None + if loop_resume_state and startAfterNodeId: + loop_node_id = loop_resume_state.get("loopNodeId") + next_index = loop_resume_state.get("currentIndex", -1) + 1 + items = loop_resume_state.get("items") or [] + body_ids = getLoopBodyNodeIds(loop_node_id, connectionMap) if loop_node_id else set() + body_ordered = [n for n in ordered if n.get("id") in body_ids] + processed_in_loop = set(body_ids) | {loop_node_id} if loop_node_id else set() + while next_index < len(items) and loop_node_id: + nodeOutputs[loop_node_id] = { + "items": items, + "count": len(items), + "currentItem": items[next_index], + "currentIndex": next_index, + } + context["_loopState"] = {"loopNodeId": loop_node_id, "currentIndex": next_index, "items": items} + for body_node in body_ordered: + bnid = body_node.get("id") + if not bnid or context.get("_stopped"): + break + if not _is_node_on_active_path(bnid, connectionMap, nodeOutputs): + continue + executor = _getExecutor(body_node.get("type", ""), services, automation2_interface) + if not executor: + nodeOutputs[bnid] = None + continue + try: + result = await executor.execute(body_node, context) + nodeOutputs[bnid] = result + logger.info("executeGraph loop resume body node %s done (iter %d)", bnid, next_index) + except PauseForHumanTaskError as e: + if automation2_interface: + run_ctx = dict(run.get("context") or {}) + run_ctx["_loopState"] = {"loopNodeId": loop_node_id, "currentIndex": next_index, "items": items} + automation2_interface.updateRun(e.runId, status="paused", nodeOutputs=dict(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx) + return {"success": False, "paused": True, "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, "nodeOutputs": dict(nodeOutputs)} + except Exception as ex: + logger.exception("executeGraph loop body node %s FAILED: %s", bnid, ex) + nodeOutputs[bnid] = {"error": str(ex), "success": False} + if runId and automation2_interface: + automation2_interface.updateRun(runId, status="failed", nodeOutputs=nodeOutputs) + return {"success": False, "error": str(ex), "nodeOutputs": nodeOutputs, "failedNode": bnid} + next_index += 1 + if loop_node_id: + nodeOutputs[loop_node_id] = {"items": items, "count": len(items)} + processed_in_loop = set(body_ids) | {loop_node_id} + for i, node in enumerate(ordered): if skip_until_passed: if node.get("id") == startAfterNodeId: skip_until_passed = False continue + if node.get("id") in processed_in_loop: + continue if context.get("_stopped"): - logger.info("executeGraph stopped early (flow.stop) at step %d", i) + logger.info("executeGraph stopped early at step %d", i) break nodeId = node.get("id") nodeType = node.get("type", "") + if not 
_is_node_on_active_path(nodeId, connectionMap, nodeOutputs): + logger.info("executeGraph step %d/%d: nodeId=%s SKIP (inactive branch)", i + 1, len(ordered), nodeId) + continue executor = _getExecutor(nodeType, services, automation2_interface) logger.info( "executeGraph step %d/%d: nodeId=%s nodeType=%s executor=%s", @@ -160,14 +252,54 @@ async def executeGraph( logger.debug("executeGraph node %s: no executor, output=None", nodeId) continue try: - result = await executor.execute(node, context) - nodeOutputs[nodeId] = result - logger.info( - "executeGraph node %s done: result_type=%s result_keys=%s", - nodeId, - type(result).__name__, - list(result.keys()) if isinstance(result, dict) else "n/a", - ) + if nodeType == "flow.loop": + result = await executor.execute(node, context) + items = result.get("items") or [] + body_ids = getLoopBodyNodeIds(nodeId, connectionMap) + body_ordered = [n for n in ordered if n.get("id") in body_ids] + processed_in_loop.update(body_ids) + processed_in_loop.add(nodeId) + for idx, item in enumerate(items): + nodeOutputs[nodeId] = {"items": items, "count": len(items), "currentItem": item, "currentIndex": idx} + context["_loopState"] = {"loopNodeId": nodeId, "currentIndex": idx, "items": items} + for body_node in body_ordered: + bnid = body_node.get("id") + if not bnid or context.get("_stopped"): + break + if not _is_node_on_active_path(bnid, connectionMap, nodeOutputs): + continue + bexec = _getExecutor(body_node.get("type", ""), services, automation2_interface) + if not bexec: + nodeOutputs[bnid] = None + continue + try: + bres = await bexec.execute(body_node, context) + nodeOutputs[bnid] = bres + logger.info("executeGraph loop body node %s done (iter %d)", bnid, idx) + except PauseForHumanTaskError as e: + if runId and automation2_interface: + run = automation2_interface.getRun(runId) or {} + run_ctx = dict(run.get("context") or {}) + run_ctx["_loopState"] = {"loopNodeId": nodeId, "currentIndex": idx, "items": items} + automation2_interface.updateRun(e.runId, status="paused", nodeOutputs=dict(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx) + return {"success": False, "paused": True, "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, "nodeOutputs": dict(nodeOutputs)} + except Exception as ex: + logger.exception("executeGraph loop body node %s FAILED: %s", bnid, ex) + nodeOutputs[bnid] = {"error": str(ex), "success": False} + if runId and automation2_interface: + automation2_interface.updateRun(runId, status="failed", nodeOutputs=nodeOutputs) + return {"success": False, "error": str(ex), "nodeOutputs": nodeOutputs, "failedNode": bnid} + nodeOutputs[nodeId] = {"items": items, "count": len(items)} + logger.info("executeGraph flow.loop done: %d iterations", len(items)) + else: + result = await executor.execute(node, context) + nodeOutputs[nodeId] = result + logger.info( + "executeGraph node %s done: result_type=%s result_keys=%s", + nodeId, + type(result).__name__, + list(result.keys()) if isinstance(result, dict) else "n/a", + ) except PauseForHumanTaskError as e: logger.info("executeGraph paused for human task %s", e.taskId) return { diff --git a/modules/workflows/automation2/executors/__init__.py b/modules/workflows/automation2/executors/__init__.py index c147a0d0..2b6768df 100644 --- a/modules/workflows/automation2/executors/__init__.py +++ b/modules/workflows/automation2/executors/__init__.py @@ -3,14 +3,12 @@ from .triggerExecutor import TriggerExecutor from .flowExecutor import FlowExecutor -from .dataExecutor import DataExecutor from 
.actionNodeExecutor import ActionNodeExecutor from .inputExecutor import InputExecutor, PauseForHumanTaskError, PauseForEmailWaitError __all__ = [ "TriggerExecutor", "FlowExecutor", - "DataExecutor", "ActionNodeExecutor", "InputExecutor", "PauseForHumanTaskError", diff --git a/modules/workflows/automation2/executors/actionNodeExecutor.py b/modules/workflows/automation2/executors/actionNodeExecutor.py index 504fb34e..ab19964d 100644 --- a/modules/workflows/automation2/executors/actionNodeExecutor.py +++ b/modules/workflows/automation2/executors/actionNodeExecutor.py @@ -1,11 +1,31 @@ # Copyright (c) 2025 Patrick Motsch -# Action node executor - maps ai.*, email.*, sharepoint.* to method actions via ActionExecutor. +# Action node executor - maps ai.*, email.*, sharepoint.*, clickup.* to method actions via ActionExecutor. +# +# Unified handover format for all nodes: +# - Node output: { success, error?, documents, documentList, data } – documents and documentList are identical +# - Input merge: downstream receives documents via _getDocumentsFromUpstream(inp) – reads documents or documentList +# - Incoming email handover: (context, documentList, reply_to, subject) via _formatEmailOutputAsContext / _unpackIncomingEmail +import json import logging +import re from typing import Dict, Any, List, Optional logger = logging.getLogger(__name__) +# UserConnection.id (UUID) when connectionId could not be mapped to connection:authority:username +_USER_CONNECTION_ID_RE = re.compile( + r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$", + re.IGNORECASE, +) + + +def _is_user_connection_id(val: Any) -> bool: + if val is None or isinstance(val, (dict, list)): + return False + s = str(val).strip() + return bool(_USER_CONNECTION_ID_RE.match(s)) + def _getNodeDefinition(nodeType: str) -> Optional[Dict[str, Any]]: """Get node definition by type id for _method, _action, _paramMap.""" @@ -63,11 +83,12 @@ def _extractEmailContentFromUpstream(inp: Any) -> Optional[Dict[str, Any]]: """ Extract {subject, body, to} from upstream node output (e.g. AI node returning JSON). Expects JSON like {"subject": "...", "body": "...", "to": "..."} in documentData. + Uses unified handover: documents/documentList. """ if not inp: return None import json - docs = inp.get("documents", inp.get("documentList", [])) if isinstance(inp, dict) else [] + docs = _getDocumentsFromUpstream(inp) if not docs: return None doc = docs[0] if isinstance(docs, list) else docs @@ -92,15 +113,12 @@ def _extractContextFromUpstream(inp: Any) -> Optional[str]: Extract plain text context from upstream node output (e.g. AI node returning txt). Use when _extractEmailContentFromUpstream returns None – the generated document content (email body, summary, etc.) should be passed as context to email.draftEmail. + Uses unified handover: documents/documentList. """ if not inp: return None - docs = None - if isinstance(inp, dict): - docs = inp.get("documents") or inp.get("documentList") - if not docs and isinstance(inp.get("data"), dict): - docs = inp.get("data", {}).get("documents") - if not docs or not isinstance(docs, (list, tuple)): + docs = _getDocumentsFromUpstream(inp) + if not docs: return None doc = docs[0] if docs else None if not doc: @@ -114,6 +132,63 @@ def _extractContextFromUpstream(inp: Any) -> Optional[str]: return s if s else None +def _payloadToContext(payload: Any) -> Optional[str]: + """Convert payload (e.g. 
from form) to readable text for document context.""" + if payload is None: + return None + if isinstance(payload, str) and payload.strip(): + return payload.strip() + if isinstance(payload, dict): + try: + import json + return json.dumps(payload, ensure_ascii=False, indent=2) + except (TypeError, ValueError): + lines = [f"{k}: {v}" for k, v in payload.items()] + return "\n".join(lines) if lines else None + return str(payload).strip() if str(payload).strip() else None + + +def _getContextFromUpstream(out: Any) -> Optional[str]: + """ + Get context from upstream node output. Prefers explicit 'context' field; + falls back to documents/documentList (first doc's documentData), then payload. + Handles: AI (context), form (payload or top-level field dict), upload (document refs). + """ + if not out or not isinstance(out, dict): + return None + ctx = out.get("context") + if isinstance(ctx, str) and ctx.strip(): + return ctx.strip() + doc_ctx = _extractContextFromUpstream(out) + if doc_ctx: + return doc_ctx + payload = out.get("payload") + if payload is not None: + return _payloadToContext(payload) + if "documents" not in out and "documentList" not in out and "success" not in out: + return _payloadToContext(out) + return None + + +def _extractContextFromResult(result: Any) -> Optional[str]: + """ + Extract plain text context from ActionResult (ActionExecutor result). + Used to populate 'context' in unified output for AI nodes. + """ + if not result or not hasattr(result, "documents"): + return None + docs = result.documents or [] + if not docs: + return None + doc = docs[0] + raw = getattr(doc, "documentData", None) if hasattr(doc, "documentData") else (doc.get("documentData") if isinstance(doc, dict) else None) + if not raw: + return None + if isinstance(raw, bytes): + return raw.decode("utf-8", errors="replace").strip() + return str(raw).strip() if str(raw).strip() else None + + def _gatherAttachmentDocumentsFromUpstream( nodeId: str, inputSources: Dict[str, Dict[int, tuple]], @@ -140,7 +215,7 @@ def _gatherAttachmentDocumentsFromUpstream( if srcType in ("sharepoint.downloadFile", "sharepoint.readFile"): if isinstance(out, dict): - for d in out.get("documents") or out.get("documentList") or []: + for d in _getDocumentsFromUpstream(out): if isinstance(d, dict) and (d.get("documentData") or (d.get("validationMetadata") or {}).get("fileId")): docs.append(d) elif hasattr(d, "documentData") or (getattr(d, "validationMetadata", None) or {}).get("fileId"): @@ -152,6 +227,62 @@ def _gatherAttachmentDocumentsFromUpstream( return docs +def _getDocumentsFromUpstream(out: Any) -> list: + """Unified: extract documents list from any node output. + Supports: documents, documentList, data.documents. + Also: input.upload result format { file, files, fileIds } - converts to doc refs with validationMetadata.fileId. + """ + if not out or not isinstance(out, dict): + return [] + docs = out.get("documents") or out.get("documentList") + if not docs and isinstance(out.get("data"), dict): + docs = out.get("data", {}).get("documents") or out.get("data", {}).get("documentList") + if not docs: + # input.upload task result: { file: {id, fileName}, files: [...], fileIds: [...] 
} + def _file_to_doc(f: Any) -> Optional[Dict[str, Any]]: + if isinstance(f, dict): + fid = f.get("id") + fname = f.get("fileName") or f.get("filename") or "file" + if fid: + return { + "documentName": fname, + "fileName": fname, + "validationMetadata": {"fileId": str(fid)}, + } + elif isinstance(f, str): + return {"documentName": "file", "fileName": "file", "validationMetadata": {"fileId": f}} + return None + + file_obj = out.get("file") + files_arr = out.get("files") or [] + file_ids = out.get("fileIds") or [] + if file_obj: + d = _file_to_doc(file_obj) + if d: + docs = [d] + if not docs and files_arr: + docs = [d for f in files_arr for d in [_file_to_doc(f)] if d] + if not docs and file_ids: + docs = [_file_to_doc(fid) for fid in file_ids if _file_to_doc(fid)] + if not docs: + return [] + return docs if isinstance(docs, (list, tuple)) else [docs] + + +def _unpackIncomingEmail(incoming: Optional[tuple]) -> Optional[tuple]: + """ + Unified handover: (context, documentList, reply_to, subject). + Returns (ctx, doc_list, reply_to, subject) or None. + """ + if not incoming or not isinstance(incoming, (list, tuple)): + return None + ctx = incoming[0] if len(incoming) > 0 else None + doc_list = incoming[1] if len(incoming) > 1 else [] + reply_to = incoming[2] if len(incoming) > 2 else None + subject = incoming[3] if len(incoming) > 3 else "" + return (ctx, doc_list or [], reply_to, subject) + + def _getIncomingEmailFromUpstream( nodeId: str, inputSources: Dict[str, Dict[int, tuple]], @@ -189,12 +320,14 @@ def _getIncomingEmailFromUpstream( def _formatEmailOutputAsContext(out: Any) -> Optional[tuple]: - """Format email node output as (context, documentList, reply_to) for composeAndDraftEmail. + """Format email node output as (context, documentList, reply_to, subject) for composeAndDraftEmail. reply_to = sender address of first email (recipient for the reply). + subject = original subject (for Re: prefix). + Returns unified handover: (text, files/docs, reply_to, subject). """ if not out: return None - docs = out.get("documents", out.get("documentList", [])) if isinstance(out, dict) else [] + docs = _getDocumentsFromUpstream(out) if not docs: return None doc = docs[0] if isinstance(docs, list) else docs @@ -217,6 +350,7 @@ def _formatEmailOutputAsContext(out: Any) -> Optional[tuple]: if not emails_list: return None reply_to = None + first_subject = "" parts = ["Reply to the following email(s):", ""] for i, em in enumerate(emails_list[:5]): # max 5 if not isinstance(em, dict): @@ -227,6 +361,8 @@ def _formatEmailOutputAsContext(out: Any) -> Optional[tuple]: if from_str and not reply_to: reply_to = addr.get("address", "") or from_str subj = em.get("subject", "") + if subj and not first_subject: + first_subject = subj body = em.get("bodyPreview", "") or (em.get("body") or {}).get("content", "") if isinstance(em.get("body"), dict) else "" if body and len(str(body)) > 1500: body = str(body)[:1500] + "..." 
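# Aside (illustrative, not part of the patch): the unified email handover is a
# 4-tuple (context, documentList, reply_to, subject), so a downstream draft step
# can unpack it with _unpackIncomingEmail defined above:
#
#     incoming = ("Customer asks for the invoice.", [], "alice@example.com", "Invoice 42")
#     ctx, docs, reply_to, subject = _unpackIncomingEmail(incoming)
#     # reply_to == "alice@example.com"; the reply subject becomes "Re: Invoice 42"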
@@ -238,7 +374,7 @@ def _formatEmailOutputAsContext(out: Any) -> Optional[tuple]: parts.insert(2, f"Recipient (reply to this address): {reply_to}") parts.insert(3, "") context = "\n".join(parts).strip() - return (context, docs, reply_to) + return (context, docs, reply_to, first_subject) def _buildSearchQuery( @@ -349,6 +485,10 @@ def _buildActionParams( ref = _resolveConnectionIdToReference(chatService, connId, services) if ref: params["connectionReference"] = ref + elif _is_user_connection_id(connId): + # Automation2 worker often has no chat user connection list; pass UUID through — + # method helpers (e.g. ClickupConnectionHelper) resolve via interfaceDbApp.getUserConnectionById. + params["connectionReference"] = str(connId).strip() else: logger.warning(f"Could not resolve connectionId {connId} to connectionReference") params.pop("connectionId", None) @@ -384,7 +524,7 @@ def _buildActionParams( class ActionNodeExecutor: - """Execute ai.*, email.*, sharepoint.* nodes by mapping to method actions.""" + """Execute ai.*, email.*, sharepoint.*, clickup.* nodes by mapping to method actions.""" def __init__(self, services: Any): self.services = services @@ -414,16 +554,57 @@ class ActionNodeExecutor: nodeDef = _getNodeDefinition(nodeType) params = dict(node.get("parameters") or {}) resolvedParams = resolveParameterReferences(params, context.get("nodeOutputs", {})) + if nodeType == "clickup.updateTask": + from modules.workflows.automation2.clickupTaskUpdateMerge import merge_clickup_task_update_entries - # Merge input from connected nodes (documentList, etc.) + merge_clickup_task_update_entries(resolvedParams) + + # Merge input from connected nodes (unified handover: documents/documentList, context) inputSources = context.get("inputSources", {}).get(nodeId, {}) if 0 in inputSources: srcId, _ = inputSources[0] inp = context.get("nodeOutputs", {}).get(srcId) - if isinstance(inp, dict): - resolvedParams.setdefault("documentList", inp.get("documents", inp.get("documentList", []))) + docs = _getDocumentsFromUpstream(inp) if isinstance(inp, dict) else [] + if docs: + resolvedParams.setdefault("documentList", docs) elif inp is not None: resolvedParams.setdefault("input", inp) + # file.create: build context from contentSources (concatenated) or fallback to upstream + if nodeType == "file.create": + sources = resolvedParams.get("contentSources") + if not isinstance(sources, list): + sources = [resolvedParams.get("contentSource")] if resolvedParams.get("contentSource") else [] + parts = [] + for s in sources: + if s is None or s == "": + continue + if isinstance(s, str): + txt = s.strip() + elif isinstance(s, dict): + txt = _payloadToContext(s) if s else "" + else: + txt = str(s) + if txt: + parts.append(txt) + upstream_context = _getContextFromUpstream(inp) + if parts: + parts_joined = "\n\n".join(parts) + # When upstream is AI and user only selected prompt, use full context (prompt + response) + if ( + isinstance(inp, dict) + and upstream_context + and len(upstream_context) > len(parts_joined) + ): + prompt_only = (inp.get("prompt") or "").strip() + if prompt_only and parts_joined.strip() == prompt_only: + resolvedParams["context"] = upstream_context + else: + resolvedParams["context"] = parts_joined + else: + resolvedParams["context"] = parts_joined + else: + if upstream_context: + resolvedParams["context"] = upstream_context # ai.prompt with email upstream: inject actual email content into prompt so AI has context # (getChatDocumentsFromDocumentList fails in automation2 – workflow has no messages) @@ 
-434,17 +615,20 @@ class ActionNodeExecutor: srcNode = next((n for n in orderedNodes if n.get("id") == srcId), None) srcType = (srcNode or {}).get("type", "") if srcType in ("email.checkEmail", "email.searchEmail"): - incoming = _getIncomingEmailFromUpstream( + incoming = _unpackIncomingEmail(_getIncomingEmailFromUpstream( nodeId, context.get("inputSources", {}), context.get("nodeOutputs", {}), orderedNodes, - ) + )) if incoming: - ctx, _doc_list, _reply_to = incoming + ctx, _doc_list, _reply_to, _ = incoming if ctx and ctx.strip(): - base_prompt = (resolvedParams.get("aiPrompt") or "").strip() - resolvedParams["aiPrompt"] = ( + # Set "prompt" so _paramMap (prompt→aiPrompt) passes it through to ai.process + base_prompt = ( + (resolvedParams.get("prompt") or resolvedParams.get("aiPrompt") or "") + ).strip() + resolvedParams["prompt"] = ( f"Eingehende E-Mail:\n{ctx}\n\nAufgabe: {base_prompt}" if base_prompt else f"Eingehende E-Mail:\n{ctx}" @@ -454,6 +638,11 @@ class ActionNodeExecutor: chatService = getattr(self.services, "chat", None) actionParams = _buildActionParams(node, nodeDef or {}, resolvedParams, chatService, self.services) + # ai.prompt: use simpleMode by default – direct AI call, no document pipeline (chapters/sections) + # For short prompts like "formuliere eine passende email" this avoids ~13 AI calls and verbose output + if nodeType == "ai.prompt" and "simpleMode" not in actionParams: + actionParams["simpleMode"] = True + # email.checkEmail: pause and wait for new email (background poller will resume) if nodeType == "email.checkEmail": runId = context.get("_runId") @@ -492,9 +681,26 @@ class ActionNodeExecutor: if srcType.startswith("ai."): inp = nodeOutputs.get(srcId) email_content = _extractEmailContentFromUpstream(inp) + # Reply flow: get incoming email metadata (replyTo, subject, original docs) when email->AI->draft + incoming = _unpackIncomingEmail(_getIncomingEmailFromUpstream(nodeId, inputSources, nodeOutputs, orderedNodes)) + reply_to = None + reply_subject = None + reply_docs = [] + if incoming: + inc_ctx, doc_list, reply_to, first_subject = incoming + reply_docs = doc_list + reply_subject = ("Re: " + first_subject) if first_subject else None if email_content: - actionParams["emailContent"] = email_content - actionParams["context"] = email_content.get("body", "") or "(from connected AI node)" + # Merge reply metadata when available + merged = dict(email_content) + if reply_to and not merged.get("to"): + merged["to"] = reply_to if isinstance(reply_to, list) else [reply_to] + if reply_subject and not merged.get("subject"): + merged["subject"] = reply_subject + actionParams["emailContent"] = merged + actionParams["context"] = merged.get("body", "") or "(from connected AI node)" + if reply_docs: + actionParams["replySourceDocuments"] = reply_docs # Attachments: gather from file nodes upstream of AI (e.g. downloadFile -> AI -> email) attachment_docs = _gatherAttachmentDocumentsFromUpstream( nodeId, inputSources, nodeOutputs, orderedNodes @@ -514,27 +720,44 @@ class ActionNodeExecutor: extra = [x for x in (existing if isinstance(existing, list) else []) if _is_binary_attachment(x)] actionParams["documentList"] = attachment_docs + extra if not email_content: - # AI returns plain text (e.g. 
email.txt): use as email body directly (no extra AI call) - ctx = _extractContextFromUpstream(inp) + # AI returns plain text or context: use as email body directly (no extra AI call) + ctx = _getContextFromUpstream(inp) if ctx: + # Reply flow: get incoming email metadata (replyTo, subject, original docs) + incoming = _unpackIncomingEmail(_getIncomingEmailFromUpstream(nodeId, inputSources, nodeOutputs, orderedNodes)) + reply_to = None + reply_subject = None + reply_docs = [] + if incoming: + inc_ctx, doc_list, reply_to, first_subject = incoming + reply_docs = doc_list + reply_subject = ("Re: " + first_subject) if first_subject else None actionParams["emailContent"] = { - "subject": actionParams.get("subject", "Draft"), + "subject": reply_subject or actionParams.get("subject", "Draft"), "body": ctx, - "to": actionParams.get("to"), + "to": [reply_to] if reply_to else (actionParams.get("to") or []), } actionParams["context"] = ctx + if reply_to and not actionParams.get("to"): + actionParams["to"] = [reply_to] + # Reply flow: attach original email(s) for proper reply + if reply_docs: + actionParams["replySourceDocuments"] = reply_docs else: - # Fallback: incoming email from upstream (if flow is email->AI->draft) - incoming = _getIncomingEmailFromUpstream(nodeId, inputSources, nodeOutputs, orderedNodes) + # Fallback: incoming email from upstream (AI returned nothing usable) + incoming = _unpackIncomingEmail(_getIncomingEmailFromUpstream(nodeId, inputSources, nodeOutputs, orderedNodes)) if incoming: - ctx, doc_list, reply_to = incoming - actionParams["context"] = ctx + inc_ctx, doc_list, reply_to, first_subject = incoming + actionParams["context"] = inc_ctx if doc_list and not actionParams.get("documentList"): actionParams["documentList"] = doc_list if reply_to and not actionParams.get("to"): actionParams["to"] = [reply_to] + if first_subject and not actionParams.get("subject"): + actionParams["subject"] = "Re: " + first_subject + actionParams["replySourceDocuments"] = doc_list else: - doc_count = len(inp.get("documents", [])) if isinstance(inp, dict) else 0 + doc_count = len(_getDocumentsFromUpstream(inp)) logger.warning( "email.draftEmail: AI upstream returned %d doc(s) but context extraction failed (no subject/body, no plain text). 
" "Ensure AI node outputs document with documentData.", @@ -545,7 +768,7 @@ class ActionNodeExecutor: # File itself is the context: pass as attachment, use filename as minimal context (no content extraction) if not actionParams.get("context"): inp = nodeOutputs.get(srcId) - docs = (inp.get("documents") or inp.get("documentList", [])) if isinstance(inp, dict) else [] + docs = _getDocumentsFromUpstream(inp) doc = docs[0] if docs else None name = None if isinstance(doc, dict): @@ -563,32 +786,71 @@ class ActionNodeExecutor: else: # Direct connection to email.checkEmail/searchEmail: use incoming email as context if not actionParams.get("context"): - incoming = _getIncomingEmailFromUpstream(nodeId, inputSources, nodeOutputs, orderedNodes) + incoming = _unpackIncomingEmail(_getIncomingEmailFromUpstream(nodeId, inputSources, nodeOutputs, orderedNodes)) if incoming: - ctx, doc_list, reply_to = incoming - actionParams["context"] = ctx + inc_ctx, doc_list, reply_to, first_subject = incoming + actionParams["context"] = inc_ctx if doc_list and not actionParams.get("documentList"): actionParams["documentList"] = doc_list if reply_to and not actionParams.get("to"): actionParams["to"] = [reply_to] + if first_subject and not actionParams.get("subject"): + actionParams["subject"] = "Re: " + first_subject + actionParams["replySourceDocuments"] = doc_list # Generic context handover: when upstream provides documents, pass first doc as content for actions that expect it docList = actionParams.get("documentList") or resolvedParams.get("documentList") if docList and "content" not in actionParams: first = docList[0] if isinstance(docList, list) and docList else docList - # Actions like sharepoint.uploadFile consume content from context + # Actions like sharepoint.uploadFile / clickup.uploadAttachment consume content from context actionParams["content"] = first executor = ActionExecutor(self.services) logger.info("ActionNodeExecutor node %s calling executeAction(%s, %s)", nodeId, methodName, actionName) result = await executor.executeAction(methodName, actionName, actionParams) + # Extract context from result for unified output (AI text for downstream file nodes) + extracted_context = _extractContextFromResult(result) if result else None + + # AI nodes: include prompt in output; context = prompt + AI response (für file.create etc.) 
+ prompt_text = (resolvedParams.get("prompt") or resolvedParams.get("aiPrompt") or "") + if not isinstance(prompt_text, str): + prompt_text = str(prompt_text) if prompt_text else "" + prompt_text = (prompt_text or "").strip() + if nodeType.startswith("ai.") and prompt_text: + full_context = ( + f"{prompt_text}\n\n{extracted_context}" if extracted_context else prompt_text + ) + else: + full_context = extracted_context or "" + out_prompt = prompt_text if nodeType.startswith("ai.") else "" + + docs_list = [d.model_dump() if hasattr(d, "model_dump") else d for d in (result.documents or [])] + + # result = AI response text (for contentSources refs: prompt + context + result = full output, optionally duplicated) + out_result = extracted_context if nodeType.startswith("ai.") else None + out = { "success": result.success, "error": result.error, - "documents": [d.model_dump() if hasattr(d, "model_dump") else d for d in (result.documents or [])], + "documents": docs_list, + "documentList": docs_list, + "prompt": out_prompt, + "context": full_context, + "result": out_result, "data": result.model_dump() if hasattr(result, "model_dump") else {"success": result.success, "error": result.error}, } + if result.success and docs_list and nodeType.startswith("clickup."): + try: + d0 = docs_list[0] if isinstance(docs_list[0], dict) else {} + raw = d0.get("documentData") + if isinstance(raw, str) and raw.strip(): + parsed = json.loads(raw) + if isinstance(parsed, dict) and parsed.get("id") is not None: + out["taskId"] = str(parsed["id"]) + out["clickupTask"] = parsed + except (json.JSONDecodeError, TypeError, ValueError): + pass logger.info( "ActionNodeExecutor node %s result: success=%s error=%s doc_count=%d", nodeId, diff --git a/modules/workflows/automation2/executors/dataExecutor.py b/modules/workflows/automation2/executors/dataExecutor.py deleted file mode 100644 index 386c8abd..00000000 --- a/modules/workflows/automation2/executors/dataExecutor.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) 2025 Patrick Motsch -# Data transformation node executor (setFields, filter, parseJson, template). - -import json -import logging -import re -from typing import Dict, Any, List - -logger = logging.getLogger(__name__) - - -def _get_nested(obj: Any, path: str) -> Any: - """Get nested key from obj, e.g. 
'data.items'.""" - for k in path.split("."): - if not k: - continue - if isinstance(obj, dict) and k in obj: - obj = obj[k] - elif isinstance(obj, (list, tuple)) and k.isdigit(): - obj = obj[int(k)] - else: - return None - return obj - - -class DataExecutor: - """Execute data transformation nodes.""" - - async def execute( - self, - node: Dict[str, Any], - context: Dict[str, Any], - ) -> Any: - nodeType = node.get("type", "") - nodeOutputs = context.get("nodeOutputs", {}) - nodeId = node.get("id", "") - inputSources = context.get("inputSources", {}).get(nodeId, {}) - params = node.get("parameters") or {} - logger.info( - "DataExecutor node %s type=%s inputSources=%s params=%s", - nodeId, - nodeType, - inputSources, - params, - ) - - inp = None - if 0 in inputSources: - srcId, _ = inputSources[0] - inp = nodeOutputs.get(srcId) - - from modules.workflows.automation2.graphUtils import resolveParameterReferences - resolvedParams = {k: resolveParameterReferences(v, nodeOutputs) for k, v in params.items()} - - if nodeType == "data.setFields": - out = self._setFields(inp, resolvedParams) - logger.info("DataExecutor node %s setFields inp=%s -> %s", nodeId, type(inp).__name__, out) - return out - if nodeType == "data.filter": - out = self._filter(inp, resolvedParams) - logger.info("DataExecutor node %s filter inp=%s -> len=%d", nodeId, type(inp).__name__, len(out) if isinstance(out, list) else -1) - return out - if nodeType == "data.parseJson": - out = self._parseJson(inp, resolvedParams) - logger.info("DataExecutor node %s parseJson -> %s", nodeId, type(out).__name__) - return out - if nodeType == "data.template": - out = self._template(inp, resolvedParams, nodeOutputs) - logger.info("DataExecutor node %s template -> %s", nodeId, out) - return out - - logger.debug("DataExecutor node %s unhandled type %s -> passThrough", nodeId, nodeType) - return inp - - def _setFields(self, inp: Any, params: Dict) -> Any: - fields = params.get("fields", {}) - if not isinstance(fields, dict): - return inp - base = dict(inp) if isinstance(inp, dict) else {} - base.update(fields) - return base - - def _filter(self, inp: Any, params: Dict) -> Any: - itemsPath = (params.get("itemsPath") or "").strip() - condition = params.get("condition", "True") - items = inp - if itemsPath: - items = _get_nested(inp, itemsPath) - if not isinstance(items, list): - items = [inp] if inp is not None else [] - out = [] - for i, item in enumerate(items): - try: - local = {"item": item, "index": i, "input": inp} - ok = bool(eval(condition, {"__builtins__": {}}, local)) - if ok: - out.append(item) - except Exception: - pass - return out - - def _parseJson(self, inp: Any, params: Dict) -> Any: - jsonPath = (params.get("jsonPath") or "").strip() - raw = inp - if jsonPath: - raw = _get_nested(inp, jsonPath) if isinstance(inp, dict) else inp - if isinstance(raw, dict): - return raw - if isinstance(raw, str): - try: - return json.loads(raw) - except json.JSONDecodeError: - return {"error": "Invalid JSON", "raw": raw[:200]} - return inp - - def _template(self, inp: Any, params: Dict, nodeOutputs: Dict) -> Any: - tpl = params.get("template", "") - from modules.workflows.automation2.graphUtils import resolveParameterReferences - result = resolveParameterReferences(tpl, nodeOutputs) - return {"text": result, "template": tpl} diff --git a/modules/workflows/automation2/executors/flowExecutor.py b/modules/workflows/automation2/executors/flowExecutor.py index de5789e5..0df17335 100644 --- a/modules/workflows/automation2/executors/flowExecutor.py +++ 
b/modules/workflows/automation2/executors/flowExecutor.py @@ -1,9 +1,8 @@ # Copyright (c) 2025 Patrick Motsch -# Flow control node executor (ifElse, merge, wait, stop). +# Flow control node executor (ifElse, switch, loop). -import asyncio import logging -from typing import Dict, Any +from typing import Any, Dict logger = logging.getLogger(__name__) @@ -33,18 +32,6 @@ class FlowExecutor: out = await self._ifElse(node, nodeOutputs, nodeId, inputSources) logger.info("FlowExecutor node %s ifElse -> %s", nodeId, out) return out - if nodeType == "flow.merge": - out = await self._merge(node, nodeOutputs, nodeId, inputSources) - logger.info("FlowExecutor node %s merge -> %s", nodeId, out) - return out - if nodeType == "flow.wait": - out = await self._wait(node, nodeOutputs, nodeId, inputSources) - logger.info("FlowExecutor node %s wait -> %s", nodeId, out) - return out - if nodeType == "flow.stop": - context["_stopped"] = True - logger.info("FlowExecutor node %s -> STOP", nodeId) - return {"stopped": True} if nodeType == "flow.switch": out = await self._switch(node, nodeOutputs, nodeId, inputSources) logger.info("FlowExecutor node %s switch -> %s", nodeId, out) @@ -72,60 +59,142 @@ class FlowExecutor: nodeId: str, inputSources: Dict, ) -> Any: - condExpr = (node.get("parameters") or {}).get("condition", "") + condParam = (node.get("parameters") or {}).get("condition") inp = self._getInputData(nodeId, {nodeId: inputSources}, nodeOutputs) - # Simple eval - in production use safe evaluation - try: - # Replace {{nodeId}} refs with actual values - from modules.workflows.automation2.graphUtils import resolveParameterReferences - resolved = resolveParameterReferences(condExpr, nodeOutputs) - # Minimal eval for simple comparisons (e.g. "True", "1 > 0") - ok = bool(eval(resolved)) if resolved else False - except Exception: - ok = False + ok = self._evalConditionParam(condParam, nodeOutputs) return {"branch": 0 if ok else 1, "conditionResult": ok, "input": inp} - async def _merge(self, node: Dict, nodeOutputs: Dict, nodeId: str, inputSources: Dict) -> Any: - mode = (node.get("parameters") or {}).get("mode", "append") - sources = inputSources - items = [] - for inpIdx in sorted(sources.keys()): - srcId, _ = sources[inpIdx] - data = nodeOutputs.get(srcId) - if data is not None: - if isinstance(data, list): - items.extend(data) + def _evalConditionParam(self, condParam: Any, nodeOutputs: Dict) -> bool: + """Evaluate condition: structured {type,ref,operator,value} or legacy string/ref.""" + if condParam is None: + return False + if isinstance(condParam, dict) and condParam.get("type") == "condition": + return self._evalStructuredCondition(condParam, nodeOutputs) + from modules.workflows.automation2.graphUtils import resolveParameterReferences + resolved = resolveParameterReferences(condParam, nodeOutputs) + return self._evalCondition(resolved) + + def _get_by_path(self, data: Any, path: list) -> Any: + """Traverse data by path (strings and ints).""" + current = data + for seg in path: + if current is None: + return None + if isinstance(current, dict) and isinstance(seg, str) and seg in current: + current = current[seg] + elif isinstance(current, (list, tuple)) and isinstance(seg, (int, str)): + idx = int(seg) if isinstance(seg, str) and str(seg).isdigit() else seg + if isinstance(idx, int) and 0 <= idx < len(current): + current = current[idx] else: - items.append(data) - if mode == "combine" and len(items) == 2: - if isinstance(items[0], dict) and isinstance(items[1], dict): - return {**items[0], **items[1]} - 
return items + return None + else: + return None + return current - async def _wait(self, node: Dict, nodeOutputs: Dict) -> Any: - secs = (node.get("parameters") or {}).get("seconds", 0) - if secs > 0: - await asyncio.sleep(min(float(secs), 300)) - nodeId = node.get("id") - from modules.workflows.automation2.graphUtils import getInputSources - # Input comes from context - inp = context.get("_inputData") if "context" in dir() else None - return nodeOutputs.get(nodeId, {}) + def _evalStructuredCondition(self, cond: Dict, nodeOutputs: Dict) -> bool: + """Evaluate structured {ref, operator, value} condition.""" + ref = cond.get("ref") + if not ref or ref.get("type") != "ref": + return False + node_id = ref.get("nodeId") + path = ref.get("path") or [] + left = self._get_by_path(nodeOutputs.get(node_id), list(path)) + operator = cond.get("operator", "eq") + right = cond.get("value") - async def _wait( - self, - node: Dict, - nodeOutputs: Dict, - nodeId: str, - inputSources: Dict, - ) -> Any: - secs = (node.get("parameters") or {}).get("seconds", 0) - if secs > 0: - await asyncio.sleep(min(float(secs), 300)) - if 0 in inputSources: - srcId, _ = inputSources[0] - return nodeOutputs.get(srcId) - return None + if operator == "eq": + return left == right + if operator == "neq": + return left != right + if operator in ("lt", "lte", "gt", "gte"): + try: + l, r = float(left) if left is not None else 0, float(right) if right is not None else 0 + if operator == "lt": + return l < r + if operator == "lte": + return l <= r + if operator == "gt": + return l > r + if operator == "gte": + return l >= r + except (TypeError, ValueError): + return False + if operator == "contains": + return right is not None and str(right) in str(left or "") + if operator == "not_contains": + return right is None or str(right) not in str(left or "") + if operator == "empty": + return left is None or left == "" or (isinstance(left, (list, dict)) and len(left) == 0) + if operator == "not_empty": + return left is not None and left != "" and (not isinstance(left, (list, dict)) or len(left) > 0) + if operator == "is_true": + return bool(left) + if operator == "is_false": + return not bool(left) + if operator == "before": + return self._compare_dates(left, right, lambda a, b: a < b) + if operator == "after": + return self._compare_dates(left, right, lambda a, b: a > b) + if operator == "exists": + return self._file_exists(left) + if operator == "not_exists": + return not self._file_exists(left) + return False + + def _compare_dates(self, left: Any, right: Any, op) -> bool: + """Compare left/right as dates; op(a,b) is the comparison.""" + + def parse(v): + if v is None: + return None + if hasattr(v, "timestamp"): + return v + s = str(v).strip() + if not s: + return None + from datetime import datetime + + for fmt in ("%Y-%m-%d", "%d.%m.%Y", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"): + try: + return datetime.strptime(s, fmt) + except ValueError: + continue + try: + return datetime.fromisoformat(s.replace("Z", "+00:00")) + except ValueError: + return None + + try: + a, b = parse(left), parse(right) + if a is None or b is None: + return False + return op(a, b) + except Exception: + return False + + def _file_exists(self, val: Any) -> bool: + """Check if value represents an existing file (object with url or non-empty string).""" + if val is None: + return False + if isinstance(val, dict): + return bool(val.get("url") or val.get("name")) + if isinstance(val, str): + return len(val.strip()) > 0 + return bool(val) + + def _evalCondition(self, 
resolved: Any) -> bool: + """Evaluate condition: ref resolves to value → use truthiness; string → try eval.""" + if resolved is None: + return False + if isinstance(resolved, (bool, int, float)): + return bool(resolved) + if isinstance(resolved, str): + try: + return bool(eval(resolved)) + except Exception: + return bool(resolved) + return bool(resolved) async def _switch(self, node: Dict, nodeOutputs: Dict, nodeId: str, inputSources: Dict) -> Any: valueExpr = (node.get("parameters") or {}).get("value", "") @@ -133,14 +202,71 @@ class FlowExecutor: value = resolveParameterReferences(valueExpr, nodeOutputs) cases = (node.get("parameters") or {}).get("cases", []) for i, c in enumerate(cases): - if c == value: + if self._evalSwitchCase(value, c): return {"match": i, "value": value} return {"match": -1, "value": value} + def _evalSwitchCase(self, left: Any, case: Any) -> bool: + """ + Evaluate a switch case. Case can be: + - dict: {operator, value} - use operator to compare left vs value + - plain value: legacy format - exact equality (eq) + """ + if isinstance(case, dict): + operator = case.get("operator", "eq") + right = case.get("value") + else: + operator = "eq" + right = case + # Same logic as _evalStructuredCondition but with explicit left/right + if operator == "eq": + return left == right + if operator == "neq": + return left != right + if operator in ("lt", "lte", "gt", "gte"): + try: + l, r = float(left) if left is not None else 0, float(right) if right is not None else 0 + if operator == "lt": + return l < r + if operator == "lte": + return l <= r + if operator == "gt": + return l > r + if operator == "gte": + return l >= r + except (TypeError, ValueError): + return False + if operator == "contains": + return right is not None and str(right) in str(left or "") + if operator == "not_contains": + return right is None or str(right) not in str(left or "") + if operator == "empty": + return left is None or left == "" or (isinstance(left, (list, dict)) and len(left) == 0) + if operator == "not_empty": + return left is not None and left != "" and (not isinstance(left, (list, dict)) or len(left) > 0) + if operator == "is_true": + return bool(left) + if operator == "is_false": + return not bool(left) + if operator == "before": + return self._compare_dates(left, right, lambda a, b: a < b) + if operator == "after": + return self._compare_dates(left, right, lambda a, b: a > b) + if operator == "exists": + return self._file_exists(left) + if operator == "not_exists": + return not self._file_exists(left) + return False + async def _loop(self, node: Dict, nodeOutputs: Dict, nodeId: str, inputSources: Dict) -> Any: itemsPath = (node.get("parameters") or {}).get("items", "[]") from modules.workflows.automation2.graphUtils import resolveParameterReferences items = resolveParameterReferences(itemsPath, nodeOutputs) - if not isinstance(items, list): + if isinstance(items, list): + pass + elif isinstance(items, dict): + # Convert form payload / object to list of {name, value} for "for each field" + items = [{"name": k, "value": v} for k, v in items.items()] + else: items = [items] if items is not None else [] return {"items": items, "count": len(items)} diff --git a/modules/workflows/automation2/executors/ioExecutor.py b/modules/workflows/automation2/executors/ioExecutor.py index eb006c7e..38e2570c 100644 --- a/modules/workflows/automation2/executors/ioExecutor.py +++ b/modules/workflows/automation2/executors/ioExecutor.py @@ -45,18 +45,22 @@ class IOExecutor: if 0 in inputSources: srcId, _ = 
inputSources[0] inp = nodeOutputs.get(srcId) - if isinstance(inp, dict): - resolvedParams.setdefault("documentList", inp.get("documents", inp.get("documentList", []))) + from modules.workflows.automation2.executors.actionNodeExecutor import _getDocumentsFromUpstream + docs = _getDocumentsFromUpstream(inp) if isinstance(inp, dict) else [] + if docs: + resolvedParams.setdefault("documentList", docs) elif inp is not None: resolvedParams.setdefault("input", inp) executor = ActionExecutor(self.services) logger.info("IOExecutor node %s calling executeAction(%s, %s)", nodeId, methodName, actionName) result = await executor.executeAction(methodName, actionName, resolvedParams) + docs_list = [d.model_dump() if hasattr(d, "model_dump") else d for d in (result.documents or [])] out = { "success": result.success, "error": result.error, - "documents": [d.model_dump() if hasattr(d, "model_dump") else d for d in (result.documents or [])], + "documents": docs_list, + "documentList": docs_list, "data": result.model_dump() if hasattr(result, "model_dump") else {"success": result.success, "error": result.error}, } logger.info( diff --git a/modules/workflows/automation2/executors/triggerExecutor.py b/modules/workflows/automation2/executors/triggerExecutor.py index 87ac359e..6fd32b80 100644 --- a/modules/workflows/automation2/executors/triggerExecutor.py +++ b/modules/workflows/automation2/executors/triggerExecutor.py @@ -1,37 +1,34 @@ # Copyright (c) 2025 Patrick Motsch -# Trigger node executor. +# Start node executor (node type trigger.manual) — outputs the unified run envelope from context. import logging -from typing import Dict, Any +from typing import Any, Dict + +from modules.workflows.automation2.runEnvelope import normalize_run_envelope logger = logging.getLogger(__name__) class TriggerExecutor: - """Execute trigger nodes (manual, schedule, formSubmit).""" + """ + Single start node on the canvas. Output is always context['runEnvelope'], normalized. + Invocation mode (manual, form, webhook, …) is configured as workflow entry points, not here. 
+ """ async def execute( self, node: Dict[str, Any], context: Dict[str, Any], ) -> Any: - nodeType = node.get("type", "") - nodeId = node.get("id", "") - logger.info("TriggerExecutor node %s type=%s parameters=%s", nodeId, nodeType, node.get("parameters")) - if nodeType == "trigger.manual": - out = {"triggered": True, "source": "manual"} - logger.info("TriggerExecutor node %s -> manual trigger: %s", nodeId, out) - return out - if nodeType == "trigger.schedule": - out = {"triggered": True, "source": "schedule"} - logger.info("TriggerExecutor node %s -> schedule trigger: %s", nodeId, out) - return out - if nodeType == "trigger.formSubmit": - params = node.get("parameters") or {} - formId = params.get("formId", "") - out = {"triggered": True, "source": "formSubmit", "formId": formId} - logger.info("TriggerExecutor node %s -> formSubmit: %s", nodeId, out) - return out - out = {"triggered": True, "source": "unknown"} - logger.info("TriggerExecutor node %s -> unknown: %s", nodeId, out) + node_id = node.get("id", "") + base = context.get("runEnvelope") + if not isinstance(base, dict): + out = normalize_run_envelope(None, user_id=context.get("userId")) + else: + out = normalize_run_envelope(base, user_id=context.get("userId")) + logger.info( + "TriggerExecutor node %s trigger.type=%s", + node_id, + (out.get("trigger") or {}).get("type"), + ) return out diff --git a/modules/workflows/automation2/graphUtils.py b/modules/workflows/automation2/graphUtils.py index ad58c69c..0f79b882 100644 --- a/modules/workflows/automation2/graphUtils.py +++ b/modules/workflows/automation2/graphUtils.py @@ -47,6 +47,27 @@ def buildConnectionMap(connections: List[Dict]) -> Dict[str, List[Tuple[str, int return out +def getLoopBodyNodeIds(loopNodeId: str, connectionMap: Dict[str, List[Tuple[str, int, int]]]) -> Set[str]: + """Nodes reachable from loop's output (BFS forward). Body = downstream nodes that receive from loop.""" + from collections import deque + body = set() + # connectionMap: target -> [(source, sourceOutput, targetInput)] + rev: Dict[str, List[str]] = {} # source -> [targets] + for tgt, pairs in connectionMap.items(): + for src, _, _ in pairs: + if src not in rev: + rev[src] = [] + rev[src].append(tgt) + q = deque([loopNodeId]) + while q: + nid = q.popleft() + for tgt in rev.get(nid, []): + if tgt not in body: + body.add(tgt) + q.append(tgt) + return body + + def getInputSources(nodeId: str, connectionMap: Dict[str, List[Tuple[str, int, int]]]) -> Dict[int, Tuple[str, int]]: """ For a node, return targetInput -> (sourceNodeId, sourceOutput). @@ -142,12 +163,59 @@ def topoSort(nodes: List[Dict], connectionMap: Dict[str, List[Tuple[str, int, in return order +def _get_by_path(data: Any, path: List[Any]) -> Any: + """Traverse data by path (strings and ints); return None if not found.""" + current = data + for seg in path: + if current is None: + return None + if isinstance(current, dict) and isinstance(seg, str) and seg in current: + current = current[seg] + elif isinstance(current, (list, tuple)) and isinstance(seg, (int, str)): + idx = int(seg) if isinstance(seg, str) and seg.isdigit() else seg + if isinstance(idx, int) and 0 <= idx < len(current): + current = current[idx] + else: + return None + else: + return None + return current + + def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any: """ - Resolve {{nodeId.output}} or {{nodeId.output.path}} in strings/structures. 
+ Resolve parameter references: + - {{nodeId.output}} or {{nodeId.output.path}} in strings (legacy) + - { "type": "ref", "nodeId": "...", "path": ["field", "nested"] } -> resolved value + - { "type": "value", "value": ... } -> value (then recursively resolve) """ import json import re + + if isinstance(value, dict): + if value.get("type") == "ref": + node_id = value.get("nodeId") + path = value.get("path") + if node_id is not None and isinstance(path, (list, tuple)): + data = nodeOutputs.get(node_id) + plist = list(path) + resolved = _get_by_path(data, plist) + # input.form historically stored flat field dict; refs use payload. + if ( + resolved is None + and isinstance(data, dict) + and plist + and plist[0] == "payload" + and len(plist) > 1 + ): + resolved = _get_by_path(data, plist[1:]) + return resolveParameterReferences(resolved, nodeOutputs) + return value + if value.get("type") == "value": + inner = value.get("value") + return resolveParameterReferences(inner, nodeOutputs) + return {k: resolveParameterReferences(v, nodeOutputs) for k, v in value.items()} + if isinstance(value, str): def repl(m): ref = m.group(1).strip() @@ -170,8 +238,6 @@ def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any: return m.group(0) return str(data) if data is not None else m.group(0) return re.sub(r"\{\{\s*([^}]+)\s*\}\}", repl, value) - if isinstance(value, dict): - return {k: resolveParameterReferences(v, nodeOutputs) for k, v in value.items()} if isinstance(value, list): return [resolveParameterReferences(v, nodeOutputs) for v in value] return value diff --git a/modules/workflows/automation2/runEnvelope.py b/modules/workflows/automation2/runEnvelope.py new file mode 100644 index 00000000..44da2fb5 --- /dev/null +++ b/modules/workflows/automation2/runEnvelope.py @@ -0,0 +1,109 @@ +# Copyright (c) 2025 Patrick Motsch +""" +Unified run envelope for Automation2 start/trigger nodes. + +Downstream nodes always see the same structure regardless of entry point +(manual, form, schedule, webhook, email, api, event). 
+""" + +from copy import deepcopy +from typing import Any, Dict, List, Optional + +# trigger.type values +TRIGGER_TYPES = frozenset( + { + "manual", + "form", + "schedule", + "email", + "webhook", + "api", + "event", + } +) + + +def default_run_envelope( + trigger_type: str = "manual", + *, + entry_point_id: Optional[str] = None, + entry_point_label: Optional[str] = None, + payload: Optional[Dict[str, Any]] = None, + context: Optional[Dict[str, Any]] = None, + files: Optional[List[Any]] = None, + user: Optional[Dict[str, Any]] = None, + metadata: Optional[Dict[str, Any]] = None, + raw: Optional[Dict[str, Any]] = None, +) -> Dict[str, Any]: + """Build a normalized run envelope dict.""" + tt = trigger_type if trigger_type in TRIGGER_TYPES else "manual" + trig: Dict[str, Any] = {"type": tt} + if entry_point_id: + trig["entryPointId"] = entry_point_id + if entry_point_label: + trig["label"] = entry_point_label + return { + "trigger": trig, + "payload": dict(payload or {}), + "context": dict(context or {}), + "files": list(files or []), + "user": dict(user or {}), + "metadata": dict(metadata or {}), + "raw": dict(raw or {}), + } + + +def merge_run_envelope(base: Dict[str, Any], overrides: Optional[Dict[str, Any]]) -> Dict[str, Any]: + """Deep-merge overrides into a copy of base (shallow merge per top-level key except nested dicts).""" + out = deepcopy(base) + if not overrides: + return out + for key in ("payload", "context", "user", "metadata", "raw"): + if key in overrides and isinstance(overrides[key], dict): + merged = dict(out.get(key) or {}) + merged.update(overrides[key]) + out[key] = merged + if "files" in overrides and overrides["files"] is not None: + out["files"] = list(overrides["files"]) + trig = dict(out.get("trigger") or {}) + ot = overrides.get("trigger") + if isinstance(ot, dict): + trig.update(ot) + if trig.get("type") and trig["type"] not in TRIGGER_TYPES: + trig["type"] = "manual" + out["trigger"] = trig + return out + + +def normalize_run_envelope( + incoming: Optional[Dict[str, Any]], + *, + user_id: Optional[str] = None, +) -> Dict[str, Any]: + """ + Normalize partial or missing envelope from API/scheduler. + Ensures all top-level keys exist. + """ + if not incoming or not isinstance(incoming, dict): + env = default_run_envelope("manual") + else: + trig = incoming.get("trigger") if isinstance(incoming.get("trigger"), dict) else {} + ttype = trig.get("type") or "manual" + if ttype not in TRIGGER_TYPES: + ttype = "manual" + env = default_run_envelope( + ttype, + entry_point_id=trig.get("entryPointId"), + entry_point_label=trig.get("label"), + payload=incoming.get("payload"), + context=incoming.get("context"), + files=incoming.get("files"), + user=incoming.get("user"), + metadata=incoming.get("metadata"), + raw=incoming.get("raw"), + ) + if user_id and not env.get("user"): + env["user"] = {"id": user_id} + elif user_id and isinstance(env.get("user"), dict) and "id" not in env["user"]: + env["user"] = {**env["user"], "id": user_id} + return env diff --git a/modules/workflows/automation2/scheduleCron.py b/modules/workflows/automation2/scheduleCron.py new file mode 100644 index 00000000..4a0cfa43 --- /dev/null +++ b/modules/workflows/automation2/scheduleCron.py @@ -0,0 +1,34 @@ +# Copyright (c) 2025 Patrick Motsch +""" +Parse cron strings (5-field or 6-field) to APScheduler CronTrigger kwargs. +Frontend produces: "minute hour day month dow" (5-field) or "sec min hour day month dow" (6-field). 
+""" + +import re +from typing import Any, Dict + + +def parse_cron_to_kwargs(cron: str) -> Dict[str, Any]: + """ + Parse cron string to kwargs for APScheduler CronTrigger. + Supports 5-field (minute hour day month day_of_week) and 6-field (sec min hour day month day_of_week). + Returns dict with: second, minute, hour, day, month, day_of_week. + """ + if not cron or not isinstance(cron, str): + raise ValueError("Invalid cron: empty or not string") + parts = cron.strip().split() + if len(parts) == 5: + minute, hour, day, month, day_of_week = parts + second = "0" + elif len(parts) == 6: + second, minute, hour, day, month, day_of_week = parts + else: + raise ValueError(f"Invalid cron format: expected 5 or 6 fields, got {len(parts)}") + return { + "second": second, + "minute": minute, + "hour": hour, + "day": day, + "month": month, + "day_of_week": day_of_week, + } diff --git a/modules/workflows/automation2/subAutomation2Schedule.py b/modules/workflows/automation2/subAutomation2Schedule.py new file mode 100644 index 00000000..d0fb3cd8 --- /dev/null +++ b/modules/workflows/automation2/subAutomation2Schedule.py @@ -0,0 +1,304 @@ +# Copyright (c) 2025 Patrick Motsch +""" +Automation2 schedule scheduler. +Starts/stops cron jobs for workflows with schedule entry points. +""" + +import asyncio +import logging +from typing import Any, Dict + +from modules.shared.eventManagement import eventManager + +# Main loop reference for scheduling async work from job executor (may run in thread) +_main_loop = None + + +def set_main_loop(loop) -> None: + global _main_loop + _main_loop = loop +from modules.features.automation2.interfaceFeatureAutomation2 import ( + getAutomation2Interface, + getAllWorkflowsForScheduling, +) +from modules.features.automation2.mainAutomation2 import getAutomation2Services +from modules.features.automation2.entryPoints import find_invocation +from modules.workflows.automation2.scheduleCron import parse_cron_to_kwargs + + +def _cron_to_interval_seconds(cron: str): + """ + If cron represents a simple interval, return seconds. Otherwise None. + E.g. "* * * * *" -> 60, "*/15 * * * *" -> 900, "*/30 * * * * *" -> 30. + """ + if not cron or not isinstance(cron, str): + return None + parts = cron.strip().split() + if len(parts) == 5: + minute, hour, day, month, dow = parts + second = "0" + elif len(parts) == 6: + second, minute, hour, day, month, dow = parts + else: + return None + # Interval minutes: */N * * * * + if minute.startswith("*/") and hour == "*" and day == "*" and month == "*" and dow == "*": + n = int(minute[2:]) if minute[2:].isdigit() else 0 + if n > 0: + return n * 60 + # Every minute: * * * * * + if minute == "*" and hour == "*" and day == "*" and month == "*" and dow == "*" and second == "0": + return 60 + # Interval hours: 0 */N * * * + if minute == "0" and hour.startswith("*/") and day == "*" and month == "*" and dow == "*": + n = int(hour[2:]) if hour[2:].isdigit() else 0 + if n > 0: + return n * 3600 + # Interval seconds: */N * * * * * (6-field) + if len(parts) == 6 and second.startswith("*/") and minute == "*" and hour == "*" and day == "*" and month == "*" and dow in ("*", "?"): + n = int(second[2:]) if second[2:].isdigit() else 0 + if n > 0: + return n + return None +from modules.workflows.automation2.executionEngine import executeGraph +from modules.workflows.automation2.runEnvelope import default_run_envelope, normalize_run_envelope + +logger = logging.getLogger(__name__) + +JOB_ID_PREFIX = "automation2." 
+ + +def _remove_all_automation2_schedule_jobs() -> None: + """Remove all registered Automation2 schedule jobs from the scheduler.""" + if not eventManager.scheduler: + return + for job in list(eventManager.scheduler.get_jobs()): + jid = job.id if hasattr(job, "id") else str(job) + if jid.startswith(JOB_ID_PREFIX): + try: + eventManager.remove(jid) + except Exception as e: + logger.debug("Could not remove job %s: %s", jid, e) + + +def sync_automation2_schedule_events(event_user) -> Dict[str, Any]: + """ + Sync scheduler with all active Automation2 workflows that have schedule entry points. + Registers cron jobs for each; removes jobs for workflows no longer in the list. + """ + if not event_user: + logger.warning("Automation2 schedule: No event user, skipping sync") + return {"synced": 0, "events": {}} + + _remove_all_automation2_schedule_jobs() + + items = getAllWorkflowsForScheduling() + registered = {} + logger.info( + "Automation2 schedule: found %d workflow(s) with trigger.schedule and cron", + len(items), + ) + + for item in items: + workflow_id = item.get("workflowId") + mandate_id = item.get("mandateId") + instance_id = item.get("featureInstanceId") + entry_point_id = item.get("entryPointId") + cron = item.get("cron") + workflow = item.get("workflow") + + if not workflow_id or not instance_id or not cron: + continue + + job_id = f"{JOB_ID_PREFIX}{workflow_id}" + async_handler = _create_schedule_handler( + workflow_id=workflow_id, + mandate_id=mandate_id, + instance_id=instance_id, + entry_point_id=entry_point_id, + workflow=workflow, + event_user=event_user, + ) + + # Sync wrapper: schedule async handler on main loop (job may run in executor thread) + def sync_wrapper(): + loop = _main_loop + if loop and loop.is_running(): + loop.call_soon_threadsafe( + lambda: asyncio.ensure_future(async_handler(), loop=loop) + ) + else: + # Fallback: run inline if no loop (shouldn't happen) + try: + asyncio.run(async_handler()) + except RuntimeError: + logger.warning("Automation2 schedule: could not run handler, no event loop") + + # Use IntervalTrigger for "every N minutes" - more reliable than CronTrigger + interval_seconds = _cron_to_interval_seconds(cron) + if interval_seconds is not None: + eventManager.registerInterval( + jobId=job_id, + func=sync_wrapper, + seconds=interval_seconds, + replaceExisting=True, + ) + else: + try: + cron_kwargs = parse_cron_to_kwargs(cron) + eventManager.registerCron( + jobId=job_id, + func=sync_wrapper, + cronKwargs=cron_kwargs, + replaceExisting=True, + ) + except ValueError as e: + logger.warning("Workflow %s: invalid cron %r: %s", workflow_id, cron, e) + continue + registered[workflow_id] = job_id + mode = "interval" if interval_seconds is not None else "cron" + logger.info( + "Automation2 schedule: registered %s for workflow %s (%s=%s)", + job_id, + workflow_id, + mode, + interval_seconds if interval_seconds is not None else cron, + ) + + if not registered and items: + logger.warning("Automation2 schedule: workflows found but none registered (check cron format)") + elif not items: + logger.info("Automation2 schedule: no workflows with trigger.schedule+cron (save workflow after selecting Zeitplan)") + return {"synced": len(registered), "workflowsFound": len(items), "events": registered} + + +def _create_schedule_handler( + workflow_id: str, + mandate_id: str, + instance_id: str, + entry_point_id: str, + workflow: Dict[str, Any], + event_user, +): + """Create async handler for scheduled workflow execution.""" + + async def handler(): + 
logger.info("Automation2 schedule: CRON FIRED for workflow %s", workflow_id) + try: + if not event_user: + logger.error("Automation2 schedule: event user not available") + return + + a2 = getAutomation2Interface(event_user, mandate_id, instance_id) + wf = a2.getWorkflow(workflow_id) + if not wf or not wf.get("graph"): + logger.warning("Automation2 schedule: workflow %s not found or no graph", workflow_id) + return + if not wf.get("active", True): + logger.info("Automation2 schedule: workflow %s inactive, skipping", workflow_id) + return + + inv = find_invocation(wf, entry_point_id) + if inv and (inv.get("kind") != "schedule" or not inv.get("enabled", True)): + logger.info("Automation2 schedule: entry point %s disabled for workflow %s", entry_point_id, workflow_id) + return + # If inv not found but graph has trigger.schedule, proceed (invocations may not be synced) + + services = getAutomation2Services( + event_user, + mandateId=mandate_id, + featureInstanceId=instance_id, + ) + from modules.workflows.processing.shared.methodDiscovery import discoverMethods + discoverMethods(services) + + title = (inv or {}).get("title") or {} + label = "" + if isinstance(title, dict): + label = title.get("en") or title.get("de") or "" + elif isinstance(title, str): + label = title + + run_env = default_run_envelope( + "schedule", + entry_point_id=entry_point_id, + entry_point_label=label or None, + ) + run_env = normalize_run_envelope(run_env, user_id=str(event_user.id) if event_user else None) + + # userId=None so tasks are created unassigned and visible to all instance users + result = await executeGraph( + graph=wf["graph"], + services=services, + workflowId=workflow_id, + instanceId=instance_id, + userId=None, + mandateId=mandate_id, + automation2_interface=a2, + run_envelope=run_env, + ) + logger.info( + "Automation2 schedule: executed workflow %s success=%s paused=%s", + workflow_id, + result.get("success"), + result.get("paused"), + ) + except Exception as e: + logger.exception("Automation2 schedule: failed to execute workflow %s: %s", workflow_id, e) + + return handler + + +def start(event_user) -> bool: + """ + Start Automation2 schedule scheduler and sync scheduled workflows. + Registers callback so schedule is re-synced when workflows are created/updated/deleted. 
+ """ + if not event_user: + logger.warning("Automation2 schedule: No event user provided, skipping") + return True + + try: + eventManager.start() + sync_automation2_schedule_events(event_user) + logger.info("Automation2 schedule: sync complete") + + # Delayed sync (5s) in case DB was not ready at startup + def do_delayed_sync(): + import threading + def _run(): + import time + time.sleep(5) + try: + sync_automation2_schedule_events(event_user) + logger.info("Automation2 schedule: delayed sync done") + except Exception as e: + logger.warning("Automation2 schedule: delayed sync failed: %s", e) + t = threading.Thread(target=_run, daemon=True) + t.start() + do_delayed_sync() + + def on_workflow_changed(_context=None): + try: + sync_automation2_schedule_events(event_user) + logger.debug("Automation2 schedule: re-synced after workflow change") + except Exception as e: + logger.warning("Automation2 schedule: re-sync failed: %s", e) + + from modules.shared.callbackRegistry import callbackRegistry + callbackRegistry.register("automation2.workflow.changed", on_workflow_changed) + except Exception as e: + logger.error("Automation2 schedule: Failed to start: %s", e) + return False + + return True + + +def stop(event_user) -> bool: + """Stop Automation2 schedule scheduler (remove all schedule jobs).""" + try: + _remove_all_automation2_schedule_jobs() + logger.info("Automation2 schedule: all jobs removed") + except Exception as e: + logger.warning("Automation2 schedule: error during stop: %s", e) + return True diff --git a/modules/workflows/methods/methodClickup/__init__.py b/modules/workflows/methods/methodClickup/__init__.py new file mode 100644 index 00000000..9e0362c4 --- /dev/null +++ b/modules/workflows/methods/methodClickup/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. + +from .methodClickup import MethodClickup + +__all__ = ["MethodClickup"] diff --git a/modules/workflows/methods/methodClickup/actions/__init__.py b/modules/workflows/methods/methodClickup/actions/__init__.py new file mode 100644 index 00000000..5c54c5df --- /dev/null +++ b/modules/workflows/methods/methodClickup/actions/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +"""ClickUp workflow actions.""" diff --git a/modules/workflows/methods/methodClickup/actions/create_task.py b/modules/workflows/methods/methodClickup/actions/create_task.py new file mode 100644 index 00000000..d010c234 --- /dev/null +++ b/modules/workflows/methods/methodClickup/actions/create_task.py @@ -0,0 +1,213 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. 
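+
+# Worked examples (illustrative; the inputs are assumptions, the outputs follow
+# from the parsers defined below) of how loosely-typed form values are normalized:
+#
+#   _parse_due_date_ms("2026-03-24")        -> 1774310400000  (ISO date -> UTC ms)
+#   _parse_due_date_ms("1774310400000")     -> 1774310400000  (already ms)
+#   _parse_time_estimate_hours_to_ms(1.5)   -> 5400000        (hours -> ms)
+#   _parse_int_list("[12, 34]")             -> [12, 34]       (JSON string -> ids)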
+ +import json +import logging +from datetime import datetime, timezone +from typing import Any, Dict, List, Optional + +from modules.datamodels.datamodelChat import ActionDocument, ActionResult +from ..helpers.pathparse import parse_team_and_list + +logger = logging.getLogger(__name__) + + +def _as_str(v: Any) -> str: + if v is None: + return "" + return str(v).strip() + + +def _parse_custom_field_values(parameters: Dict[str, Any]) -> List[Dict[str, Any]]: + """Build ClickUp custom_fields array from customFieldValues map (field id -> value).""" + raw = parameters.get("customFieldValues") + if raw is None: + return [] + if isinstance(raw, str) and raw.strip(): + try: + raw = json.loads(raw) + except json.JSONDecodeError: + return [] + if not isinstance(raw, dict): + return [] + out: List[Dict[str, Any]] = [] + for fid, val in raw.items(): + if val is None or val == "": + continue + if isinstance(val, dict) and val.get("type") in ("ref", "value"): + continue + out.append({"id": str(fid), "value": val}) + return out + + +def _unwrap_value(v: Any) -> Any: + if isinstance(v, dict) and v.get("type") == "value" and "value" in v: + return v.get("value") + return v + + +def _parse_int_list(val: Any) -> List[int]: + if val is None: + return [] + val = _unwrap_value(val) + if isinstance(val, str) and val.strip(): + try: + parsed = json.loads(val) + if isinstance(parsed, list): + return [int(x) for x in parsed if x is not None and str(x).strip() != ""] + except (json.JSONDecodeError, ValueError, TypeError): + return [] + if isinstance(val, list): + out: List[int] = [] + for x in val: + if x is None or (isinstance(x, str) and not x.strip()): + continue + try: + out.append(int(x)) + except (ValueError, TypeError): + continue + return out + return [] + + +def _optional_positive_int(v: Any) -> Optional[int]: + v = _unwrap_value(v) + if v is None or v == "": + return None + try: + i = int(float(v)) + return i if i > 0 else None + except (ValueError, TypeError): + return None + + +def _parse_due_date_ms(v: Any) -> Optional[int]: + """Accept Unix ms or ISO date string (YYYY-MM-DD) from form payload.""" + v = _unwrap_value(v) + if v is None or v == "": + return None + if isinstance(v, str) and len(v) >= 10 and v[4] == "-" and v[7] == "-": + try: + dt = datetime.strptime(v[:10], "%Y-%m-%d").replace(tzinfo=timezone.utc) + return int(dt.timestamp() * 1000) + except ValueError: + pass + try: + i = int(float(v)) + return i if i > 0 else None + except (ValueError, TypeError): + return None + + +def _parse_time_estimate_hours_to_ms(v: Any) -> Optional[int]: + v = _unwrap_value(v) + if v is None or v == "": + return None + try: + h = float(v) + if h < 0: + return None + return int(round(h * 3600 * 1000)) + except (ValueError, TypeError): + return None + + +def _apply_standard_task_fields(body: Dict[str, Any], parameters: Dict[str, Any]) -> None: + """Map first-class node params to ClickUp POST /list/{{id}}/task body (before taskFields merge).""" + ts = _as_str(parameters.get("taskStatus") or parameters.get("clickupStatus")) + if ts: + body["status"] = ts + pr = parameters.get("taskPriority") + pr = _unwrap_value(pr) + if pr is not None and pr != "": + try: + pi = int(float(pr)) + if 1 <= pi <= 4: + body["priority"] = pi + except (ValueError, TypeError): + pass + dd = parameters.get("taskDueDateMs") + dms = _parse_due_date_ms(dd) + if dms is not None: + body["due_date"] = dms + assignees = _parse_int_list(parameters.get("taskAssigneeIds")) + if assignees: + body["assignees"] = assignees + teh = 
parameters.get("taskTimeEstimateHours") + tem_h = _parse_time_estimate_hours_to_ms(teh) + if tem_h is not None: + body["time_estimate"] = tem_h + else: + te = parameters.get("taskTimeEstimateMs") + tem = _optional_positive_int(te) + if tem is not None: + body["time_estimate"] = tem + + +def _merge_custom_fields(body: Dict[str, Any], items: List[Dict[str, Any]]) -> None: + if not items: + return + existing = body.get("custom_fields") + if isinstance(existing, list) and existing: + by_id: Dict[str, Dict[str, Any]] = {} + for x in existing: + if isinstance(x, dict) and x.get("id") is not None: + by_id[str(x["id"])] = x + for item in items: + by_id[str(item["id"])] = item + body["custom_fields"] = list(by_id.values()) + else: + body["custom_fields"] = items + + +async def create_task(self, parameters: Dict[str, Any]) -> ActionResult: + connection_reference = parameters.get("connectionReference") + list_id = (parameters.get("listId") or "").strip() + path_query = (parameters.get("pathQuery") or parameters.get("path") or "").strip() + name = _as_str(parameters.get("name")) + description = _as_str(parameters.get("description")) + + if not connection_reference: + return ActionResult.isFailure(error="connectionReference is required") + if not list_id and path_query: + _t, lid = parse_team_and_list(path_query) + list_id = lid or list_id + if not list_id: + return ActionResult.isFailure(error="listId or path /team/{teamId}/list/{listId} is required") + if not name: + return ActionResult.isFailure(error="name is required") + + conn = self.connection.get_clickup_connection(connection_reference) + if not conn: + return ActionResult.isFailure(error="No valid ClickUp connection") + + body: Dict[str, Any] = {"name": name} + if description: + body["description"] = description + _apply_standard_task_fields(body, parameters) + extra = parameters.get("taskFields") + if isinstance(extra, str) and extra.strip(): + try: + parsed = json.loads(extra) + if isinstance(parsed, dict): + body.update(parsed) + except json.JSONDecodeError: + return ActionResult.isFailure(error="taskFields must be valid JSON object") + elif isinstance(extra, dict): + body.update(extra) + + cf_items = _parse_custom_field_values(parameters) + if cf_items: + _merge_custom_fields(body, cf_items) + + data = await self.services.clickup.createTask(list_id, body) + if isinstance(data, dict) and data.get("error"): + return ActionResult.isFailure(error=str(data.get("error")) + (data.get("body") or "")) + + doc = ActionDocument( + documentName="clickup_create_task.json", + documentData=json.dumps(data, ensure_ascii=False, indent=2), + mimeType="application/json", + validationMetadata={"actionType": "clickup.createTask", "listId": list_id}, + ) + return ActionResult.isSuccess(documents=[doc]) diff --git a/modules/workflows/methods/methodClickup/actions/get_task.py b/modules/workflows/methods/methodClickup/actions/get_task.py new file mode 100644 index 00000000..1e3eecad --- /dev/null +++ b/modules/workflows/methods/methodClickup/actions/get_task.py @@ -0,0 +1,40 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. 
+ +import json +import logging +from typing import Any, Dict + +from modules.datamodels.datamodelChat import ActionDocument, ActionResult +from ..helpers.pathparse import parse_task_id + +logger = logging.getLogger(__name__) + + +async def get_task(self, parameters: Dict[str, Any]) -> ActionResult: + connection_reference = parameters.get("connectionReference") + task_id = (parameters.get("taskId") or "").strip() + path_hint = (parameters.get("path") or parameters.get("pathQuery") or "").strip() + if not connection_reference: + return ActionResult.isFailure(error="connectionReference is required") + + if not task_id and path_hint: + task_id = parse_task_id(path_hint) or "" + if not task_id: + return ActionResult.isFailure(error="taskId is required (or path ending in .../task/{id})") + + conn = self.connection.get_clickup_connection(connection_reference) + if not conn: + return ActionResult.isFailure(error="No valid ClickUp connection") + + data = await self.services.clickup.getTask(task_id) + if isinstance(data, dict) and data.get("error"): + return ActionResult.isFailure(error=str(data.get("error")) + (data.get("body") or "")) + + doc = ActionDocument( + documentName=f"clickup_task_{task_id}.json", + documentData=json.dumps(data, ensure_ascii=False, indent=2), + mimeType="application/json", + validationMetadata={"actionType": "clickup.getTask", "taskId": task_id}, + ) + return ActionResult.isSuccess(documents=[doc]) diff --git a/modules/workflows/methods/methodClickup/actions/list_tasks.py b/modules/workflows/methods/methodClickup/actions/list_tasks.py new file mode 100644 index 00000000..4caf9e31 --- /dev/null +++ b/modules/workflows/methods/methodClickup/actions/list_tasks.py @@ -0,0 +1,51 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. + +import json +import logging +from typing import Any, Dict + +from modules.datamodels.datamodelChat import ActionDocument, ActionResult +from ..helpers.pathparse import parse_team_and_list + +logger = logging.getLogger(__name__) + + +async def list_tasks(self, parameters: Dict[str, Any]) -> ActionResult: + connection_reference = parameters.get("connectionReference") + path_query = (parameters.get("pathQuery") or parameters.get("path") or "").strip() + if not connection_reference: + return ActionResult.isFailure(error="connectionReference is required") + if not path_query: + return ActionResult.isFailure(error="path (virtual path to a list) is required, e.g. 
/team/{teamId}/list/{listId}") + + conn = self.connection.get_clickup_connection(connection_reference) + if not conn: + return ActionResult.isFailure(error="No valid ClickUp connection") + + team_id, list_id = parse_team_and_list(path_query) + if not list_id: + return ActionResult.isFailure( + error="path must be /team/{teamId}/list/{listId} (browse to a list in the ClickUp picker)" + ) + + page = int(parameters.get("page") or 0) + include_closed = bool(parameters.get("includeClosed", False)) + data = await self.services.clickup.getTasksInList( + list_id, page=page, include_closed=include_closed, subtasks=True + ) + if isinstance(data, dict) and data.get("error"): + return ActionResult.isFailure(error=str(data.get("error")) + (data.get("body") or "")) + + doc = ActionDocument( + documentName="clickup_list_tasks.json", + documentData=json.dumps(data, ensure_ascii=False, indent=2), + mimeType="application/json", + validationMetadata={ + "actionType": "clickup.listTasks", + "teamId": team_id, + "listId": list_id, + "path": path_query, + }, + ) + return ActionResult.isSuccess(documents=[doc]) diff --git a/modules/workflows/methods/methodClickup/actions/search_tasks.py b/modules/workflows/methods/methodClickup/actions/search_tasks.py new file mode 100644 index 00000000..b173020c --- /dev/null +++ b/modules/workflows/methods/methodClickup/actions/search_tasks.py @@ -0,0 +1,221 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. + +import json +import logging +import re +from typing import Any, Dict, List, Optional + +from modules.datamodels.datamodelChat import ActionDocument, ActionResult + +logger = logging.getLogger(__name__) + +_DESC_MAX = 4000 +_MAX_LIST_PAGES = 50 + +_DASHES = re.compile(r"[\u2010-\u2015\-]") + + +def _norm_title(s: str) -> str: + """Lowercase, unify hyphens/dashes, collapse spaces (helps full-title matches).""" + t = (s or "").strip().lower() + t = _DASHES.sub("-", t) + t = re.sub(r"\s+", " ", t) + return t + + +def _title_contains_query(name: str, query: str) -> bool: + if not query: + return True + n = _norm_title(name) + q = _norm_title(query) + if q in n: + return True + return query.lower() in (name or "").lower() + + +def _task_text_for_broad(t: Dict[str, Any]) -> str: + parts: List[str] = [] + if t.get("name"): + parts.append(str(t["name"])) + d = t.get("description") or t.get("text_content") or t.get("textcontent") or "" + if d: + parts.append(str(d)) + return " ".join(parts).lower() + + +def _task_matches_query(t: Dict[str, Any], query: str, *, match_name_only: bool) -> bool: + if not query: + return True + if match_name_only: + return _title_contains_query((t.get("name") or ""), query) + return query.lower() in _task_text_for_broad(t) + + +def _pick(d: Dict[str, Any], *keys: str, default: Any = None) -> Any: + for k in keys: + if k in d and d[k] is not None: + return d[k] + return default + + +def _slim_custom_field(cf: Dict[str, Any]) -> Optional[Dict[str, Any]]: + """Only include custom fields that have a value (omit null noise).""" + val = cf.get("value") + if val is None or val == "": + return None + return { + "id": cf.get("id"), + "name": cf.get("name"), + "type": cf.get("type"), + "value": val, + } + + +def _slim_clickup_task(t: Dict[str, Any]) -> Dict[str, Any]: + """Usable automation payload — not the full ClickUp API mirror (no nested typeconfig).""" + status = t.get("status") + if not isinstance(status, dict): + status = {} + li = t.get("list") + if not isinstance(li, dict): + li = {} + + desc = _pick(t, "description", "Description", 
default="") or "" + if len(desc) > _DESC_MAX: + desc = desc[:_DESC_MAX] + "…(truncated)" + + assignees: List[Dict[str, Any]] = [] + for a in t.get("assignees") or []: + if isinstance(a, dict): + assignees.append( + { + "id": a.get("id"), + "username": a.get("username"), + "email": a.get("email"), + } + ) + + cfs = t.get("custom_fields") or t.get("customfields") or [] + slim_cf: List[Dict[str, Any]] = [] + for cf in cfs: + if isinstance(cf, dict): + row = _slim_custom_field(cf) + if row is not None: + slim_cf.append(row) + + out: Dict[str, Any] = { + "id": t.get("id"), + "name": t.get("name"), + "text_content": _pick(t, "text_content", "textcontent"), + "description": desc, + "status": status.get("status"), + "url": t.get("url"), + "list": {"id": li.get("id"), "name": li.get("name")} if li else None, + "date_created": _pick(t, "date_created", "datecreated"), + "date_updated": _pick(t, "date_updated", "dateupdated"), + "due_date": _pick(t, "due_date", "duedate"), + } + if assignees: + out["assignees"] = assignees + if slim_cf: + out["custom_fields"] = slim_cf + pr = t.get("priority") + if pr is not None: + out["priority"] = pr + return out + + +def _slim_search_payload(data: Dict[str, Any]) -> Dict[str, Any]: + tasks = data.get("tasks") or [] + slim_tasks = [_slim_clickup_task(t) if isinstance(t, dict) else t for t in tasks] + out: Dict[str, Any] = {k: v for k, v in data.items() if k != "tasks"} + out["tasks"] = slim_tasks + out["_nyla"] = { + "slim": True, + "hint": "Set fullTaskData=true for raw ClickUp API objects.", + } + return out + + +async def search_tasks(self, parameters: Dict[str, Any]) -> ActionResult: + connection_reference = parameters.get("connectionReference") + team_id = (parameters.get("teamId") or "").strip() + query = (parameters.get("query") or parameters.get("searchQuery") or "").strip() + list_id_filter = (parameters.get("listId") or "").strip() + if not connection_reference: + return ActionResult.isFailure(error="connectionReference is required") + if not team_id: + return ActionResult.isFailure(error="teamId is required (workspace id from ClickUp)") + if not query: + return ActionResult.isFailure(error="query is required") + + conn = self.connection.get_clickup_connection(connection_reference) + if not conn: + return ActionResult.isFailure(error="No valid ClickUp connection") + + full_task_data = bool(parameters.get("fullTaskData") or parameters.get("fullPayload")) + if "matchNameOnly" in parameters: + match_name_only = bool(parameters.get("matchNameOnly")) + elif "matchTitle" in parameters: + match_name_only = bool(parameters.get("matchTitle")) + else: + match_name_only = True + + page = int(parameters.get("page") or 0) + include_closed = bool(parameters.get("includeClosed", False)) + + if list_id_filter: + # List API: scan pages in this list and match locally (team search does not scope to one table). 
+ filtered_tasks: List[Dict[str, Any]] = [] + p = page + while p < page + _MAX_LIST_PAGES: + batch = await self.services.clickup.getTasksInList( + list_id_filter, page=p, include_closed=include_closed, subtasks=True + ) + if isinstance(batch, dict) and batch.get("error"): + return ActionResult.isFailure(error=str(batch.get("error")) + (batch.get("body") or "")) + tasks = batch.get("tasks") or [] + last = bool(batch.get("last_page") or batch.get("lastpage")) + for t in tasks: + if isinstance(t, dict) and _task_matches_query(t, query, match_name_only=match_name_only): + filtered_tasks.append(t) + if last or not tasks: + break + p += 1 + data: Dict[str, Any] = {"tasks": filtered_tasks, "lastpage": True} + search_mode = "list" + else: + data = await self.services.clickup.searchTeamTasks(team_id, query=query, page=page) + if isinstance(data, dict) and data.get("error"): + return ActionResult.isFailure(error=str(data.get("error")) + (data.get("body") or "")) + + if match_name_only and isinstance(data, dict): + tasks = data.get("tasks") or [] + filtered = [ + t + for t in tasks + if isinstance(t, dict) and _title_contains_query((t.get("name") or ""), query) + ] + data = {**data, "tasks": filtered} + search_mode = "team" + + if isinstance(data, dict) and not full_task_data: + data = _slim_search_payload(data) + + doc = ActionDocument( + documentName="clickup_search_tasks.json", + documentData=json.dumps(data, ensure_ascii=False, indent=2), + mimeType="application/json", + validationMetadata={ + "actionType": "clickup.searchTasks", + "teamId": team_id, + "query": query, + "slim": not full_task_data, + "matchNameOnly": match_name_only, + "searchMode": search_mode, + "listId": list_id_filter or None, + "includeClosed": include_closed if list_id_filter else None, + }, + ) + return ActionResult.isSuccess(documents=[doc]) diff --git a/modules/workflows/methods/methodClickup/actions/update_task.py b/modules/workflows/methods/methodClickup/actions/update_task.py new file mode 100644 index 00000000..6282ec78 --- /dev/null +++ b/modules/workflows/methods/methodClickup/actions/update_task.py @@ -0,0 +1,57 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. 
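
A quick illustration of the `taskUpdate` contract enforced by the action below: the parameter may arrive as a JSON string or an already-parsed object, and must be a non-empty JSON object. The connection reference and task id here are hypothetical placeholders, not values from this patch.

example_update_as_object = {
    "connectionReference": "connection:abc123",   # hypothetical reference
    "taskId": "86c2q4xyz",                        # hypothetical ClickUp task id
    "taskUpdate": {"name": "New title", "status": "in progress"},
}

example_update_as_string = {
    "connectionReference": "connection:abc123",
    "taskId": "86c2q4xyz",
    "taskUpdate": '{"priority": 2, "due_date": 1767139200000}',  # parsed via json.loads
}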
+ +import json +import logging +from typing import Any, Dict + +from modules.datamodels.datamodelChat import ActionDocument, ActionResult +from ..helpers.pathparse import parse_task_id + +logger = logging.getLogger(__name__) + + +async def update_task(self, parameters: Dict[str, Any]) -> ActionResult: + connection_reference = parameters.get("connectionReference") + task_id = (parameters.get("taskId") or "").strip() + path_hint = (parameters.get("path") or "").strip() + if not connection_reference: + return ActionResult.isFailure(error="connectionReference is required") + if not task_id and path_hint: + task_id = parse_task_id(path_hint) or "" + if not task_id: + return ActionResult.isFailure(error="taskId is required") + + raw_update = parameters.get("taskUpdate") or parameters.get("taskJson") or parameters.get("body") + if raw_update is None or raw_update == "": + return ActionResult.isFailure(error="taskUpdate (JSON object) is required — add update fields or advanced JSON") + if isinstance(raw_update, str): + try: + body = json.loads(raw_update) + except json.JSONDecodeError as e: + return ActionResult.isFailure(error=f"taskUpdate must be valid JSON: {e}") + elif isinstance(raw_update, dict): + body = raw_update + else: + return ActionResult.isFailure(error="taskUpdate must be a JSON string or object") + + if not isinstance(body, dict): + return ActionResult.isFailure(error="taskUpdate JSON must be an object") + if not body: + return ActionResult.isFailure(error="taskUpdate is empty — set at least one field to update") + + conn = self.connection.get_clickup_connection(connection_reference) + if not conn: + return ActionResult.isFailure(error="No valid ClickUp connection") + + data = await self.services.clickup.updateTask(task_id, body) + if isinstance(data, dict) and data.get("error"): + return ActionResult.isFailure(error=str(data.get("error")) + (data.get("body") or "")) + + doc = ActionDocument( + documentName=f"clickup_task_{task_id}_updated.json", + documentData=json.dumps(data, ensure_ascii=False, indent=2), + mimeType="application/json", + validationMetadata={"actionType": "clickup.updateTask", "taskId": task_id}, + ) + return ActionResult.isSuccess(documents=[doc]) diff --git a/modules/workflows/methods/methodClickup/actions/upload_attachment.py b/modules/workflows/methods/methodClickup/actions/upload_attachment.py new file mode 100644 index 00000000..8cd1de4d --- /dev/null +++ b/modules/workflows/methods/methodClickup/actions/upload_attachment.py @@ -0,0 +1,88 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. 
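
The action below normalizes several upstream content shapes into raw bytes. A minimal sketch of that normalization, assuming the same fallback order as the code (the helper name is ours, not part of the connector):

import base64

def normalize_document_data(raw) -> bytes:
    # bytes pass through; strings are tried as base64 first, utf-8 as fallback
    if isinstance(raw, bytes):
        return raw
    if isinstance(raw, str):
        try:
            return base64.b64decode(raw)
        except Exception:
            return raw.encode("utf-8")
    raise TypeError("Unsupported documentData type")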
+ +import base64 +import json +import logging +from typing import Any, Dict + +from modules.datamodels.datamodelChat import ActionDocument, ActionResult +from ..helpers.pathparse import parse_task_id + +logger = logging.getLogger(__name__) + + +async def upload_attachment(self, parameters: Dict[str, Any]) -> ActionResult: + connection_reference = parameters.get("connectionReference") + task_id = (parameters.get("taskId") or "").strip() + path_hint = (parameters.get("path") or "").strip() + if not connection_reference: + return ActionResult.isFailure(error="connectionReference is required") + if not task_id and path_hint: + task_id = parse_task_id(path_hint) or "" + if not task_id: + return ActionResult.isFailure(error="taskId is required") + + conn = self.connection.get_clickup_connection(connection_reference) + if not conn: + return ActionResult.isFailure(error="No valid ClickUp connection") + + content_param = parameters.get("content") + if not content_param: + return ActionResult.isFailure(error="content is required (connect a file node upstream)") + + content = content_param[0] if isinstance(content_param, (list, tuple)) and content_param else content_param + file_name = parameters.get("fileName") + file_bytes = None + + if isinstance(content, dict): + file_name = file_name or content.get("documentName") or content.get("fileName") or "attachment" + raw_data = content.get("documentData") + if (content.get("validationMetadata") or {}).get("fileId") and not raw_data: + fid = content["validationMetadata"]["fileId"] + try: + raw = self.services.chat.getFileData(fid) + file_bytes = raw if isinstance(raw, bytes) else str(raw).encode("utf-8") + except Exception as e: + return ActionResult.isFailure(error=f"Could not load file {fid}: {e}") + elif raw_data is not None: + if isinstance(raw_data, bytes): + file_bytes = raw_data + elif isinstance(raw_data, str): + try: + file_bytes = base64.b64decode(raw_data) + except Exception: + file_bytes = raw_data.encode("utf-8") + else: + return ActionResult.isFailure(error="Unsupported documentData type") + else: + return ActionResult.isFailure(error="Could not read file bytes from content") + elif hasattr(content, "documentData"): + file_name = file_name or getattr(content, "documentName", None) or getattr(content, "fileName", None) or "attachment" + raw_data = content.documentData + if isinstance(raw_data, bytes): + file_bytes = raw_data + elif isinstance(raw_data, str): + try: + file_bytes = base64.b64decode(raw_data) + except Exception: + file_bytes = raw_data.encode("utf-8") + else: + return ActionResult.isFailure(error="Unsupported documentData on ActionDocument") + else: + return ActionResult.isFailure(error="Unsupported content format") + + if not file_bytes: + return ActionResult.isFailure(error="Empty file content") + + data = await self.services.clickup.uploadTaskAttachment(task_id, file_bytes, file_name or "file") + if isinstance(data, dict) and data.get("error"): + return ActionResult.isFailure(error=str(data.get("error")) + (data.get("body") or "")) + + doc = ActionDocument( + documentName="clickup_upload_attachment.json", + documentData=json.dumps(data, ensure_ascii=False, indent=2), + mimeType="application/json", + validationMetadata={"actionType": "clickup.uploadAttachment", "taskId": task_id}, + ) + return ActionResult.isSuccess(documents=[doc]) diff --git a/modules/workflows/methods/methodClickup/helpers/__init__.py b/modules/workflows/methods/methodClickup/helpers/__init__.py new file mode 100644 index 00000000..fdcc4f0e --- /dev/null 
+++ b/modules/workflows/methods/methodClickup/helpers/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. diff --git a/modules/workflows/methods/methodClickup/helpers/connection.py b/modules/workflows/methods/methodClickup/helpers/connection.py new file mode 100644 index 00000000..d9b6d4d7 --- /dev/null +++ b/modules/workflows/methods/methodClickup/helpers/connection.py @@ -0,0 +1,50 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +"""Resolve ClickUp UserConnection and configure ClickupService.""" + +import logging +from typing import Any, Dict, Optional + +logger = logging.getLogger(__name__) + + +class ClickupConnectionHelper: + def __init__(self, method_instance: Any): + self.method = method_instance + self.services = method_instance.services + + def get_clickup_connection(self, connection_reference: str) -> Optional[Dict[str, Any]]: + try: + ref = (connection_reference or "").split(" [")[0].strip() + if not ref: + return None + user_connection = None + if ref.startswith("connection:"): + user_connection = self.services.chat.getUserConnectionFromConnectionReference(ref) + else: + app = getattr(self.services, "interfaceDbApp", None) + if app and hasattr(app, "getUserConnectionById"): + user_connection = app.getUserConnectionById(ref) + if not user_connection: + logger.warning("No user connection for reference/id %s", connection_reference) + return None + authority = getattr(user_connection.authority, "value", None) or str( + user_connection.authority + ) + if authority != "clickup": + logger.warning("Connection %s is not ClickUp (authority=%s)", user_connection.id, authority) + return None + status = getattr(user_connection.status, "value", None) or str(user_connection.status) + if status not in ("active", "pending"): + logger.warning("Connection %s status not active: %s", user_connection.id, status) + + cu = getattr(self.services, "clickup", None) + if not cu: + return None + if not cu.setAccessTokenFromConnection(user_connection): + logger.warning("Failed to set ClickUp token for connection %s", user_connection.id) + return None + return {"id": user_connection.id, "userConnection": user_connection} + except Exception as e: + logger.error("get_clickup_connection error: %s", e) + return None diff --git a/modules/workflows/methods/methodClickup/helpers/pathparse.py b/modules/workflows/methods/methodClickup/helpers/pathparse.py new file mode 100644 index 00000000..c97b69b2 --- /dev/null +++ b/modules/workflows/methods/methodClickup/helpers/pathparse.py @@ -0,0 +1,26 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +"""Parse virtual ClickUp paths used by the connector.""" + +import re +from typing import Optional, Tuple + + +def parse_team_and_list(path: str) -> Tuple[Optional[str], Optional[str]]: + p = (path or "").strip() + m = re.match(r"^/team/([^/]+)/list/([^/]+)$", p) + if m: + return m.group(1), m.group(2) + return None, None + + +def parse_task_id(path_or_id: str) -> Optional[str]: + s = (path_or_id or "").strip() + if not s: + return None + m = re.match(r"^.*/task/([^/]+)$", s) + if m: + return m.group(1) + if re.match(r"^[a-zA-Z0-9_-]+$", s) and len(s) > 4: + return s + return None diff --git a/modules/workflows/methods/methodClickup/methodClickup.py b/modules/workflows/methods/methodClickup/methodClickup.py new file mode 100644 index 00000000..00c658a5 --- /dev/null +++ b/modules/workflows/methods/methodClickup/methodClickup.py @@ -0,0 +1,349 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. 
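
Expected behaviour of the pathparse helpers defined above, shown with illustrative ids (the asserts restate the regexes; they are not part of the module):

from modules.workflows.methods.methodClickup.helpers.pathparse import (
    parse_task_id,
    parse_team_and_list,
)

assert parse_team_and_list("/team/9012/list/901105") == ("9012", "901105")
assert parse_team_and_list("/team/9012") == (None, None)          # list segment required
assert parse_task_id("/team/9012/list/901105/task/86c2q4x") == "86c2q4x"
assert parse_task_id("86c2q4x") == "86c2q4x"                      # bare id, len > 4
assert parse_task_id("abc") is None                               # too short to accept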
+"""ClickUp workflow method — list/search/get/create/update tasks and upload attachments.""" + +import logging + +from modules.datamodels.datamodelWorkflowActions import WorkflowActionDefinition, WorkflowActionParameter +from modules.shared.frontendTypes import FrontendType +from modules.workflows.methods.methodBase import MethodBase + +from .helpers.connection import ClickupConnectionHelper +from .actions.list_tasks import list_tasks +from .actions.search_tasks import search_tasks +from .actions.get_task import get_task +from .actions.create_task import create_task +from .actions.update_task import update_task +from .actions.upload_attachment import upload_attachment + +logger = logging.getLogger(__name__) + + +class MethodClickup(MethodBase): + """ClickUp API actions for automation2 (lists as tables).""" + + def __init__(self, services): + super().__init__(services) + self.name = "clickup" + self.description = "ClickUp task and list operations" + self.connection = ClickupConnectionHelper(self) + + self._actions = { + "listTasks": WorkflowActionDefinition( + actionId="clickup.listTasks", + description="List tasks in a ClickUp list (virtual path /team/{id}/list/{id})", + dynamicMode=True, + parameters={ + "connectionReference": WorkflowActionParameter( + name="connectionReference", + type="str", + frontendType=FrontendType.USER_CONNECTION, + required=True, + description="ClickUp connection", + ), + "pathQuery": WorkflowActionParameter( + name="pathQuery", + type="str", + frontendType=FrontendType.TEXT, + required=True, + description="Virtual path to list: /team/{teamId}/list/{listId}", + ), + "page": WorkflowActionParameter( + name="page", + type="int", + frontendType=FrontendType.NUMBER, + required=False, + default=0, + description="Page index", + ), + "includeClosed": WorkflowActionParameter( + name="includeClosed", + type="bool", + frontendType=FrontendType.CHECKBOX, + required=False, + default=False, + description="Include closed tasks", + ), + }, + execute=list_tasks.__get__(self, self.__class__), + ), + "searchTasks": WorkflowActionDefinition( + actionId="clickup.searchTasks", + description="Search tasks in a ClickUp workspace (team)", + dynamicMode=True, + parameters={ + "connectionReference": WorkflowActionParameter( + name="connectionReference", + type="str", + frontendType=FrontendType.USER_CONNECTION, + required=True, + description="ClickUp connection", + ), + "teamId": WorkflowActionParameter( + name="teamId", + type="str", + frontendType=FrontendType.TEXT, + required=True, + description="Workspace (team) ID", + ), + "query": WorkflowActionParameter( + name="query", + type="str", + frontendType=FrontendType.TEXT, + required=True, + description="Search query", + ), + "page": WorkflowActionParameter( + name="page", + type="int", + frontendType=FrontendType.NUMBER, + required=False, + default=0, + description="Page index", + ), + "listId": WorkflowActionParameter( + name="listId", + type="str", + frontendType=FrontendType.TEXT, + required=False, + description=( + "If set, tasks are loaded from this list via the list API (not team search). " + "Use this to search the selected table." 
+ ), + ), + "includeClosed": WorkflowActionParameter( + name="includeClosed", + type="bool", + frontendType=FrontendType.CHECKBOX, + required=False, + default=False, + description="When listId is set, include closed tasks in list pages.", + ), + "fullTaskData": WorkflowActionParameter( + name="fullTaskData", + type="bool", + frontendType=FrontendType.CHECKBOX, + required=False, + default=False, + description="If true, return raw ClickUp API task objects (large). Default is a slim payload.", + ), + "matchNameOnly": WorkflowActionParameter( + name="matchNameOnly", + type="bool", + frontendType=FrontendType.CHECKBOX, + required=False, + default=True, + description="If true, keep only tasks whose title contains the search query (default: true).", + ), + }, + execute=search_tasks.__get__(self, self.__class__), + ), + "getTask": WorkflowActionDefinition( + actionId="clickup.getTask", + description="Get a single task by ID", + dynamicMode=True, + parameters={ + "connectionReference": WorkflowActionParameter( + name="connectionReference", + type="str", + frontendType=FrontendType.USER_CONNECTION, + required=True, + description="ClickUp connection", + ), + "taskId": WorkflowActionParameter( + name="taskId", + type="str", + frontendType=FrontendType.TEXT, + required=False, + description="Task ID", + ), + "pathQuery": WorkflowActionParameter( + name="pathQuery", + type="str", + frontendType=FrontendType.TEXT, + required=False, + description="Optional virtual path ending in /task/{taskId}", + ), + }, + execute=get_task.__get__(self, self.__class__), + ), + "createTask": WorkflowActionDefinition( + actionId="clickup.createTask", + description="Create a task in a list", + dynamicMode=True, + parameters={ + "connectionReference": WorkflowActionParameter( + name="connectionReference", + type="str", + frontendType=FrontendType.USER_CONNECTION, + required=True, + description="ClickUp connection", + ), + "listId": WorkflowActionParameter( + name="listId", + type="str", + frontendType=FrontendType.TEXT, + required=False, + description="List ID (if not using path)", + ), + "pathQuery": WorkflowActionParameter( + name="pathQuery", + type="str", + frontendType=FrontendType.TEXT, + required=False, + description="Virtual path to list /team/{teamId}/list/{listId}", + ), + "name": WorkflowActionParameter( + name="name", + type="str", + frontendType=FrontendType.TEXT, + required=True, + description="Task name", + ), + "description": WorkflowActionParameter( + name="description", + type="str", + frontendType=FrontendType.TEXTAREA, + required=False, + description="Task description", + ), + "customFieldValues": WorkflowActionParameter( + name="customFieldValues", + type="str", + frontendType=FrontendType.TEXTAREA, + required=False, + description="Map of ClickUp custom field id to value (merged into custom_fields).", + ), + "taskFields": WorkflowActionParameter( + name="taskFields", + type="str", + frontendType=FrontendType.TEXTAREA, + required=False, + description="Optional extra JSON object merged into create payload (overrides standard fields)", + ), + "taskStatus": WorkflowActionParameter( + name="taskStatus", + type="str", + frontendType=FrontendType.TEXT, + required=False, + description="ClickUp status name for this list", + ), + "taskPriority": WorkflowActionParameter( + name="taskPriority", + type="str", + frontendType=FrontendType.TEXT, + required=False, + description="Priority 1 (urgent)–4 (low), empty to omit", + ), + "taskDueDateMs": WorkflowActionParameter( + name="taskDueDateMs", + type="str", + 
frontendType=FrontendType.TEXT, + required=False, + description="Due date as Unix ms timestamp", + ), + "taskAssigneeIds": WorkflowActionParameter( + name="taskAssigneeIds", + type="str", + frontendType=FrontendType.TEXTAREA, + required=False, + description="JSON array of ClickUp user ids, e.g. [123,456]", + ), + "taskTimeEstimateMs": WorkflowActionParameter( + name="taskTimeEstimateMs", + type="str", + frontendType=FrontendType.TEXT, + required=False, + description="Time estimate in milliseconds", + ), + "taskTimeEstimateHours": WorkflowActionParameter( + name="taskTimeEstimateHours", + type="str", + frontendType=FrontendType.TEXT, + required=False, + description="Time estimate in hours (converted to ms; preferred over taskTimeEstimateMs)", + ), + }, + execute=create_task.__get__(self, self.__class__), + ), + "updateTask": WorkflowActionDefinition( + actionId="clickup.updateTask", + description="Update a task (JSON body per ClickUp API)", + dynamicMode=True, + parameters={ + "connectionReference": WorkflowActionParameter( + name="connectionReference", + type="str", + frontendType=FrontendType.USER_CONNECTION, + required=True, + description="ClickUp connection", + ), + "taskId": WorkflowActionParameter( + name="taskId", + type="str", + frontendType=FrontendType.TEXT, + required=False, + description="Task ID", + ), + "path": WorkflowActionParameter( + name="path", + type="str", + frontendType=FrontendType.TEXT, + required=False, + description="Optional path ending in /task/{taskId}", + ), + "taskUpdate": WorkflowActionParameter( + name="taskUpdate", + type="str", + frontendType=FrontendType.TEXTAREA, + required=False, + description="JSON object for PUT /task/{id} (e.g. {\"name\":\"...\",\"status\":\"...\"}); built from editor rows if empty", + ), + }, + execute=update_task.__get__(self, self.__class__), + ), + "uploadAttachment": WorkflowActionDefinition( + actionId="clickup.uploadAttachment", + description="Upload a file attachment to a task", + dynamicMode=True, + parameters={ + "connectionReference": WorkflowActionParameter( + name="connectionReference", + type="str", + frontendType=FrontendType.USER_CONNECTION, + required=True, + description="ClickUp connection", + ), + "taskId": WorkflowActionParameter( + name="taskId", + type="str", + frontendType=FrontendType.TEXT, + required=False, + description="Task ID", + ), + "path": WorkflowActionParameter( + name="path", + type="str", + frontendType=FrontendType.TEXT, + required=False, + description="Optional path ending in /task/{taskId}", + ), + "fileName": WorkflowActionParameter( + name="fileName", + type="str", + frontendType=FrontendType.TEXT, + required=False, + description="Attachment file name", + ), + "content": WorkflowActionParameter( + name="content", + type="Any", + frontendType=FrontendType.DOCUMENT_REFERENCE, + required=True, + description="File from upstream node", + ), + }, + execute=upload_attachment.__get__(self, self.__class__), + ), + } + self._validateActions() diff --git a/modules/workflows/methods/methodFile/__init__.py b/modules/workflows/methods/methodFile/__init__.py new file mode 100644 index 00000000..b8c41e0f --- /dev/null +++ b/modules/workflows/methods/methodFile/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. 
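
The createTask action above takes a time estimate either in hours or in milliseconds; per the parameter descriptions, hours are preferred and converted to ms. A minimal sketch of that conversion, since create_task.py itself is not part of this excerpt:

from typing import Optional

def estimate_to_ms(hours: Optional[str], ms: Optional[str]) -> Optional[int]:
    # Hours take precedence over a raw millisecond value; 1 h = 3,600,000 ms.
    if hours:
        return int(float(hours) * 3_600_000)
    return int(ms) if ms else None

assert estimate_to_ms("1.5", None) == 5_400_000
assert estimate_to_ms(None, "250000") == 250000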
+ +from .methodFile import MethodFile + +__all__ = ["MethodFile"] diff --git a/modules/workflows/methods/methodFile/actions/__init__.py b/modules/workflows/methods/methodFile/actions/__init__.py new file mode 100644 index 00000000..9aef4028 --- /dev/null +++ b/modules/workflows/methods/methodFile/actions/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. + +from .create import create + +__all__ = ["create"] diff --git a/modules/workflows/methods/methodFile/actions/create.py b/modules/workflows/methods/methodFile/actions/create.py new file mode 100644 index 00000000..73816da0 --- /dev/null +++ b/modules/workflows/methods/methodFile/actions/create.py @@ -0,0 +1,147 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. + +import base64 +import logging +from typing import Dict, Any + +from modules.datamodels.datamodelChat import ActionResult, ActionDocument +from modules.serviceCenter.services.serviceGeneration.subDocumentUtility import markdownToDocumentJson + +logger = logging.getLogger(__name__) + + +def _persistDocumentsToUserFiles( + action_documents: list, + services, +) -> None: + """Persist file.create output documents to user's file storage (like upload). + Adds fileId to each document's validationMetadata for download links in UI.""" + mgmt = getattr(services, "interfaceDbComponent", None) + if not mgmt: + try: + import modules.interfaces.interfaceDbManagement as iface + user = getattr(services, "user", None) + if not user: + return + mgmt = iface.getInterface( + user, + mandateId=getattr(services, "mandateId", None) or "", + featureInstanceId=getattr(services, "featureInstanceId", None) or "", + ) + except Exception as e: + logger.warning("file.create: could not get management interface for persistence: %s", e) + return + if not mgmt: + return + for doc in action_documents: + try: + doc_data = doc.documentData if hasattr(doc, "documentData") else doc.get("documentData") + if not doc_data: + continue + if isinstance(doc_data, str): + content = base64.b64decode(doc_data) + else: + content = doc_data + doc_name = ( + getattr(doc, "documentName", None) + or doc.get("documentName") + or "output.pdf" + ) + mime = ( + getattr(doc, "mimeType", None) + or doc.get("mimeType") + or "application/octet-stream" + ) + file_item = mgmt.createFile(doc_name, mime, content) + mgmt.createFileData(file_item.id, content) + meta = getattr(doc, "validationMetadata", None) or doc.get("validationMetadata") or {} + if isinstance(meta, dict): + meta["fileId"] = file_item.id + if hasattr(doc, "validationMetadata"): + doc.validationMetadata = meta + elif isinstance(doc, dict): + doc["validationMetadata"] = meta + logger.info("file.create: persisted %s to user files (id=%s)", doc_name, file_item.id) + except Exception as e: + dname = getattr(doc, "documentName", None) or doc.get("documentName", "?") + logger.warning("file.create: failed to persist document %s: %s", dname, e) + + +async def create(self, parameters: Dict[str, Any]) -> ActionResult: + """ + Create a file from context (text/markdown from upstream AI node). + Uses GenerationService.renderReport to produce docx, pdf, txt, md, html, xlsx, etc. 
+ """ + context = parameters.get("context", "") or "" + if not isinstance(context, str): + context = str(context) if context else "" + context = context.strip() + + if not context: + return ActionResult.isFailure(error="context is required (connect an AI node or provide text)") + + outputFormat = (parameters.get("outputFormat") or "docx").strip().lower().lstrip(".") + title = (parameters.get("title") or "Document").strip() + templateName = parameters.get("templateName") + language = (parameters.get("language") or "de").strip()[:2] + + try: + structured_content = markdownToDocumentJson(context, title, language) + if templateName: + structured_content.setdefault("metadata", {})["templateName"] = templateName + + generation = getattr(self.services, "generation", None) + if not generation: + return ActionResult.isFailure(error="Generation service not available") + + ai_service = getattr(self.services, "ai", None) + rendered_docs = await generation.renderReport( + extractedContent=structured_content, + outputFormat=outputFormat, + language=language, + title=title, + userPrompt=None, + aiService=ai_service, + parentOperationId=parameters.get("parentOperationId"), + ) + + if not rendered_docs: + return ActionResult.isFailure(error="Rendering produced no output") + + action_documents = [] + mime_map = { + "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + "pdf": "application/pdf", + "txt": "text/plain", + "md": "text/markdown", + "html": "text/html", + "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + "csv": "text/csv", + "json": "application/json", + } + for rd in rendered_docs: + doc_data = rd.documentData if hasattr(rd, "documentData") else getattr(rd, "document_data", None) + doc_name = getattr(rd, "filename", None) or getattr(rd, "documentName", None) or getattr(rd, "document_name", f"output.{outputFormat}") + mime = getattr(rd, "mimeType", None) or getattr(rd, "mime_type", None) or mime_map.get(outputFormat, "application/octet-stream") + + if isinstance(doc_data, bytes): + doc_data = base64.b64encode(doc_data).decode("ascii") + + action_documents.append(ActionDocument( + documentName=doc_name, + documentData=doc_data, + mimeType=mime, + validationMetadata={ + "actionType": "file.create", + "outputFormat": outputFormat, + "templateName": templateName, + }, + )) + + _persistDocumentsToUserFiles(action_documents, self.services) + return ActionResult.isSuccess(documents=action_documents) + + except Exception as e: + logger.error(f"file.create failed: {e}", exc_info=True) + return ActionResult.isFailure(error=str(e)) diff --git a/modules/workflows/methods/methodFile/methodFile.py b/modules/workflows/methods/methodFile/methodFile.py new file mode 100644 index 00000000..072ca598 --- /dev/null +++ b/modules/workflows/methods/methodFile/methodFile.py @@ -0,0 +1,81 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. + +import logging +from modules.workflows.methods.methodBase import MethodBase +from modules.datamodels.datamodelWorkflowActions import WorkflowActionDefinition, WorkflowActionParameter +from modules.shared.frontendTypes import FrontendType + +from .actions.create import create + +logger = logging.getLogger(__name__) + + +class MethodFile(MethodBase): + """File creation methods - create documents from context (e.g. 
from AI nodes).""" + + def __init__(self, services): + super().__init__(services) + self.name = "file" + self.description = "File creation from context" + + self._actions = { + "create": WorkflowActionDefinition( + actionId="file.create", + description="Create a file from context (text/markdown from AI). Configurable format and style preset.", + dynamicMode=True, + parameters={ + "contentSources": WorkflowActionParameter( + name="contentSources", + type="list", + frontendType=FrontendType.HIDDEN, + required=False, + description="Array of context refs. Resolved and concatenated. Empty = from connected node.", + ), + "context": WorkflowActionParameter( + name="context", + type="str", + frontendType=FrontendType.HIDDEN, + required=False, + description="Injected from contentSource or upstream connection", + ), + "outputFormat": WorkflowActionParameter( + name="outputFormat", + type="str", + frontendType=FrontendType.SELECT, + frontendOptions=["docx", "pdf", "txt", "md", "html", "xlsx", "csv", "json"], + required=True, + default="docx", + description="Output file format", + ), + "title": WorkflowActionParameter( + name="title", + type="str", + frontendType=FrontendType.TEXT, + required=False, + description="Document title", + ), + "templateName": WorkflowActionParameter( + name="templateName", + type="str", + frontendType=FrontendType.SELECT, + frontendOptions=["default", "corporate", "minimal"], + required=False, + description="Style preset", + ), + "language": WorkflowActionParameter( + name="language", + type="str", + frontendType=FrontendType.SELECT, + frontendOptions=["de", "en", "fr", "it", "es"], + required=False, + default="de", + description="Language code", + ), + }, + execute=create.__get__(self, self.__class__), + ), + } + + self._validateActions() + self.create = create.__get__(self, self.__class__) diff --git a/modules/workflows/methods/methodOutlook/actions/composeAndDraftEmailWithContext.py b/modules/workflows/methods/methodOutlook/actions/composeAndDraftEmailWithContext.py index 09cdd1dd..5c15173e 100644 --- a/modules/workflows/methods/methodOutlook/actions/composeAndDraftEmailWithContext.py +++ b/modules/workflows/methods/methodOutlook/actions/composeAndDraftEmailWithContext.py @@ -16,6 +16,7 @@ async def composeAndDraftEmailWithContext(self, parameters: Dict[str, Any]) -> A to = parameters.get("to") or [] # Optional for drafts - can save draft without recipients context = parameters.get("context") documentList = parameters.get("documentList") or [] + replySourceDocuments = parameters.get("replySourceDocuments") or [] # Original email(s) for reply attachment cc = parameters.get("cc") or [] bcc = parameters.get("bcc") or [] emailStyle = parameters.get("emailStyle") or "business" @@ -34,6 +35,7 @@ async def composeAndDraftEmailWithContext(self, parameters: Dict[str, Any]) -> A if isinstance(to, str): to = [to] ai_attachments = [] + normalized_ai_attachments = [] # Jump to create-email section (see below) else: direct_subject = parameters.get("subject") @@ -44,6 +46,7 @@ async def composeAndDraftEmailWithContext(self, parameters: Dict[str, Any]) -> A if isinstance(to, str): to = [to] ai_attachments = [] + normalized_ai_attachments = [] else: subject = None body = None @@ -51,6 +54,12 @@ async def composeAndDraftEmailWithContext(self, parameters: Dict[str, Any]) -> A use_direct_content = bool(subject and body) + # Ensure subject/body are strings (not bytes) for JSON serialization + if subject and isinstance(subject, bytes): + subject = subject.decode("utf-8", errors="replace") + 
if body and isinstance(body, bytes): + body = body.decode("utf-8", errors="replace") + if not use_direct_content: # Original path: require connectionReference and context if not connectionReference or not context: @@ -263,9 +272,12 @@ Return JSON: # Add documents as attachments if provided # Supports: 1) inline ActionDocuments (dict with documentData from e.g. sharepoint.downloadFile) # 2) docItem:... references (chat workflow documents) - if documentList: + # 3) replySourceDocuments: original email(s) for reply – attach when use_direct_content + # When use_direct_content: upstream AI doc IS the email body – do not attach it, BUT attach reply sources + attachments_doc_list = (replySourceDocuments or []) if use_direct_content else (documentList or []) + if attachments_doc_list: message["attachments"] = [] - for attachment_ref in documentList: + for attachment_ref in attachments_doc_list: base64_content = None attach_name = "attachment" attach_mime = "application/octet-stream" @@ -276,10 +288,57 @@ Return JSON: is_inline = bool(getattr(attachment_ref, "documentData", None)) if is_inline: doc = attachment_ref - base64_content = doc.get("documentData") if isinstance(doc, dict) else getattr(doc, "documentData", None) - attach_name = (doc.get("documentName") or doc.get("fileName")) if isinstance(doc, dict) else (getattr(doc, "documentName", None) or getattr(doc, "fileName", "attachment")) - attach_mime = (doc.get("mimeType") or attach_mime) if isinstance(doc, dict) else (getattr(doc, "mimeType", None) or attach_mime) + raw_data = doc.get("documentData") if isinstance(doc, dict) else getattr(doc, "documentData", None) + vm = doc.get("validationMetadata") or {} if isinstance(doc, dict) else (getattr(doc, "validationMetadata") or {}) + action_type = vm.get("actionType", "") if isinstance(vm, dict) else "" + # Reply source: email search/read result – convert first email to .eml for proper reply attachment + if "outlook" in action_type.lower() and "email" in action_type.lower() and raw_data: + try: + data = json.loads(raw_data) if isinstance(raw_data, str) else raw_data + emails_list = [] + if isinstance(data, dict): + sr = data.get("searchResults") or {} + emails_list = sr.get("results", []) if isinstance(sr, dict) else [] + if not emails_list: + ed = data.get("emails") or {} + emails_list = ed.get("emails", []) if isinstance(ed, dict) else [] + if not emails_list and isinstance(data.get("emails"), list): + emails_list = data["emails"] + if emails_list and isinstance(emails_list[0], dict): + em = emails_list[0] + fr = em.get("from", em.get("sender", {})) + addr = fr.get("emailAddress", {}) if isinstance(fr, dict) else {} + from_addr = addr.get("address", "") or addr.get("name", "") + subj = em.get("subject", "") + body_obj = em.get("body") or {} + body_content = body_obj.get("content", "") if isinstance(body_obj, dict) else str(body_obj) + eml_lines = [ + f"From: {from_addr}", + f"Subject: {subj}", + "MIME-Version: 1.0", + "Content-Type: text/html; charset=utf-8", + "", + body_content or "(no content)" + ] + eml_bytes = "\n".join(eml_lines).encode("utf-8") + base64_content = base64.b64encode(eml_bytes).decode("utf-8") + attach_name = f"original_message_{subj[:30].replace(' ', '_') if subj else 'email'}.eml" + attach_mime = "message/rfc822" + except Exception as e: + logger.debug("Could not convert email JSON to .eml: %s", e) + base64_content = raw_data + attach_name = (doc.get("documentName") or doc.get("fileName") or "attachment") if isinstance(doc, dict) else (getattr(doc, "documentName", None) or 
getattr(doc, "fileName", "attachment")) + attach_mime = (doc.get("mimeType") or attach_mime) if isinstance(doc, dict) else (getattr(doc, "mimeType", None) or attach_mime) + else: + base64_content = raw_data + attach_name = (doc.get("documentName") or doc.get("fileName")) if isinstance(doc, dict) else (getattr(doc, "documentName", None) or getattr(doc, "fileName", "attachment")) + attach_mime = (doc.get("mimeType") or attach_mime) if isinstance(doc, dict) else (getattr(doc, "mimeType", None) or attach_mime) if base64_content and attach_name: + # Microsoft Graph expects contentBytes as base64 string; documentData may be bytes (e.g. from ai.generateDocument) + if isinstance(base64_content, bytes): + base64_content = base64.b64encode(base64_content).decode("utf-8") + elif not isinstance(base64_content, str): + base64_content = base64.b64encode(str(base64_content).encode("utf-8")).decode("utf-8") message["attachments"].append({ "@odata.type": "#microsoft.graph.fileAttachment", "name": attach_name, @@ -361,7 +420,7 @@ Return JSON: "recipients": to, "cc": cc, "bcc": bcc, - "attachments": len(documentList) if documentList else 0, + "attachments": len(message.get("attachments", [])), "aiSelectedAttachments": normalized_ai_attachments if normalized_ai_attachments else "all documents", "aiGenerated": True, "context": context, @@ -373,10 +432,10 @@ Return JSON: # Extract attachment filenames for validation metadata attachmentFilenames = [] attachmentReferences = [] - if documentList: + if attachments_doc_list: # Inline docs (dict with documentName): use directly - string_refs = [r for r in documentList if isinstance(r, str)] - inline_docs = [r for r in documentList if isinstance(r, dict)] + string_refs = [r for r in attachments_doc_list if isinstance(r, str)] + inline_docs = [r for r in attachments_doc_list if isinstance(r, dict)] for d in inline_docs: name = d.get("documentName") or d.get("fileName") if name: diff --git a/modules/workflows/methods/methodSharepoint/actions/uploadFile.py b/modules/workflows/methods/methodSharepoint/actions/uploadFile.py index 86d5787d..56e9f0b2 100644 --- a/modules/workflows/methods/methodSharepoint/actions/uploadFile.py +++ b/modules/workflows/methods/methodSharepoint/actions/uploadFile.py @@ -15,6 +15,11 @@ async def uploadFile(self, parameters: Dict[str, Any]) -> ActionResult: if not connectionReference: return ActionResult.isFailure(error="connectionReference parameter is required") + # Set SharePoint access token first – required before siteDiscovery/sharepoint calls + connection = self.connection.getMicrosoftConnection(connectionReference) + if not connection: + return ActionResult.isFailure(error="No valid Microsoft connection found for the provided connection reference") + contentParam = parameters.get("content") if not contentParam: return ActionResult.isFailure(error="content parameter is required") @@ -100,12 +105,7 @@ async def uploadFile(self, parameters: Dict[str, Any]) -> ActionResult: except Exception as e: return ActionResult.isFailure(error=f"Could not decode base64 file content: {str(e)}") - # Get Microsoft connection - connection = self.connection.getMicrosoftConnection(connectionReference) - if not connection: - return ActionResult.isFailure(error="No valid Microsoft connection found for the provided connection reference") - - # Upload file + # Upload file (connection/token already set above) uploadResult = await self.services.sharepoint.uploadFile( siteId=siteId, folderPath=folderPath, diff --git a/modules/workflows/workflowManager.py 
b/modules/workflows/workflowManager.py index b9b64a9a..555e3c6f 100644 --- a/modules/workflows/workflowManager.py +++ b/modules/workflows/workflowManager.py @@ -42,7 +42,7 @@ class WorkflowManager: if ctx is not None: ctx.workflow = workflow # Also update contexts on resolved services (they may be cached with different context refs) - for attr in ('chat', 'ai', 'extraction', 'sharepoint', 'utils', 'billing', 'generation'): + for attr in ('chat', 'ai', 'extraction', 'sharepoint', 'clickup', 'utils', 'billing', 'generation'): svc = getattr(self.services, attr, None) if svc is not None and hasattr(svc, '_context') and svc._context is not None: svc._context.workflow = workflow diff --git a/tests/unit/workflows/test_automation2_graphUtils.py b/tests/unit/workflows/test_automation2_graphUtils.py new file mode 100644 index 00000000..45f4ba0f --- /dev/null +++ b/tests/unit/workflows/test_automation2_graphUtils.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +""" +Unit tests for automation2 graphUtils - resolveParameterReferences (ref/value format). +""" + +import pytest + +from modules.workflows.automation2.graphUtils import resolveParameterReferences + + +class TestResolveParameterReferences: + """Test structured ref/value resolution.""" + + def test_ref_simple(self): + node_outputs = { + "n1": {"payload": {"country": "CH"}}, + } + value = {"type": "ref", "nodeId": "n1", "path": ["payload", "country"]} + assert resolveParameterReferences(value, node_outputs) == "CH" + + def test_ref_root(self): + node_outputs = {"n1": {"a": 1, "b": 2}} + value = {"type": "ref", "nodeId": "n1", "path": []} + assert resolveParameterReferences(value, node_outputs) == {"a": 1, "b": 2} + + def test_ref_nested(self): + node_outputs = {"form_1": {"customer": {"country": "DE", "name": "Test"}}} + value = {"type": "ref", "nodeId": "form_1", "path": ["customer", "country"]} + assert resolveParameterReferences(value, node_outputs) == "DE" + + def test_ref_array_index(self): + node_outputs = {"n1": {"items": ["a", "b", "c"]}} + value = {"type": "ref", "nodeId": "n1", "path": ["items", 1]} + assert resolveParameterReferences(value, node_outputs) == "b" + + def test_ref_missing_node(self): + node_outputs = {} + value = {"type": "ref", "nodeId": "missing", "path": ["x"]} + assert resolveParameterReferences(value, node_outputs) == value + + def test_value_wrapper(self): + value = {"type": "value", "value": "static text"} + assert resolveParameterReferences(value, {}) == "static text" + + def test_value_nested_ref(self): + node_outputs = {"n1": {"x": 42}} + value = {"type": "value", "value": {"type": "ref", "nodeId": "n1", "path": ["x"]}} + assert resolveParameterReferences(value, node_outputs) == 42 + + def test_dict_mixed_ref_value(self): + node_outputs = {"n1": {"result": "hello"}} + value = { + "prompt": {"type": "ref", "nodeId": "n1", "path": ["result"]}, + "suffix": {"type": "value", "value": " world"}, + } + result = resolveParameterReferences(value, node_outputs) + assert result == {"prompt": "hello", "suffix": " world"} + + def test_legacy_string_template(self): + node_outputs = {"n1": {"country": "CH"}} + value = "Land: {{n1.country}}" + assert resolveParameterReferences(value, node_outputs) == "Land: CH" From efe540b4f9c2cc2d617b6688095c02a628138d55 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Sat, 28 Mar 2026 16:59:01 +0100 Subject: [PATCH 05/33] fixing round 1 --- app.py | 3 + modules/datamodels/datamodelAi.py | 1 + .../datamodels/datamodelFeatureDataSource.py | 15 ++ modules/datamodels/datamodelKnowledge.py | 5 + 
modules/datamodels/datamodelUam.py | 6 + .../commcoach/routeFeatureCommcoach.py | 2 +- .../workspace/interfaceFeatureWorkspace.py | 2 +- .../workspace/routeFeatureWorkspace.py | 23 ++ modules/interfaces/interfaceBootstrap.py | 8 + modules/interfaces/interfaceDbApp.py | 248 +++++++++++------- modules/interfaces/interfaceDbManagement.py | 7 +- modules/interfaces/interfaceFeatures.py | 47 +++- modules/migration/migrateRootUsers.py | 129 ++++++++- modules/routes/routeAdminFeatures.py | 110 ++++++-- modules/routes/routeDataFiles.py | 49 ++++ modules/routes/routeDataSources.py | 97 +++++++ modules/routes/routeDataUsers.py | 9 +- modules/routes/routeSecurityLocal.py | 53 +++- modules/routes/routeStore.py | 60 ++--- modules/routes/routeSubscription.py | 58 +++- modules/routes/routeSystem.py | 4 +- .../services/serviceAi/mainServiceAi.py | 98 +++++-- .../serviceKnowledge/mainServiceKnowledge.py | 15 +- modules/shared/attributeUtils.py | 21 -- 24 files changed, 844 insertions(+), 226 deletions(-) create mode 100644 modules/routes/routeDataSources.py diff --git a/app.py b/app.py index 8268377a..63a18f94 100644 --- a/app.py +++ b/app.py @@ -545,6 +545,9 @@ app.include_router(userRouter) from modules.routes.routeDataFiles import router as fileRouter app.include_router(fileRouter) +from modules.routes.routeDataSources import router as dataSourceRouter +app.include_router(dataSourceRouter) + from modules.routes.routeDataPrompts import router as promptRouter app.include_router(promptRouter) diff --git a/modules/datamodels/datamodelAi.py b/modules/datamodels/datamodelAi.py index 296500aa..c31d5696 100644 --- a/modules/datamodels/datamodelAi.py +++ b/modules/datamodels/datamodelAi.py @@ -168,6 +168,7 @@ class AiCallRequest(BaseModel): contentParts: Optional[List['ContentPart']] = None # Content parts for model-aware chunking messages: Optional[List[Dict[str, Any]]] = Field(default=None, description="OpenAI-style messages for multi-turn agent conversations") tools: Optional[List[Dict[str, Any]]] = Field(default=None, description="Tool definitions for native function calling") + requireNeutralization: Optional[bool] = Field(default=None, description="Per-request neutralization override: True=force, False=skip, None=use config") class AiCallResponse(BaseModel): diff --git a/modules/datamodels/datamodelFeatureDataSource.py b/modules/datamodels/datamodelFeatureDataSource.py index 89b8b372..5aa834eb 100644 --- a/modules/datamodels/datamodelFeatureDataSource.py +++ b/modules/datamodels/datamodelFeatureDataSource.py @@ -25,6 +25,21 @@ class FeatureDataSource(BaseModel): userId: str = Field(default="", description="Owner user ID") workspaceInstanceId: str = Field(description="Workspace instance where this source is used") createdAt: float = Field(default_factory=getUtcTimestamp, description="Creation timestamp") + scope: str = Field( + default="personal", + description="Data visibility scope: personal, featureInstance, mandate, global", + json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [ + {"value": "personal", "label": {"en": "Personal", "de": "Persönlich"}}, + {"value": "featureInstance", "label": {"en": "Feature Instance", "de": "Feature-Instanz"}}, + {"value": "mandate", "label": {"en": "Mandate", "de": "Mandant"}}, + {"value": "global", "label": {"en": "Global", "de": "Global"}}, + ]} + ) + neutralize: bool = Field( + default=False, + description="Whether this data source should be neutralized before AI processing", + 
json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False} + ) registerModelLabels( diff --git a/modules/datamodels/datamodelKnowledge.py b/modules/datamodels/datamodelKnowledge.py index ac1c4ecc..e9dcc857 100644 --- a/modules/datamodels/datamodelKnowledge.py +++ b/modules/datamodels/datamodelKnowledge.py @@ -42,6 +42,10 @@ class FileContentIndex(BaseModel): default=None, description="Neutralization status: completed, failed, skipped, None = not required", ) + isNeutralized: bool = Field( + default=False, + description="True if content was neutralized before indexing", + ) registerModelLabels( @@ -64,6 +68,7 @@ registerModelLabels( "status": {"en": "Status", "fr": "Statut"}, "scope": {"en": "Scope", "de": "Sichtbarkeit"}, "neutralizationStatus": {"en": "Neutralization Status", "de": "Neutralisierungsstatus"}, + "isNeutralized": {"en": "Is Neutralized", "de": "Neutralisiert"}, }, ) diff --git a/modules/datamodels/datamodelUam.py b/modules/datamodels/datamodelUam.py index d56bd861..3e1250c7 100644 --- a/modules/datamodels/datamodelUam.py +++ b/modules/datamodels/datamodelUam.py @@ -103,6 +103,11 @@ class Mandate(BaseModel): {"value": "company", "label": {"en": "Company", "de": "Unternehmen"}}, ]} ) + deletedAt: Optional[float] = Field( + default=None, + description="Timestamp when the mandate was soft-deleted. After 30 days, hard-delete is triggered.", + json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False} + ) @field_validator('isSystem', mode='before') @classmethod @@ -135,6 +140,7 @@ registerModelLabels( "enabled": {"en": "Enabled", "de": "Aktiviert", "fr": "Activé"}, "isSystem": {"en": "System Mandate", "de": "System-Mandant", "fr": "Mandat système"}, "mandateType": {"en": "Mandate Type", "de": "Mandantentyp", "fr": "Type de mandat"}, + "deletedAt": {"en": "Deleted at", "de": "Gelöscht am", "fr": "Supprimé le"}, }, ) diff --git a/modules/features/commcoach/routeFeatureCommcoach.py b/modules/features/commcoach/routeFeatureCommcoach.py index 6d6eb44f..ccb4d342 100644 --- a/modules/features/commcoach/routeFeatureCommcoach.py +++ b/modules/features/commcoach/routeFeatureCommcoach.py @@ -2,7 +2,7 @@ # All rights reserved. """ CommCoach routes for the backend API. -Implements coaching context management, session streaming, tasks, dashboard, and voice endpoints. +Implements coaching context management, session streaming, tasks, and dashboard. """ import logging diff --git a/modules/features/workspace/interfaceFeatureWorkspace.py b/modules/features/workspace/interfaceFeatureWorkspace.py index 525ac62e..05bda01d 100644 --- a/modules/features/workspace/interfaceFeatureWorkspace.py +++ b/modules/features/workspace/interfaceFeatureWorkspace.py @@ -1,7 +1,7 @@ # Copyright (c) 2025 Patrick Motsch # All rights reserved. """ -Interface for Workspace feature — manages VoiceSettings and WorkspaceUserSettings. +Interface for Workspace feature — manages WorkspaceUserSettings. Uses a dedicated poweron_workspace database. 
""" diff --git a/modules/features/workspace/routeFeatureWorkspace.py b/modules/features/workspace/routeFeatureWorkspace.py index 1828cba6..7698181a 100644 --- a/modules/features/workspace/routeFeatureWorkspace.py +++ b/modules/features/workspace/routeFeatureWorkspace.py @@ -893,6 +893,23 @@ async def listWorkspaceWorkflows( _validateInstanceAccess(instanceId, context) chatInterface = _getChatInterface(context, featureInstanceId=instanceId) workflows = chatInterface.getWorkflows() or [] + + from modules.interfaces.interfaceDbApp import getRootInterface + rootIf = getRootInterface() + _fiCache: Dict[str, Dict[str, str]] = {} + + def _resolveFeatureLabels(fiId: str) -> Dict[str, str]: + if fiId not in _fiCache: + fi = rootIf.getFeatureInstance(fiId) + if fi: + _fiCache[fiId] = { + "featureLabel": getattr(fi, "label", "") or getattr(fi, "featureCode", fiId), + "featureCode": getattr(fi, "featureCode", ""), + } + else: + _fiCache[fiId] = {"featureLabel": fiId[:8], "featureCode": ""} + return _fiCache[fiId] + items = [] for wf in workflows: if isinstance(wf, dict): @@ -904,9 +921,15 @@ async def listWorkspaceWorkflows( "status": getattr(wf, "status", ""), "startedAt": getattr(wf, "startedAt", None), "lastActivity": getattr(wf, "lastActivity", None), + "featureInstanceId": getattr(wf, "featureInstanceId", instanceId), } if not includeArchived and item.get("status") == "archived": continue + fiId = item.get("featureInstanceId") or instanceId + labels = _resolveFeatureLabels(fiId) + item.setdefault("featureLabel", labels["featureLabel"]) + item.setdefault("featureCode", labels["featureCode"]) + item.setdefault("featureInstanceId", fiId) items.append(item) return JSONResponse({"workflows": items}) diff --git a/modules/interfaces/interfaceBootstrap.py b/modules/interfaces/interfaceBootstrap.py index e2a0dfa4..0a3e24ad 100644 --- a/modules/interfaces/interfaceBootstrap.py +++ b/modules/interfaces/interfaceBootstrap.py @@ -133,6 +133,14 @@ def initBootstrap(db: DatabaseConnector) -> None: # Auto-provision Stripe Products/Prices for paid plans (idempotent) _bootstrapStripePrices() + # Purge soft-deleted mandates past 30-day retention + try: + from modules.interfaces.interfaceDbApp import getRootInterface + rootIf = getRootInterface() + rootIf.purgeExpiredMandates(retentionDays=30) + except Exception as e: + logger.warning(f"Mandate retention purge failed: {e}") + def initAutomationTemplates(dbApp: DatabaseConnector, adminUserId: Optional[str] = None) -> None: """ diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py index 6645e929..183bedb6 100644 --- a/modules/interfaces/interfaceDbApp.py +++ b/modules/interfaces/interfaceDbApp.py @@ -800,48 +800,6 @@ class AppObjects: logger.error(f"Error updating user: {str(e)}") raise ValueError(f"Failed to update user: {str(e)}") - def _assignUserToRootMandate(self, userId: str) -> None: - """ - Assign a new user to the root mandate with the mandate-instance 'user' role. - This ensures every user has a base membership in the system mandate. - - Uses the mandate-instance role (mandateId=rootMandateId), not the global template. - Feature instance access is NOT granted here - it is managed separately - via invitations or admin assignment. 
- - Args: - userId: User ID to assign - """ - try: - from modules.datamodels.datamodelRbac import Role - - rootMandateId = self._getRootMandateId() - if not rootMandateId: - logger.warning("No root mandate found, skipping root mandate assignment") - return - - # Check if user already has a mandate membership - existing = self.getUserMandate(userId, rootMandateId) - if existing: - logger.debug(f"User {userId} already assigned to root mandate") - return - - # Mandate-instance 'user' role (bound to this mandate, not a global template) - mandateUserRoles = self.db.getRecordset( - Role, - recordFilter={"roleLabel": "user", "mandateId": rootMandateId, "featureInstanceId": None} - ) - userRoleId = mandateUserRoles[0].get("id") if mandateUserRoles else None - - roleIds = [userRoleId] if userRoleId else [] - - self.createUserMandate(userId, rootMandateId, roleIds) - logger.info(f"Assigned user {userId} to root mandate with user role") - - except Exception as e: - # Log but don't fail user creation - logger.error(f"Error assigning user {userId} to root mandate: {e}") - def disableUser(self, userId: str) -> User: """Disables a user if current user has permission.""" return self.updateUser(userId, {"enabled": False}) @@ -1493,11 +1451,10 @@ class AppObjects: adminRoleId = r.get("id") break - userMandate = UserMandate(userId=userId, mandateId=mandateId, enabled=True) - createdUm = self.db.recordCreate(UserMandate, userMandate.model_dump()) - if adminRoleId and createdUm: - umRole = UserMandateRole(userMandateId=createdUm["id"], roleId=adminRoleId) - self.db.recordCreate(UserMandateRole, umRole.model_dump()) + if not adminRoleId: + raise ValueError(f"No admin role found for mandate {mandateId} — cannot assign user without role") + + self.createUserMandate(userId, mandateId, roleIds=[adminRoleId]) subscription = MandateSubscription( mandateId=mandateId, @@ -1533,14 +1490,16 @@ class AppObjects: instanceRoles = self.db.getRecordset(Role, recordFilter={"featureInstanceId": instanceId}) adminInstRoleId = None for ir in instanceRoles: - if "admin" in (ir.get("roleLabel") or "").lower(): + roleLabel = (ir.get("roleLabel") or "").lower() + if roleLabel.endswith("-admin"): adminInstRoleId = ir.get("id") break - fa = FeatureAccess(userId=userId, featureInstanceId=instanceId, enabled=True) - createdFa = self.db.recordCreate(FeatureAccess, fa.model_dump()) - if adminInstRoleId and createdFa: - far = FeatureAccessRole(featureAccessId=createdFa["id"], roleId=adminInstRoleId) - self.db.recordCreate(FeatureAccessRole, far.model_dump()) + if not adminInstRoleId: + raise ValueError( + f"No feature-specific admin role (e.g. {featureCode}-admin) for instance {instanceId}. " + f"Template roles not synced for feature '{featureCode}'." 
+ ) + self.createFeatureAccess(userId, instanceId, roleIds=[adminInstRoleId]) except Exception as e: logger.error(f"Error auto-creating instance for '{featureName}': {e}") @@ -1669,15 +1628,72 @@ class AppObjects: raise PermissionError(f"No permission to delete mandate {mandateId}") if not force: - self.db.recordModify(Mandate, mandateId, {"enabled": False}) - logger.info(f"Soft-deleted mandate {mandateId}") + from modules.shared.timeUtils import getUtcTimestamp + self.db.recordModify(Mandate, mandateId, {"enabled": False, "deletedAt": getUtcTimestamp()}) + logger.info(f"Soft-deleted mandate {mandateId} (30-day retention)") return True # Hard delete with cascade from modules.datamodels.datamodelSubscription import MandateSubscription + from modules.datamodels.datamodelChat import ChatWorkflow, ChatMessage, ChatLog + from modules.datamodels.datamodelFiles import FileItem + from modules.datamodels.datamodelDataSource import DataSource + from modules.datamodels.datamodelKnowledge import FileContentIndex + from modules.features.neutralization.datamodelFeatureNeutralizer import DataNeutralizerAttributes + + instances = self.db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) + + # 0. Delete instance-scoped data for each FeatureInstance + for inst in instances: + instId = inst.get("id") + if not instId: + continue + + # 0a. FileContentIndex (knowledge/RAG) + fciRecords = self.db.getRecordset(FileContentIndex, recordFilter={"featureInstanceId": instId}) + for rec in fciRecords: + self.db.recordDelete(FileContentIndex, rec.get("id")) + if fciRecords: + logger.info(f"Cascade: deleted {len(fciRecords)} FileContentIndex records for instance {instId}") + + # 0b. DataNeutralizerAttributes + dnaRecords = self.db.getRecordset(DataNeutralizerAttributes, recordFilter={"featureInstanceId": instId}) + for rec in dnaRecords: + self.db.recordDelete(DataNeutralizerAttributes, rec.get("id")) + if dnaRecords: + logger.info(f"Cascade: deleted {len(dnaRecords)} DataNeutralizerAttributes for instance {instId}") + + # 0c. DataSource + dsRecords = self.db.getRecordset(DataSource, recordFilter={"featureInstanceId": instId}) + for rec in dsRecords: + self.db.recordDelete(DataSource, rec.get("id")) + if dsRecords: + logger.info(f"Cascade: deleted {len(dsRecords)} DataSource records for instance {instId}") + + # 0d. FileItem + fileRecords = self.db.getRecordset(FileItem, recordFilter={"featureInstanceId": instId}) + for rec in fileRecords: + self.db.recordDelete(FileItem, rec.get("id")) + if fileRecords: + logger.info(f"Cascade: deleted {len(fileRecords)} FileItem records for instance {instId}") + + # 0e. ChatWorkflow + ChatMessage + ChatLog + workflows = self.db.getRecordset(ChatWorkflow, recordFilter={"featureInstanceId": instId}) + for wf in workflows: + wfId = wf.get("id") + if not wfId: + continue + msgs = self.db.getRecordset(ChatMessage, recordFilter={"workflowId": wfId}) + for msg in msgs: + self.db.recordDelete(ChatMessage, msg.get("id")) + logs = self.db.getRecordset(ChatLog, recordFilter={"workflowId": wfId}) + for log in logs: + self.db.recordDelete(ChatLog, log.get("id")) + self.db.recordDelete(ChatWorkflow, wfId) + if workflows: + logger.info(f"Cascade: deleted {len(workflows)} ChatWorkflows (with messages/logs) for instance {instId}") # 1. 
Delete FeatureAccess + FeatureAccessRole for all instances in this mandate - instances = self.db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) for inst in instances: instId = inst.get("id") accesses = self.db.getRecordset(FeatureAccess, recordFilter={"featureInstanceId": instId}) @@ -1692,10 +1708,20 @@ class AppObjects: self.db.recordDelete(UserMandate, um.get("id")) logger.info(f"Cascade: deleted {len(memberships)} UserMandates for mandate {mandateId}") - # 3. Delete MandateSubscriptions + # 3. Cancel Stripe subscriptions + delete MandateSubscription records subs = self.db.getRecordset(MandateSubscription, recordFilter={"mandateId": mandateId}) for sub in subs: - self.db.recordDelete(MandateSubscription, sub.get("id")) + subId = sub.get("id") + stripeSubId = sub.get("stripeSubscriptionId") + if stripeSubId: + try: + from modules.shared.stripeClient import getStripeClient + stripe = getStripeClient() + stripe.Subscription.cancel(stripeSubId) + logger.info(f"Cancelled Stripe subscription {stripeSubId} for mandate {mandateId}") + except Exception as e: + logger.warning(f"Failed to cancel Stripe sub {stripeSubId}: {e}") + self.db.recordDelete(MandateSubscription, subId) logger.info(f"Cascade: deleted {len(subs)} subscriptions for mandate {mandateId}") # 4. Delete mandate-level Roles @@ -1717,6 +1743,35 @@ class AppObjects: logger.error(f"Error deleting mandate: {str(e)}") raise ValueError(f"Failed to delete mandate: {str(e)}") + def restoreMandate(self, mandateId: str) -> bool: + """Restore a soft-deleted mandate (undo soft-delete within the 30-day retention window).""" + mandate = self.getMandate(mandateId) + if not mandate: + return False + self.db.recordModify(Mandate, mandateId, {"enabled": True, "deletedAt": None}) + logger.info(f"Restored soft-deleted mandate {mandateId}") + return True + + def purgeExpiredMandates(self, retentionDays: int = 30) -> int: + """Hard-delete all mandates whose soft-delete timestamp exceeds the retention period.""" + import time + cutoff = time.time() - (retentionDays * 86400) + allMandates = self.db.getRecordset(Mandate) + purged = 0 + for m in allMandates: + deletedAt = m.get("deletedAt") if isinstance(m, dict) else getattr(m, "deletedAt", None) + enabled = m.get("enabled") if isinstance(m, dict) else getattr(m, "enabled", True) + mandateId = m.get("id") if isinstance(m, dict) else getattr(m, "id", None) + if deletedAt and not enabled and deletedAt < cutoff and mandateId: + try: + self.deleteMandate(mandateId, force=True) + purged += 1 + except Exception as e: + logger.error(f"Failed to purge expired mandate {mandateId}: {e}") + if purged: + logger.info(f"Purged {purged} expired mandate(s) beyond {retentionDays}-day retention") + return purged + # ============================================ # User-Mandate Membership Methods (Multi-Tenant) # ============================================ @@ -1774,45 +1829,44 @@ class AppObjects: Create a UserMandate record (add user to mandate). Also creates a billing account for the user if billing is configured for PREPAY_USER. + INVARIANT: A UserMandate MUST have at least one UserMandateRole. 
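+
+        Usage sketch (mirrors routeDataUsers.create_user; assumes the mandate's 'user' role exists):
+            userRole = appInterface.getRoleByLabel("user")
+            appInterface.createUserMandate(userId=str(newUser.id), mandateId=str(context.mandateId), roleIds=[str(userRole.id)])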
+ Args: userId: User ID mandateId: Mandate ID - roleIds: Optional list of role IDs to assign + roleIds: List of role IDs to assign (at least one required) Returns: Created UserMandate object """ + if not roleIds: + raise ValueError(f"Cannot create UserMandate without roles for user {userId} in mandate {mandateId}") + try: - # Check if already exists existing = self.getUserMandate(userId, mandateId) if existing: raise ValueError(f"User {userId} is already member of mandate {mandateId}") - # Subscription capacity check (before insert) self._checkSubscriptionCapacity(mandateId, "users", delta=1) - # Create UserMandate userMandate = UserMandate( userId=userId, mandateId=mandateId, enabled=True ) createdRecord = self.db.recordCreate(UserMandate, userMandate.model_dump()) + if not createdRecord: + raise ValueError("Database failed to create UserMandate record") - # Assign roles via junction table - if roleIds and createdRecord: - userMandateId = createdRecord.get("id") - for roleId in roleIds: - userMandateRole = UserMandateRole( - userMandateId=userMandateId, - roleId=roleId - ) - self.db.recordCreate(UserMandateRole, userMandateRole.model_dump()) + userMandateId = createdRecord.get("id") + for roleId in roleIds: + userMandateRole = UserMandateRole( + userMandateId=userMandateId, + roleId=roleId + ) + self.db.recordCreate(UserMandateRole, userMandateRole.model_dump()) - # Create billing account for user if billing is configured self._ensureUserBillingAccount(userId, mandateId) - - # Sync Stripe quantity after successful insert self._syncSubscriptionQuantity(mandateId) cleanedRecord = {k: v for k, v in createdRecord.items() if not k.startswith("_")} @@ -2198,40 +2252,42 @@ class AppObjects: Create a FeatureAccess record (grant user access to feature instance). Also auto-assigns the user to the mandate with the 'user' role if not already a member. + INVARIANT: A FeatureAccess MUST have at least one FeatureAccessRole. 
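+
+        Usage sketch (mirrors routeStore.activateStoreFeature; adminRoleId must be an instance-scoped role resolved beforehand):
+            rootInterface.createFeatureAccess(userId, instanceId, roleIds=[adminRoleId])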
+ Args: userId: User ID featureInstanceId: FeatureInstance ID - roleIds: Optional list of role IDs to assign + roleIds: List of role IDs to assign (at least one required) Returns: Created FeatureAccess object """ + if not roleIds: + raise ValueError(f"Cannot create FeatureAccess without roles for user {userId} on instance {featureInstanceId}") + try: - # Check if already exists existing = self.getFeatureAccess(userId, featureInstanceId) if existing: raise ValueError(f"User {userId} already has access to feature instance {featureInstanceId}") - # Auto-assign user to mandate with 'user' role if not already a member self._ensureUserMandateMembership(userId, featureInstanceId) - # Create FeatureAccess featureAccess = FeatureAccess( userId=userId, featureInstanceId=featureInstanceId, enabled=True ) createdRecord = self.db.recordCreate(FeatureAccess, featureAccess.model_dump()) + if not createdRecord: + raise ValueError("Database failed to create FeatureAccess record") - # Assign roles via junction table - if roleIds and createdRecord: - featureAccessId = createdRecord.get("id") - for roleId in roleIds: - featureAccessRole = FeatureAccessRole( - featureAccessId=featureAccessId, - roleId=roleId - ) - self.db.recordCreate(FeatureAccessRole, featureAccessRole.model_dump()) + featureAccessId = createdRecord.get("id") + for roleId in roleIds: + featureAccessRole = FeatureAccessRole( + featureAccessId=featureAccessId, + roleId=roleId + ) + self.db.recordCreate(FeatureAccessRole, featureAccessRole.model_dump()) cleanedRecord = {k: v for k, v in createdRecord.items() if not k.startswith("_")} return FeatureAccess(**cleanedRecord) @@ -2242,7 +2298,7 @@ class AppObjects: def _ensureUserMandateMembership(self, userId: str, featureInstanceId: str) -> None: """ Ensure user is a member of the mandate that owns the feature instance. - If not already a member, adds them with the 'user' role (no access rights, membership only). + If not already a member, adds them with the 'user' role. 
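+        Raises ValueError when the mandate has no 'user' role; membership is never created role-less.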
""" try: from modules.interfaces.interfaceFeatures import getFeatureInterface @@ -2255,28 +2311,30 @@ class AppObjects: mandateId = str(instance.mandateId) - # Check if user already has mandate membership existing = self.getUserMandate(userId, mandateId) if existing: logger.debug(f"User {userId} already member of mandate {mandateId}") return - # Find the mandate-level 'user' role (membership marker, no access rights) userRoles = self.db.getRecordset( Role, recordFilter={"roleLabel": "user", "mandateId": mandateId, "featureInstanceId": None} ) userRoleId = userRoles[0].get("id") if userRoles else None - roleIds = [userRoleId] if userRoleId else [] + if not userRoleId: + raise ValueError(f"No 'user' role found for mandate {mandateId} — cannot assign user without role") - self.createUserMandate(userId, mandateId, roleIds) + self.createUserMandate(userId, mandateId, roleIds=[userRoleId]) logger.info(f"Auto-assigned user {userId} to mandate {mandateId} with 'user' role (via feature instance {featureInstanceId})") - except ValueError: - # createUserMandate raises ValueError if already exists - safe to ignore - pass + except ValueError as ve: + if "already member" in str(ve): + pass + else: + raise except Exception as e: logger.error(f"Error auto-assigning user {userId} to mandate: {e}") + raise def getRoleIdsForFeatureAccess(self, featureAccessId: str) -> List[str]: """ diff --git a/modules/interfaces/interfaceDbManagement.py b/modules/interfaces/interfaceDbManagement.py index 64883b95..58fd6926 100644 --- a/modules/interfaces/interfaceDbManagement.py +++ b/modules/interfaces/interfaceDbManagement.py @@ -940,7 +940,12 @@ class ComponentObjects: fileName = file.get("fileName") if not fileName or fileName == "None": continue - + + if file.get("scope") is None: + file["scope"] = "personal" + if file.get("neutralize") is None: + file["neutralize"] = False + fileItem = FileItem(**file) fileItems.append(fileItem) except Exception as e: diff --git a/modules/interfaces/interfaceFeatures.py b/modules/interfaces/interfaceFeatures.py index b261e76e..56311f01 100644 --- a/modules/interfaces/interfaceFeatures.py +++ b/modules/interfaces/interfaceFeatures.py @@ -208,7 +208,11 @@ class FeatureInterface: def _copyTemplateRoles(self, featureCode: str, mandateId: str, instanceId: str) -> int: """ - Copy global template roles for a feature to a new instance. + Copy feature-specific template roles to a new instance. + + INVARIANT: Feature instances MUST receive feature-specific roles + (e.g. workspace-admin, workspace-user). NEVER generic mandate roles. + Feature templates have featureCode set and isSystemRole=False. Args: featureCode: Feature code @@ -217,19 +221,30 @@ class FeatureInterface: Returns: Number of roles copied + + Raises: + ValueError: If no feature-specific template roles exist """ try: - # Find global template roles for this feature (mandateId=None) - globalRoles = self.db.getRecordset( + allTemplates = self.db.getRecordset( Role, - recordFilter={"featureCode": featureCode, "mandateId": None} + recordFilter={"featureCode": featureCode} ) - if not globalRoles: - logger.debug(f"No template roles found for feature {featureCode}") - return 0 + featureTemplates = [ + r for r in allTemplates + if r.get("mandateId") is None and r.get("featureInstanceId") is None + ] - templateRoleIds = [r.get("id") for r in globalRoles] + if not featureTemplates: + raise ValueError( + f"No feature-specific template roles found for '{featureCode}'. 
" + f"Each feature module must define TEMPLATE_ROLES and sync them to DB on startup." + ) + + logger.info(f"Found {len(featureTemplates)} feature-specific template roles for '{featureCode}'") + + templateRoleIds = [r.get("id") for r in featureTemplates] # BULK: Load all template AccessRules in one query allTemplateRules = [] @@ -246,7 +261,7 @@ class FeatureInterface: # Copy roles and their AccessRules copiedCount = 0 - for templateRole in globalRoles: + for templateRole in featureTemplates: newRoleId = str(uuid.uuid4()) # Create new role for this instance @@ -282,9 +297,11 @@ class FeatureInterface: logger.info(f"Copied {copiedCount} template roles for instance {instanceId}") return copiedCount + except ValueError: + raise except Exception as e: logger.error(f"Error copying template roles: {e}") - return 0 + raise ValueError(f"Failed to copy template roles for '{featureCode}': {e}") def syncRolesFromTemplate(self, featureInstanceId: str, addOnly: bool = True) -> Dict[str, int]: """ @@ -309,11 +326,15 @@ class FeatureInterface: featureCode = instance.featureCode mandateId = instance.mandateId - # Get current template roles - templateRoles = self.db.getRecordset( + # Get feature-specific template roles (mandateId=None, featureInstanceId=None) + allForFeature = self.db.getRecordset( Role, - recordFilter={"featureCode": featureCode, "mandateId": None} + recordFilter={"featureCode": featureCode} ) + templateRoles = [ + r for r in allForFeature + if r.get("mandateId") is None and r.get("featureInstanceId") is None + ] templateLabels = {r.get("roleLabel") for r in templateRoles} # Get current instance roles diff --git a/modules/migration/migrateRootUsers.py b/modules/migration/migrateRootUsers.py index f1a55d9e..a048e614 100644 --- a/modules/migration/migrateRootUsers.py +++ b/modules/migration/migrateRootUsers.py @@ -7,12 +7,20 @@ Called once from bootstrap, sets a DB flag to prevent re-execution. """ import logging -from typing import Optional +from typing import Optional, List, Dict, Any logger = logging.getLogger(__name__) _MIGRATION_FLAG_KEY = "migration_root_users_completed" +_DATA_TABLES = [ + "ChatWorkflow", + "FileItem", + "DataSource", + "DataNeutralizerAttributes", + "FileContentIndex", +] + def _isMigrationCompleted(db) -> bool: """Check if migration has already been executed.""" @@ -37,6 +45,95 @@ def _setMigrationCompleted(db) -> None: logger.error(f"Failed to set migration flag: {e}") +def _findOrCreateTargetInstance(db, featureInterface, featureCode: str, targetMandateId: str, rootInstance: dict) -> dict: + """Find existing or create new FeatureInstance in target mandate. 
Idempotent.""" + from modules.datamodels.datamodelFeatures import FeatureInstance + + existing = db.getRecordset(FeatureInstance, recordFilter={ + "featureCode": featureCode, + "mandateId": targetMandateId, + }) + if existing: + logger.debug(f"Target instance already exists for {featureCode} in mandate {targetMandateId}") + return existing[0] + + label = rootInstance.get("label") or featureCode + instance = featureInterface.createFeatureInstance( + featureCode=featureCode, + mandateId=targetMandateId, + label=label, + enabled=True, + copyTemplateRoles=True, + ) + if isinstance(instance, dict): + return instance + return instance.model_dump() if hasattr(instance, "model_dump") else {"id": instance.id} + + +def _migrateDataRecords(db, oldInstanceId: str, newInstanceId: str, userId: str) -> int: + """Bulk-update featureInstanceId on all data tables for records owned by userId.""" + totalMigrated = 0 + db._ensure_connection() + for tableName in _DATA_TABLES: + try: + with db.connection.cursor() as cursor: + cursor.execute( + f'UPDATE "{tableName}" ' + f'SET "featureInstanceId" = %s ' + f'WHERE "featureInstanceId" = %s AND "_createdBy" = %s', + (newInstanceId, oldInstanceId, userId), + ) + count = cursor.rowcount + db.connection.commit() + if count > 0: + logger.info(f" Migrated {count} rows in {tableName}: {oldInstanceId} -> {newInstanceId}") + totalMigrated += count + except Exception as e: + try: + db.connection.rollback() + except Exception: + pass + logger.debug(f" Table {tableName} skipped (may not exist or no matching column): {e}") + return totalMigrated + + +def _grantFeatureAccess(db, userId: str, featureInstanceId: str) -> dict: + """Create FeatureAccess + admin role on a feature instance. Idempotent.""" + from modules.datamodels.datamodelMembership import FeatureAccess, FeatureAccessRole + from modules.datamodels.datamodelRbac import Role + + existing = db.getRecordset(FeatureAccess, recordFilter={ + "userId": userId, + "featureInstanceId": featureInstanceId, + }) + if existing: + logger.debug(f"FeatureAccess already exists for user {userId} on instance {featureInstanceId}") + return existing[0] + + fa = FeatureAccess(userId=userId, featureInstanceId=featureInstanceId, enabled=True) + createdFa = db.recordCreate(FeatureAccess, fa.model_dump()) + if not createdFa: + logger.warning(f"Failed to create FeatureAccess for user {userId} on instance {featureInstanceId}") + return {} + + instanceRoles = db.getRecordset(Role, recordFilter={"featureInstanceId": featureInstanceId}) + adminRoleId = None + for r in instanceRoles: + roleLabel = (r.get("roleLabel") or "").lower() + if roleLabel.endswith("-admin"): + adminRoleId = r.get("id") + break + if not adminRoleId: + raise ValueError( + f"No feature-specific admin role for instance {featureInstanceId}. " + f"Cannot create FeatureAccess without role — even in migration context." + ) + far = FeatureAccessRole(featureAccessId=createdFa["id"], roleId=adminRoleId) + db.recordCreate(FeatureAccessRole, far.model_dump()) + + return createdFa + + def migrateRootUsers(db, dryRun: bool = False) -> dict: """ Migrate all end-user feature data from Root mandate to personal mandates. 
@@ -68,12 +165,15 @@ def migrateRootUsers(db, dryRun: bool = False) -> dict: ) from modules.datamodels.datamodelFeatures import FeatureInstance from modules.interfaces.interfaceDbApp import getRootInterface + from modules.interfaces.interfaceFeatures import getFeatureInterface rootInterface = getRootInterface() + featureInterface = getFeatureInterface(db) stats = { "usersProcessed": 0, "mandatesCreated": 0, "instancesMigrated": 0, + "dataRowsMigrated": 0, "rootInstancesDeleted": 0, "rootMembershipsRemoved": 0, "dryRun": dryRun, @@ -167,12 +267,29 @@ def migrateRootUsers(db, dryRun: bool = False) -> dict: logger.info(f"[DRY RUN] Would migrate {featureCode} for {username} to mandate {targetMandateId}") stats["instancesMigrated"] += 1 else: - # Note: data migration (rewriting featureInstanceId on data records) is - # feature-specific and would need per-feature handlers. For now, we create - # the new instance and transfer the access. Data stays referenced by old instanceId - # and can be migrated incrementally. - logger.info(f"Migrated access for {username} on {featureCode} (data migration deferred)") + targetInstance = _findOrCreateTargetInstance( + db, featureInterface, featureCode, targetMandateId, instRecords[0], + ) + newInstanceId = targetInstance.get("id") + if not newInstanceId: + logger.error(f"Failed to obtain target instance for {featureCode} in mandate {targetMandateId}") + continue + + migratedCount = _migrateDataRecords(db, oldInstanceId, newInstanceId, userId) + + _grantFeatureAccess(db, userId, newInstanceId) + + try: + db.recordDelete(FeatureAccess, oldAccessId) + except Exception as delErr: + logger.warning(f"Could not remove old FeatureAccess {oldAccessId}: {delErr}") + + logger.info( + f"Migrated {featureCode} for {username}: " + f"instance {oldInstanceId} -> {newInstanceId}, {migratedCount} data rows moved" + ) stats["instancesMigrated"] += 1 + stats["dataRowsMigrated"] += migratedCount stats["usersProcessed"] += 1 diff --git a/modules/routes/routeAdminFeatures.py b/modules/routes/routeAdminFeatures.py index c95c0b1b..12206b06 100644 --- a/modules/routes/routeAdminFeatures.py +++ b/modules/routes/routeAdminFeatures.py @@ -1172,31 +1172,29 @@ def add_user_to_feature_instance( detail=f"User '{data.userId}' not found" ) - # Check if user already has access - from modules.datamodels.datamodelMembership import FeatureAccess, FeatureAccessRole - existingAccess = rootInterface.getFeatureAccess(data.userId, instanceId) - if existingAccess: + if not data.roleIds: raise HTTPException( - status_code=status.HTTP_409_CONFLICT, - detail="User already has access to this feature instance" + status_code=status.HTTP_400_BAD_REQUEST, + detail="At least one role is required to grant feature access" ) - - # Create FeatureAccess record - featureAccess = FeatureAccess( + + from modules.datamodels.datamodelRbac import Role + instanceRoles = rootInterface.db.getRecordset(Role, recordFilter={"featureInstanceId": instanceId}) + validRoleIds = {r.get("id") for r in instanceRoles} + invalidRoles = [rid for rid in data.roleIds if rid not in validRoleIds] + if invalidRoles: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=f"Role IDs {invalidRoles} do not belong to feature instance {instanceId}. " + f"Only instance-scoped roles are allowed, never mandate roles." 
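+            # The same instance-scope validation is enforced again in update_feature_instance_user_roles below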
+ ) + + featureAccess = rootInterface.createFeatureAccess( userId=data.userId, featureInstanceId=instanceId, - enabled=True + roleIds=data.roleIds ) - createdAccess = rootInterface.db.recordCreate(FeatureAccess, featureAccess.model_dump()) - featureAccessId = createdAccess.get("id") - - # Create FeatureAccessRole records for each role - for roleId in data.roleIds: - featureAccessRole = FeatureAccessRole( - featureAccessId=featureAccessId, - roleId=roleId - ) - rootInterface.db.recordCreate(FeatureAccessRole, featureAccessRole.model_dump()) + featureAccessId = str(featureAccess.id) logger.info( f"User {context.user.id} added user {data.userId} to feature instance {instanceId} " @@ -1379,10 +1377,19 @@ def update_feature_instance_user_roles( if data.enabled is not None: rootInterface.db.recordModify(FeatureAccess, featureAccessId, {"enabled": data.enabled}) - # Delete existing FeatureAccessRole records via interface method + from modules.datamodels.datamodelRbac import Role + instanceRoles = rootInterface.db.getRecordset(Role, recordFilter={"featureInstanceId": instanceId}) + validRoleIds = {r.get("id") for r in instanceRoles} + invalidRoles = [rid for rid in data.roleIds if rid not in validRoleIds] + if invalidRoles: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=f"Role IDs {invalidRoles} do not belong to feature instance {instanceId}. " + f"Only instance-scoped roles are allowed, never mandate roles." + ) + rootInterface.deleteFeatureAccessRoles(featureAccessId) - # Create new FeatureAccessRole records for roleId in data.roleIds: featureAccessRole = FeatureAccessRole( featureAccessId=featureAccessId, @@ -1523,6 +1530,65 @@ def get_feature( ) +# ============================================================================= +# Instance Rename (for instance admins, used by navigation tree) +# ============================================================================= + +class FeatureInstanceRenameRequest(BaseModel): + """Request model for renaming a feature instance""" + label: str = Field(..., min_length=1, max_length=200, description="New label for the instance") + + +@router.patch("/instances/{instanceId}/rename", response_model=Dict[str, Any]) +@limiter.limit("30/minute") +def _renameFeatureInstance( + request: Request, + instanceId: str, + data: FeatureInstanceRenameRequest, + context: RequestContext = Depends(getRequestContext), +) -> Dict[str, Any]: + """ + Rename a feature instance. Requires instance admin role. 
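+    Sysadmins always qualify; other callers need a '*-admin' role on this instance (checked below).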
+ """ + try: + rootInterface = getRootInterface() + featureInterface = getFeatureInterface(rootInterface.db) + + instance = featureInterface.getFeatureInstance(instanceId) + if not instance: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Feature instance not found") + + userId = str(context.user.id) + isInstanceAdmin = False + if context.hasSysAdminRole: + isInstanceAdmin = True + else: + from modules.datamodels.datamodelMembership import FeatureAccess, FeatureAccessRole + fa = rootInterface.getFeatureAccess(userId, instanceId) + if fa: + faRoleIds = rootInterface.getRoleIdsForFeatureAccess(str(fa.id)) + for rid in faRoleIds: + role = rootInterface.getRole(rid) + if role and (role.roleLabel or "").lower().endswith("-admin"): + isInstanceAdmin = True + break + + if not isInstanceAdmin: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Instance admin role required to rename") + + updated = featureInterface.updateFeatureInstance(instanceId, {"label": data.label.strip()}) + if not updated: + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to update instance") + + return {"id": instanceId, "label": updated.label} + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error renaming feature instance {instanceId}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)) + + # ============================================================================= # Helper Functions # ============================================================================= diff --git a/modules/routes/routeDataFiles.py b/modules/routes/routeDataFiles.py index f98b2306..5f71bb47 100644 --- a/modules/routes/routeDataFiles.py +++ b/modules/routes/routeDataFiles.py @@ -8,6 +8,7 @@ import json # Import auth module from modules.auth import limiter, getCurrentUser, getRequestContext, RequestContext +from modules.auth.authentication import _hasSysAdminRole # Import interfaces import modules.interfaces.interfaceDbManagement as interfaceDbManagement @@ -699,6 +700,20 @@ def updateFileScope( except Exception as e: logger.warning(f"Failed to update FileContentIndex scope for file {fileId}: {e}") + # Trigger re-indexing so RAG embeddings metadata reflects the new scope + try: + fileMeta = managementInterface.getFile(fileId) + if fileMeta: + import asyncio + asyncio.ensure_future(_autoIndexFile( + fileId=fileId, + fileName=fileMeta.fileName if hasattr(fileMeta, "fileName") else fileMeta.get("fileName", ""), + mimeType=fileMeta.mimeType if hasattr(fileMeta, "mimeType") else fileMeta.get("mimeType", ""), + user=context.user, + )) + except Exception as e: + logger.warning(f"Failed to trigger re-index after scope change for file {fileId}: {e}") + return {"fileId": fileId, "scope": scope, "updated": True} except HTTPException: raise @@ -725,6 +740,34 @@ def updateFileNeutralize( managementInterface.updateFile(fileId, {"neutralize": neutralize}) + # Update FileContentIndex neutralization metadata + try: + from modules.interfaces.interfaceDbKnowledge import getInterface as getKnowledgeInterface + from modules.datamodels.datamodelKnowledge import FileContentIndex + knowledgeDb = getKnowledgeInterface() + neutralizationStatus = "neutralized" if neutralize else "original" + indices = knowledgeDb.db.getRecordset(FileContentIndex, recordFilter={"id": fileId}) + for idx in indices: + idxId = idx.get("id") if isinstance(idx, dict) else getattr(idx, "id", None) + if idxId: + 
knowledgeDb.db.recordModify(FileContentIndex, idxId, {"neutralizationStatus": neutralizationStatus}) + except Exception as e: + logger.warning(f"Failed to update FileContentIndex neutralize for file {fileId}: {e}") + + # Trigger re-indexing so content is re-processed with/without neutralization + try: + fileMeta = managementInterface.getFile(fileId) + if fileMeta: + import asyncio + asyncio.ensure_future(_autoIndexFile( + fileId=fileId, + fileName=fileMeta.fileName if hasattr(fileMeta, "fileName") else fileMeta.get("fileName", ""), + mimeType=fileMeta.mimeType if hasattr(fileMeta, "mimeType") else fileMeta.get("mimeType", ""), + user=context.user, + )) + except Exception as e: + logger.warning(f"Failed to trigger re-index after neutralize change for file {fileId}: {e}") + return {"fileId": fileId, "neutralize": neutralize, "updated": True} except Exception as e: logger.error(f"Error updating file neutralize flag: {e}") @@ -799,6 +842,12 @@ def update_file( detail=f"File with ID {fileId} not found" ) + if file_info.get("scope") == "global" and not _hasSysAdminRole(str(currentUser.id)): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Only sysadmins can set global scope", + ) + # Check if user has access to the file using RBAC if not managementInterface.checkRbacPermission(FileItem, "update", fileId): raise HTTPException( diff --git a/modules/routes/routeDataSources.py b/modules/routes/routeDataSources.py new file mode 100644 index 00000000..e210d094 --- /dev/null +++ b/modules/routes/routeDataSources.py @@ -0,0 +1,97 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +"""PATCH endpoints for DataSource and FeatureDataSource scope/neutralize tagging.""" + +import logging +from typing import Any, Dict + +from fastapi import APIRouter, HTTPException, Depends, Path, Request, Body +from modules.auth import limiter, getRequestContext, RequestContext +from modules.auth.authentication import _hasSysAdminRole +from modules.datamodels.datamodelDataSource import DataSource +from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource + +logger = logging.getLogger(__name__) + +router = APIRouter( + prefix="/api/datasources", + tags=["Data Sources"], + responses={ + 404: {"description": "Not found"}, + 400: {"description": "Bad request"}, + 401: {"description": "Unauthorized"}, + 403: {"description": "Forbidden"}, + 500: {"description": "Internal server error"}, + }, +) + +_VALID_SCOPES = {"personal", "featureInstance", "mandate", "global"} + + +def _findSourceRecord(db, sourceId: str): + """Look up a source by ID, checking DataSource first, then FeatureDataSource.""" + rec = db.getRecord(DataSource, sourceId) + if rec: + return rec, DataSource + rec = db.getRecord(FeatureDataSource, sourceId) + if rec: + return rec, FeatureDataSource + return None, None + + +@router.patch("/{sourceId}/scope") +@limiter.limit("30/minute") +def _updateDataSourceScope( + request: Request, + sourceId: str = Path(..., description="ID of the DataSource or FeatureDataSource"), + scope: str = Body(..., embed=True), + context: RequestContext = Depends(getRequestContext), +) -> Dict[str, Any]: + """Update the scope of a DataSource or FeatureDataSource. Global scope requires sysAdmin.""" + if scope not in _VALID_SCOPES: + raise HTTPException(status_code=400, detail=f"Invalid scope: {scope}. 
Must be one of {_VALID_SCOPES}")
+
+    if scope == "global" and not _hasSysAdminRole(str(context.user.id)):
+        raise HTTPException(status_code=403, detail="Only sysadmins can set global scope")
+
+    try:
+        from modules.interfaces.interfaceDbApp import getRootInterface
+        rootIf = getRootInterface()
+        rec, model = _findSourceRecord(rootIf.db, sourceId)
+        if not rec:
+            raise HTTPException(status_code=404, detail=f"DataSource {sourceId} not found")
+
+        rootIf.db.recordModify(model, sourceId, {"scope": scope})
+        logger.info("Updated scope=%s for %s %s", scope, model.__name__, sourceId)
+        return {"sourceId": sourceId, "scope": scope, "updated": True}
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error("Error updating datasource scope: %s", e)
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.patch("/{sourceId}/neutralize")
+@limiter.limit("30/minute")
+def _updateDataSourceNeutralize(
+    request: Request,
+    sourceId: str = Path(..., description="ID of the DataSource or FeatureDataSource"),
+    neutralize: bool = Body(..., embed=True),
+    context: RequestContext = Depends(getRequestContext),
+) -> Dict[str, Any]:
+    """Toggle the neutralization flag on a DataSource or FeatureDataSource."""
+    try:
+        from modules.interfaces.interfaceDbApp import getRootInterface
+        rootIf = getRootInterface()
+        rec, model = _findSourceRecord(rootIf.db, sourceId)
+        if not rec:
+            raise HTTPException(status_code=404, detail=f"DataSource {sourceId} not found")
+
+        rootIf.db.recordModify(model, sourceId, {"neutralize": neutralize})
+        logger.info("Updated neutralize=%s for %s %s", neutralize, model.__name__, sourceId)
+        return {"sourceId": sourceId, "neutralize": neutralize, "updated": True}
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error("Error updating datasource neutralize: %s", e)
+        raise HTTPException(status_code=500, detail=str(e))
diff --git a/modules/routes/routeDataUsers.py b/modules/routes/routeDataUsers.py
index 7e903466..a1da658b 100644
--- a/modules/routes/routeDataUsers.py
+++ b/modules/routes/routeDataUsers.py
@@ -639,14 +639,17 @@ def create_user(
     # MULTI-TENANT: Add user to current mandate via UserMandate with default "user" role
     if context.mandateId:
-        # Get "user" role ID
         userRole = appInterface.getRoleByLabel("user")
-        roleIds = [str(userRole.id)] if userRole else []
+        if not userRole:
+            raise HTTPException(
+                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+                detail="No 'user' role found in system — cannot assign user to mandate"
+            )
         appInterface.createUserMandate(
             userId=str(newUser.id),
             mandateId=str(context.mandateId),
-            roleIds=roleIds
+            roleIds=[str(userRole.id)]
         )
         logger.info(f"Created UserMandate for user {newUser.id} in mandate {context.mandateId}")
diff --git a/modules/routes/routeSecurityLocal.py b/modules/routes/routeSecurityLocal.py
index 8b1d9e8e..f066fda2 100644
--- a/modules/routes/routeSecurityLocal.py
+++ b/modules/routes/routeSecurityLocal.py
@@ -4,7 +4,7 @@
 Routes for local security and authentication.
""" -from fastapi import APIRouter, HTTPException, status, Depends, Request, Response, Body, Query +from fastapi import APIRouter, HTTPException, status, Depends, Request, Response, Body, Query, Path from fastapi.security import OAuth2PasswordRequestForm import logging from typing import Dict, Any @@ -14,7 +14,7 @@ import uuid from jose import jwt # Import auth modules -from modules.auth import getCurrentUser, limiter, SECRET_KEY, ALGORITHM +from modules.auth import getCurrentUser, limiter, SECRET_KEY, ALGORITHM, getRequestContext, RequestContext from modules.auth import createAccessToken, createRefreshToken, setAccessTokenCookie, setRefreshTokenCookie, clearAccessTokenCookie, clearRefreshTokenCookie from modules.interfaces.interfaceDbApp import getInterface, getRootInterface from modules.datamodels.datamodelUam import User, UserInDB, AuthAuthority, Mandate, MandateType @@ -730,6 +730,13 @@ def onboarding_provision( planKey=planKey, ) + try: + activatedCount = appInterface._activatePendingSubscriptions(str(currentUser.id)) + if activatedCount > 0: + logger.info(f"Activated {activatedCount} pending subscription(s) for user {currentUser.username} during onboarding") + except Exception as subErr: + logger.error(f"Error activating subscriptions during onboarding: {subErr}") + logger.info(f"Onboarding provision for {currentUser.username}: {result}") return { "message": "Mandate provisioned successfully", @@ -922,3 +929,45 @@ async def testVoice( ).decode() return {"success": True, "audio": audioB64, "format": "mp3", "text": text} return {"success": False, "error": "TTS returned no audio"} + + +# ============================================================ +# Neutralization Mappings (user-level, view/delete) +# ============================================================ + +@router.get("/neutralization-mappings") +@limiter.limit("60/minute") +def _getNeutralizationMappings( + request: Request, + context: RequestContext = Depends(getRequestContext), +): + """List the current user's neutralization placeholder mappings.""" + userId = str(context.user.id) + from modules.interfaces.interfaceDbApp import getRootInterface + from modules.features.neutralization.datamodelFeatureNeutralizer import DataNeutralizerAttributes + rootIf = getRootInterface() + records = rootIf.db.getRecordset(DataNeutralizerAttributes, recordFilter={"userId": userId}) + return {"mappings": records} + + +@router.delete("/neutralization-mappings/{mappingId}") +@limiter.limit("30/minute") +def _deleteNeutralizationMapping( + request: Request, + mappingId: str = Path(..., description="ID of the mapping to delete"), + context: RequestContext = Depends(getRequestContext), +): + """Delete a specific neutralization mapping owned by the current user.""" + userId = str(context.user.id) + from modules.interfaces.interfaceDbApp import getRootInterface + from modules.features.neutralization.datamodelFeatureNeutralizer import DataNeutralizerAttributes + rootIf = getRootInterface() + records = rootIf.db.getRecordset(DataNeutralizerAttributes, recordFilter={"id": mappingId}) + if not records: + raise HTTPException(status_code=404, detail="Mapping not found") + rec = records[0] + recUserId = rec.get("userId") if isinstance(rec, dict) else getattr(rec, "userId", None) + if recUserId != userId: + raise HTTPException(status_code=403, detail="Not your mapping") + rootIf.db.recordDelete(DataNeutralizerAttributes, mappingId) + return {"deleted": True, "id": mappingId} diff --git a/modules/routes/routeStore.py b/modules/routes/routeStore.py index 
99c582c6..cbd4ef6e 100644
--- a/modules/routes/routeStore.py
+++ b/modules/routes/routeStore.py
@@ -36,7 +36,7 @@ router = APIRouter(
 class StoreActivateRequest(BaseModel):
     """Request model for activating a store feature."""
     featureCode: str = Field(..., description="Feature code to activate")
-    mandateId: Optional[str] = Field(None, description="Target mandate ID (explicit). If None and user has no admin mandate, auto-creates personal mandate.")
+    mandateId: str = Field(..., description="Target mandate ID — always explicit, never optional")
 
 class StoreDeactivateRequest(BaseModel):
@@ -134,12 +134,27 @@ def listUserMandates(
     request: Request,
     context: RequestContext = Depends(getRequestContext)
 ) -> List[Dict[str, Any]]:
-    """List mandates where the user can activate features (admin mandates)."""
+    """
+    List mandates where the user can activate features (admin mandates).
+    If the user has no admin mandates, auto-provisions a personal mandate so the
+    Store always has a clear mandate context.
+    """
     try:
         rootInterface = getRootInterface()
         db = rootInterface.db
         userId = str(context.user.id)
         adminMandateIds = _getUserAdminMandateIds(db, userId)
+
+        if not adminMandateIds:
+            provisionResult = rootInterface._provisionMandateForUser(
+                userId=userId,
+                mandateType="personal",
+                mandateName=context.user.fullName or context.user.username,
+                planKey="TRIAL_7D",
+            )
+            adminMandateIds = [provisionResult["mandateId"]]
+            logger.info(f"Auto-provisioned personal mandate {adminMandateIds[0]} for user {userId} on Store access")
+
         result = []
         for mid in adminMandateIds:
             records = db.getRecordset(Mandate, recordFilter={"id": mid})
@@ -253,7 +268,8 @@ def activateStoreFeature(
 ) -> Dict[str, Any]:
     """
     Activate a store feature. Creates a new FeatureInstance in the target mandate.
-    If mandateId is None and user has no admin mandate, auto-creates a personal mandate.
+    The target mandate is always explicit; callers obtain it from listUserMandates,
+    which auto-provisions a personal mandate when the user has none.
     """
     featureCode = data.featureCode
     userId = str(context.user.id)
@@ -269,27 +284,6 @@
     mandateId = data.mandateId
 
-    # Auto-create personal mandate if user has no admin mandates
-    if not mandateId:
-        adminMandateIds = _getUserAdminMandateIds(db, userId)
-        if not adminMandateIds:
-            provisionResult = rootInterface._provisionMandateForUser(
-                userId=userId,
-                mandateType="personal",
-                mandateName=context.user.fullName or context.user.username,
-                planKey="TRIAL_7D",
-            )
-            mandateId = provisionResult["mandateId"]
-            logger.info(f"Auto-created personal mandate {mandateId} for user {userId} via store")
-        elif len(adminMandateIds) == 1:
-            mandateId = adminMandateIds[0]
-        else:
-            raise HTTPException(
-                status_code=status.HTTP_400_BAD_REQUEST,
-                detail="mandateId is required when user has multiple admin mandates"
-            )
-
-    # Verify user is admin in target mandate
     if not _isUserAdminInMandate(db, userId, mandateId):
         raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Not admin in target mandate")
@@ -323,19 +317,23 @@
     instanceId = instance.get("id") if isinstance(instance, dict) else instance.id
 
-    # Grant FeatureAccess with admin role
+    # Grant FeatureAccess with admin role — MUST be feature-specific (e.g.
workspace-admin) instanceRoles = db.getRecordset(Role, recordFilter={"featureInstanceId": instanceId}) adminRoleId = None for ir in instanceRoles: - if "admin" in (ir.get("roleLabel") or "").lower(): + roleLabel = (ir.get("roleLabel") or "").lower() + if roleLabel.endswith("-admin"): adminRoleId = ir.get("id") break - fa = FeatureAccess(userId=userId, featureInstanceId=instanceId, enabled=True) - createdFa = db.recordCreate(FeatureAccess, fa.model_dump()) - if adminRoleId and createdFa: - far = FeatureAccessRole(featureAccessId=createdFa["id"], roleId=adminRoleId) - db.recordCreate(FeatureAccessRole, far.model_dump()) + if not adminRoleId: + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"No feature-specific admin role (e.g. {featureCode}-admin) found for instance {instanceId}. " + f"Template roles were not correctly copied.", + ) + + rootInterface.createFeatureAccess(userId, instanceId, roleIds=[adminRoleId]) # Sync subscription quantity try: diff --git a/modules/routes/routeSubscription.py b/modules/routes/routeSubscription.py index 8334a8c0..0c5eed4e 100644 --- a/modules/routes/routeSubscription.py +++ b/modules/routes/routeSubscription.py @@ -12,7 +12,7 @@ Endpoints: - POST /api/subscription/force-cancel — sysadmin immediate cancel (by ID) """ -from fastapi import APIRouter, HTTPException, Depends, Request, Query +from fastapi import APIRouter, HTTPException, Depends, Request, Query, Path from fastapi import status from typing import Dict, Any, List, Optional import logging @@ -435,3 +435,59 @@ def getFilterValues( crossFiltered = _applyFiltersAndSort(enriched, crossFilterParams) return _extractDistinctValues(crossFiltered, column) + + +# ============================================================ +# Data Volume Usage per Mandate +# ============================================================ + +@router.get("/data-volume/{targetMandateId}") +@limiter.limit("60/minute") +def _getDataVolumeUsage( + request: Request, + targetMandateId: str = Path(..., description="Mandate ID to check volume for"), + context: RequestContext = Depends(getRequestContext), +): + """Calculate current data volume usage for a mandate vs. 
plan limit."""
+    from modules.interfaces.interfaceDbApp import getRootInterface
+    from modules.datamodels.datamodelFiles import FileItem
+    from modules.datamodels.datamodelSubscription import MandateSubscription, SubscriptionPlan
+    from modules.datamodels.datamodelFeatures import FeatureInstance
+
+    rootIf = getRootInterface()
+    mandateId = targetMandateId
+
+    instances = rootIf.db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId})
+    totalBytes = 0
+    for inst in instances:
+        instId = inst.get("id") if isinstance(inst, dict) else getattr(inst, "id", None)
+        if not instId:
+            continue
+        files = rootIf.db.getRecordset(FileItem, recordFilter={"featureInstanceId": instId})
+        for f in files:
+            size = f.get("fileSize") if isinstance(f, dict) else getattr(f, "fileSize", 0)
+            totalBytes += (size or 0)
+
+    usedMB = round(totalBytes / (1024 * 1024), 2)
+
+    maxMB = None
+    subs = rootIf.db.getRecordset(MandateSubscription, recordFilter={"mandateId": mandateId})
+    for sub in subs:
+        planKey = sub.get("planKey") if isinstance(sub, dict) else getattr(sub, "planKey", "")
+        if planKey:
+            plans = rootIf.db.getRecordset(SubscriptionPlan, recordFilter={"planKey": planKey})
+            for plan in plans:
+                limit = plan.get("maxDataVolumeMB") if isinstance(plan, dict) else getattr(plan, "maxDataVolumeMB", None)
+                if limit:
+                    maxMB = limit
+                    break
+        if maxMB:
+            break
+
+    return {
+        "mandateId": mandateId,
+        "usedMB": usedMB,
+        "maxDataVolumeMB": maxMB,
+        "percentUsed": round((usedMB / maxMB) * 100, 1) if maxMB else None,
+        "warning": usedMB >= (maxMB * 0.8) if maxMB else False,
+    }
diff --git a/modules/routes/routeSystem.py b/modules/routes/routeSystem.py
index 60e498bd..5a08202c 100644
--- a/modules/routes/routeSystem.py
+++ b/modules/routes/routeSystem.py
@@ -247,12 +247,12 @@ def _buildDynamicBlock(
     # Sort views by order
     views.sort(key=lambda v: v["order"])
 
-    # Add instance to feature
     featuresMap[featureKey]["instances"].append({
         "id": str(instance.id),
         "uiLabel": instance.label,
         "order": 10,
-        "views": views
+        "views": views,
+        "isAdmin": permissions.get("isAdmin", False),
     })
 
     # Build final structure
diff --git a/modules/serviceCenter/services/serviceAi/mainServiceAi.py b/modules/serviceCenter/services/serviceAi/mainServiceAi.py
index 37b8b0ba..541835a3 100644
--- a/modules/serviceCenter/services/serviceAi/mainServiceAi.py
+++ b/modules/serviceCenter/services/serviceAi/mainServiceAi.py
@@ -177,8 +177,11 @@ class AiService:
         # Neutralize prompt if enabled (before AI call)
         _wasNeutralized = False
+        _excludedDocs: List[str] = []
         if self._shouldNeutralize(request):
-            request, _wasNeutralized = self._neutralizeRequest(request)
+            request, _wasNeutralized, _excludedDocs = self._neutralizeRequest(request)
+            if _excludedDocs:
+                logger.warning(f"Neutralization partial failures (continuing): {_excludedDocs}")
 
         # Set billing callback on aiObjects BEFORE the AI call
         # This callback is invoked by _callWithModel() after EVERY individual model call
@@ -199,6 +202,15 @@
         if _wasNeutralized and response and hasattr(response, 'content') and response.content:
             response.content = self._rehydrateResponse(response.content)
 
+        # Attach neutralization exclusion metadata if any parts failed
+        if _excludedDocs and response:
+            if not hasattr(response, 'metadata') or response.metadata is None:
+                response.metadata = {}
+            if isinstance(response.metadata, dict):
+                response.metadata["neutralizationExcluded"] = _excludedDocs
+            elif hasattr(response.metadata, '__dict__'):
+                response.metadata.neutralizationExcluded =
_excludedDocs + return response async def callAiStream(self, request: AiCallRequest): @@ -217,15 +229,26 @@ class AiService: # Neutralize prompt if enabled (before streaming) _wasNeutralized = False + _excludedDocs: List[str] = [] if self._shouldNeutralize(request): - request, _wasNeutralized = self._neutralizeRequest(request) + request, _wasNeutralized, _excludedDocs = self._neutralizeRequest(request) + if _excludedDocs: + logger.warning(f"Neutralization partial failures in stream (continuing): {_excludedDocs}") self.aiObjects.billingCallback = self._createBillingCallback() try: async for chunk in self.aiObjects.callWithTextContextStream(request): # Rehydrate the final AiCallResponse (non-str chunks are the final response) - if _wasNeutralized and not isinstance(chunk, str) and hasattr(chunk, 'content') and chunk.content: - chunk.content = self._rehydrateResponse(chunk.content) + if not isinstance(chunk, str): + if _wasNeutralized and hasattr(chunk, 'content') and chunk.content: + chunk.content = self._rehydrateResponse(chunk.content) + if _excludedDocs: + if not hasattr(chunk, 'metadata') or chunk.metadata is None: + chunk.metadata = {} + if isinstance(chunk.metadata, dict): + chunk.metadata["neutralizationExcluded"] = _excludedDocs + elif hasattr(chunk.metadata, '__dict__'): + chunk.metadata.neutralizationExcluded = _excludedDocs yield chunk finally: self.aiObjects.billingCallback = None @@ -541,40 +564,71 @@ detectedIntent-Werte: def _shouldNeutralize(self, request: AiCallRequest) -> bool: """Check if this AI request should have neutralization applied. - Only applies to text prompts — not embeddings or image processing.""" + Per-request override: requireNeutralization=True forces it, False skips it. + Only applies to text prompts -- not embeddings or image processing.""" try: + if request.requireNeutralization is False: + return False + if not request.prompt and not request.messages: + return False + if request.requireNeutralization is True: + return True neutralSvc = self._get_service("neutralization") if not neutralSvc: return False config = neutralSvc.getConfig() if hasattr(neutralSvc, 'getConfig') else None if not config or not getattr(config, 'enabled', False): return False - if not request.prompt and not request.messages: - return False return True except Exception: return False - def _neutralizeRequest(self, request: AiCallRequest) -> Tuple[AiCallRequest, bool]: - """Neutralize the prompt text in an AiCallRequest. - Returns (modifiedRequest, wasNeutralized). - Raises RuntimeError if neutralization is required but fails (fail-safe).""" + def _neutralizeRequest(self, request: AiCallRequest) -> Tuple[AiCallRequest, bool, List[str]]: + """Neutralize the prompt text and messages in an AiCallRequest. + Returns (modifiedRequest, wasNeutralized, excludedDocs). 
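+        excludedDocs carries a human-readable reason for each prompt or message part left un-neutralized.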
+ Fail-safe: failing parts are excluded instead of aborting the entire call.""" + excludedDocs: List[str] = [] + neutralSvc = self._get_service("neutralization") if not neutralSvc or not hasattr(neutralSvc, 'processText'): - raise RuntimeError("Neutralization required but neutralization service is unavailable") + logger.warning("Neutralization required but neutralization service is unavailable — continuing without neutralization") + excludedDocs.append("Neutralization service unavailable; prompt sent un-neutralized") + return request, False, excludedDocs + + _wasNeutralized = False if request.prompt: - result = neutralSvc.processText(request.prompt) - if result and result.get("neutralized_text"): - request.prompt = result["neutralized_text"] - logger.debug("Neutralized prompt in AiCallRequest") - return request, True - raise RuntimeError( - "Neutralization required but processText returned no neutralized_text — " - "AI call blocked to protect sensitive data" - ) + try: + result = neutralSvc.processText(request.prompt) + if result and result.get("neutralized_text"): + request.prompt = result["neutralized_text"] + _wasNeutralized = True + logger.debug("Neutralized prompt in AiCallRequest") + else: + logger.warning("Neutralization of prompt returned no neutralized_text — sending original prompt") + excludedDocs.append("Prompt neutralization failed; original prompt used") + except Exception as e: + logger.warning(f"Neutralization of prompt failed: {e} — sending original prompt") + excludedDocs.append(f"Prompt neutralization error: {e}") - return request, False + if request.messages and isinstance(request.messages, list): + for idx, msg in enumerate(request.messages): + content = msg.get("content") if isinstance(msg, dict) else None + if not isinstance(content, str) or not content: + continue + try: + result = neutralSvc.processText(content) + if result and result.get("neutralized_text"): + msg["content"] = result["neutralized_text"] + _wasNeutralized = True + else: + logger.warning(f"Neutralization of message[{idx}] returned no neutralized_text — keeping original") + excludedDocs.append(f"Message[{idx}] neutralization failed; original kept") + except Exception as e: + logger.warning(f"Neutralization of message[{idx}] failed: {e} — keeping original") + excludedDocs.append(f"Message[{idx}] neutralization error: {e}") + + return request, _wasNeutralized, excludedDocs def _rehydrateResponse(self, responseText: str) -> str: """Replace neutralization placeholders with original values in AI response.""" diff --git a/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py b/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py index 77e8530e..14a01557 100644 --- a/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py +++ b/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py @@ -111,7 +111,7 @@ class KnowledgeService: # 2. 
Chunk text content objects and create embeddings textObjects = [o for o in contentObjects if o.get("contentType") == "text"] - # Check if file requires neutralization + # Read FileItem attributes for index metadata and neutralization _shouldNeutralize = False try: from modules.datamodels.datamodelFiles import FileItem as _FileItem @@ -119,10 +119,14 @@ class KnowledgeService: _fileRecords = _dbComponent.getRecordset(_FileItem, recordFilter={"id": fileId}) if _dbComponent else [] if _fileRecords: _fileRecord = _fileRecords[0] - _shouldNeutralize = ( - _fileRecord.get("neutralize", False) if isinstance(_fileRecord, dict) - else getattr(_fileRecord, "neutralize", False) - ) + _get = (lambda k, d=None: _fileRecord.get(k, d)) if isinstance(_fileRecord, dict) else (lambda k, d=None: getattr(_fileRecord, k, d)) + _shouldNeutralize = bool(_get("neutralize", False)) + _fileScope = _get("scope") + if _fileScope: + index.scope = _fileScope + _fileCreatedBy = _get("_createdBy") + if _fileCreatedBy: + index.userId = str(_fileCreatedBy) except Exception: pass @@ -201,6 +205,7 @@ class KnowledgeService: if _shouldNeutralize: try: index.neutralizationStatus = "completed" + index.isNeutralized = True self._knowledgeDb.upsertFileContentIndex(index) except Exception as e: logger.debug(f"Could not set neutralizationStatus for file {fileId}: {e}") diff --git a/modules/shared/attributeUtils.py b/modules/shared/attributeUtils.py index 6a857d85..863d7f36 100644 --- a/modules/shared/attributeUtils.py +++ b/modules/shared/attributeUtils.py @@ -258,27 +258,6 @@ def getModelAttributeDefinitions(modelClass: Type[BaseModel] = None, userLanguag attributes.append(attr_def) - # Append system timestamp fields (set automatically by DatabaseConnector) - systemTimestampFields = [ - ("_createdAt", {"en": "Created at", "de": "Erstellt am", "fr": "Créé le"}), - ("_modifiedAt", {"en": "Modified at", "de": "Geändert am", "fr": "Modifié le"}), - ] - for sysName, sysLabels in systemTimestampFields: - attributes.append({ - "name": sysName, - "type": "timestamp", - "required": False, - "description": "", - "label": sysLabels.get(userLanguage, sysLabels["en"]), - "placeholder": "", - "editable": False, - "visible": True, - "order": len(attributes), - "readonly": True, - "options": None, - "default": None, - }) - return {"model": model_label, "attributes": attributes} From 75484c0f7360ff78dc3b87a73d794e2942eb9eff Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Sat, 28 Mar 2026 18:12:37 +0100 Subject: [PATCH 06/33] BREAKING CHANGE API and persisted records use PowerOnModel system fields: - sysCreatedAt, sysCreatedBy, sysModifiedAt, sysModifiedBy Removed legacy JSON/DB field names: - _createdAt, _createdBy, _modifiedAt, _modifiedBy Frontend (frontend_nyla) and gateway call sites were updated accordingly. Database: - Bootstrap runs idempotent backfill (_migrateSystemFieldColumns) from old underscore columns and selected business duplicates into sys* where sys* IS NULL. - Re-run app bootstrap against each PostgreSQL database after deploy. - Optional: DROP INDEX IF EXISTS "idx_invitation_createdby" if an old index remains; new index: idx_invitation_syscreatedby on Invitation(sysCreatedBy). Tests: - RBAC integration tests aligned with current GROUP mandate filter and UserMandate-based UserConnection GROUP clause; buildRbacWhereClause(..., mandateId=...) must be passed explicitly (same as production request context). 
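
Backfill sketch (illustrative only; `db` stands for an application DatabaseConnector,
and the authoritative logic lives in _migrateSystemFieldColumns, which also covers
sysModifiedAt/sysModifiedBy and the selected business duplicates):

    with db.connection.cursor() as cursor:
        # Copy legacy underscore values into sys* columns only where sys* is still NULL (idempotent)
        cursor.execute(
            'UPDATE "Invitation" '
            'SET "sysCreatedAt" = "_createdAt", "sysCreatedBy" = "_createdBy" '
            'WHERE "sysCreatedAt" IS NULL'
        )
        db.connection.commit()

Test call shape after this change (mandateId is no longer inferred by the connector):

    whereClause = db.buildRbacWhereClause(permissions, currentUser, "UserConnection", mandateId=mandateId)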
--- modules/connectors/connectorDbPostgre.py | 110 ++++++++-------- modules/datamodels/datamodelBase.py | 68 ++++++++++ modules/datamodels/datamodelBilling.py | 5 +- modules/datamodels/datamodelChat.py | 9 +- modules/datamodels/datamodelDataSource.py | 6 +- .../datamodels/datamodelFeatureDataSource.py | 6 +- modules/datamodels/datamodelFeatures.py | 5 +- modules/datamodels/datamodelFileFolder.py | 6 +- modules/datamodels/datamodelFiles.py | 11 +- modules/datamodels/datamodelInvitation.py | 15 +-- modules/datamodels/datamodelKnowledge.py | 13 +- modules/datamodels/datamodelMembership.py | 9 +- modules/datamodels/datamodelMessaging.py | 47 +------ modules/datamodels/datamodelNotification.py | 10 +- modules/datamodels/datamodelRbac.py | 5 +- modules/datamodels/datamodelSecurity.py | 9 +- modules/datamodels/datamodelSubscription.py | 3 +- modules/datamodels/datamodelUam.py | 15 ++- modules/datamodels/datamodelUtils.py | 6 +- .../automation/datamodelFeatureAutomation.py | 6 +- .../automation/interfaceFeatureAutomation.py | 31 +++-- .../automation/routeFeatureAutomation.py | 4 +- .../datamodelFeatureAutomation2.py | 5 +- .../automation2/routeFeatureAutomation2.py | 8 +- .../chatbot/interfaceFeatureChatbot.py | 12 +- .../features/commcoach/datamodelCommcoach.py | 31 ++--- .../datamodelFeatureNeutralizer.py | 3 +- .../realEstate/datamodelFeatureRealEstate.py | 9 +- .../features/teamsbot/datamodelTeamsbot.py | 24 ++-- .../trustee/datamodelFeatureTrustee.py | 34 +++-- .../trustee/interfaceFeatureTrustee.py | 14 +- .../workspace/datamodelFeatureWorkspace.py | 3 +- modules/features/workspace/mainWorkspace.py | 2 +- modules/interfaces/interfaceBootstrap.py | 123 +++++++++++++++++- modules/interfaces/interfaceDbApp.py | 119 ++++++++--------- modules/interfaces/interfaceDbBilling.py | 12 +- modules/interfaces/interfaceDbChat.py | 9 +- modules/interfaces/interfaceDbManagement.py | 63 +++++---- modules/interfaces/interfaceFeatures.py | 14 +- modules/interfaces/interfaceRbac.py | 22 ++-- modules/migration/migrateRootUsers.py | 2 +- modules/routes/routeAdminAutomationEvents.py | 4 +- modules/routes/routeAdminAutomationLogs.py | 4 +- modules/routes/routeAdminRbacRules.py | 2 +- modules/routes/routeBilling.py | 14 +- modules/routes/routeDataFiles.py | 2 +- modules/security/rbac.py | 2 +- .../services/serviceAgent/mainServiceAgent.py | 10 +- .../serviceKnowledge/mainServiceKnowledge.py | 2 +- modules/shared/attributeUtils.py | 14 +- modules/shared/dbMultiTenantOptimizations.py | 2 +- modules/shared/gdprDeletion.py | 10 +- modules/workflows/automation/mainWorkflow.py | 8 +- scripts/script_db_export_migration.py | 11 +- tests/integration/rbac/test_rbac_database.py | 116 ++++++++++------- 55 files changed, 624 insertions(+), 485 deletions(-) create mode 100644 modules/datamodels/datamodelBase.py diff --git a/modules/connectors/connectorDbPostgre.py b/modules/connectors/connectorDbPostgre.py index 67cceb45..e168467b 100644 --- a/modules/connectors/connectorDbPostgre.py +++ b/modules/connectors/connectorDbPostgre.py @@ -12,6 +12,7 @@ import threading from modules.shared.timeUtils import getUtcTimestamp from modules.shared.configuration import APP_CONFIG +from modules.datamodels.datamodelBase import PowerOnModel from modules.datamodels.datamodelUam import User, AccessLevel, UserPermissions from modules.datamodels.datamodelRbac import AccessRule, AccessRuleContext @@ -20,7 +21,7 @@ logger = logging.getLogger(__name__) # No mapping needed - table name = Pydantic model name exactly -class SystemTable(BaseModel): +class 
SystemTable(PowerOnModel): """Data model for system table entries""" table_name: str = Field( @@ -178,7 +179,7 @@ def _get_cached_connector( userId: str = None, ) -> "DatabaseConnector": """Return cached DatabaseConnector for same (host, database, port) to avoid duplicate PostgreSQL inits. - Uses contextvars for userId so concurrent requests sharing the same connector get correct _createdBy/_modifiedBy. + Uses contextvars for userId so concurrent requests sharing the same connector get correct sysCreatedBy/sysModifiedBy. """ port = int(dbPort) if dbPort is not None else 5432 key = (dbHost, dbDatabase, port) @@ -327,8 +328,10 @@ class DatabaseConnector: id SERIAL PRIMARY KEY, table_name VARCHAR(255) UNIQUE NOT NULL, initial_id VARCHAR(255) NOT NULL, - _createdAt DOUBLE PRECISION, - _modifiedAt DOUBLE PRECISION + "sysCreatedAt" DOUBLE PRECISION, + "sysCreatedBy" VARCHAR(255), + "sysModifiedAt" DOUBLE PRECISION, + "sysModifiedBy" VARCHAR(255) ) """) conn.close() @@ -416,7 +419,7 @@ class DatabaseConnector: for table_name, initial_id in data.items(): cursor.execute( """ - INSERT INTO "_system" ("table_name", "initial_id", "_modifiedAt") + INSERT INTO "_system" ("table_name", "initial_id", "sysModifiedAt") VALUES (%s, %s, %s) """, (table_name, initial_id, getUtcTimestamp()), @@ -448,8 +451,10 @@ class DatabaseConnector: CREATE TABLE "{self._systemTableName}" ( "table_name" VARCHAR(255) PRIMARY KEY, "initial_id" VARCHAR(255), - "_createdAt" DOUBLE PRECISION, - "_modifiedAt" DOUBLE PRECISION + "sysCreatedAt" DOUBLE PRECISION, + "sysCreatedBy" VARCHAR(255), + "sysModifiedAt" DOUBLE PRECISION, + "sysModifiedBy" VARCHAR(255) ) """) logger.info("System table created successfully") @@ -464,10 +469,16 @@ class DatabaseConnector: ) existing_columns = [row["column_name"] for row in cursor.fetchall()] - if "_modifiedAt" not in existing_columns: - cursor.execute( - f'ALTER TABLE "{self._systemTableName}" ADD COLUMN "_modifiedAt" DOUBLE PRECISION' - ) + for sys_col, sys_sql in [ + ("sysCreatedAt", "DOUBLE PRECISION"), + ("sysCreatedBy", "VARCHAR(255)"), + ("sysModifiedAt", "DOUBLE PRECISION"), + ("sysModifiedBy", "VARCHAR(255)"), + ]: + if sys_col not in existing_columns: + cursor.execute( + f'ALTER TABLE "{self._systemTableName}" ADD COLUMN "{sys_col}" {sys_sql}' + ) return True except Exception as e: @@ -518,11 +529,7 @@ class DatabaseConnector: # Desired columns based on model model_fields = _get_model_fields(model_class) - desired_columns = ( - set(["id"]) - | set(model_fields.keys()) - | {"_createdAt", "_modifiedAt", "_createdBy", "_modifiedBy"} - ) + desired_columns = set(["id"]) | set(model_fields.keys()) # Add missing columns for col in sorted(desired_columns - existing_columns): @@ -530,12 +537,6 @@ class DatabaseConnector: if col in ["id"]: continue # primary key exists already sql_type = model_fields.get(col) - if col in ["_createdAt"]: - sql_type = "DOUBLE PRECISION" - elif col in ["_modifiedAt"]: - sql_type = "DOUBLE PRECISION" - elif col in ["_createdBy", "_modifiedBy"]: - sql_type = "VARCHAR(255)" if not sql_type: sql_type = "TEXT" try: @@ -594,16 +595,6 @@ class DatabaseConnector: if field_name != "id": # Skip id, already defined columns.append(f'"{field_name}" {sql_type}') - # Add metadata columns - columns.extend( - [ - '"_createdAt" DOUBLE PRECISION', - '"_modifiedAt" DOUBLE PRECISION', - '"_createdBy" VARCHAR(255)', - '"_modifiedBy" VARCHAR(255)', - ] - ) - # Create table sql = f'CREATE TABLE IF NOT EXISTS "{table}" ({", ".join(columns)})' cursor.execute(sql) @@ -626,11 +617,7 @@ class 
DatabaseConnector: """Save record to normalized table with explicit columns.""" # Get columns from Pydantic model instead of database schema fields = _get_model_fields(model_class) - columns = ( - ["id"] - + [field for field in fields.keys() if field != "id"] - + ["_createdAt", "_createdBy", "_modifiedAt", "_modifiedBy"] - ) + columns = ["id"] + [field for field in fields.keys() if field != "id"] if not columns: logger.error(f"No columns found for table {table}") @@ -648,7 +635,7 @@ class DatabaseConnector: value = filtered_record.get(col) # Handle timestamp fields - store as Unix timestamps (floats) for consistency - if col in ["_createdAt", "_modifiedAt"] and value is not None: + if col in ["sysCreatedAt", "sysModifiedAt"] and value is not None: if isinstance(value, str): # Try to parse string as timestamp try: @@ -690,7 +677,7 @@ class DatabaseConnector: [ f'"{col}" = EXCLUDED."{col}"' for col in columns[1:] - if col not in ["_createdAt", "_createdBy"] + if col not in ["sysCreatedAt", "sysCreatedBy"] ] ) @@ -742,17 +729,18 @@ class DatabaseConnector: if effective_user_id is None: effective_user_id = self.userId currentTime = getUtcTimestamp() - # Set _createdAt and _createdBy if this is a new record (record doesn't have _createdAt) - if "_createdAt" not in record: - record["_createdAt"] = currentTime + # Set sysCreatedAt/sysCreatedBy on first persist; always refresh modified fields. + # Use falsy check: model_dump() always includes sysCreatedAt key (often None). + if not record.get("sysCreatedAt"): + record["sysCreatedAt"] = currentTime if effective_user_id: - record["_createdBy"] = effective_user_id - elif "_createdBy" not in record or not record.get("_createdBy"): + record["sysCreatedBy"] = effective_user_id + elif not record.get("sysCreatedBy"): if effective_user_id: - record["_createdBy"] = effective_user_id - record["_modifiedAt"] = currentTime + record["sysCreatedBy"] = effective_user_id + record["sysModifiedAt"] = currentTime if effective_user_id: - record["_modifiedBy"] = effective_user_id + record["sysModifiedBy"] = effective_user_id with self.connection.cursor() as cursor: self._save_record(cursor, table, recordId, record, model_class) @@ -840,6 +828,26 @@ class DatabaseConnector: logger.error(f"Error removing initial ID for table {table}: {e}") return False + def buildRbacWhereClause( + self, + permissions: UserPermissions, + currentUser: User, + table: str, + mandateId: Optional[str] = None, + featureInstanceId: Optional[str] = None, + ) -> Optional[Dict[str, Any]]: + """Delegate to interfaceRbac.buildRbacWhereClause (tests and call sites use connector as entry).""" + from modules.interfaces.interfaceRbac import buildRbacWhereClause as _buildRbacWhereClause + + return _buildRbacWhereClause( + permissions, + currentUser, + table, + self, + mandateId=mandateId, + featureInstanceId=featureInstanceId, + ) + def updateContext(self, userId: str) -> None: """Updates the context of the database connector. Sets both instance userId and contextvar for request-scoped use when connector is shared. @@ -992,10 +1000,6 @@ class DatabaseConnector: Returns (where_clause, order_clause, limit_clause, values, count_values). 
""" fields = _get_model_fields(model_class) - fields["_createdAt"] = "DOUBLE PRECISION" - fields["_modifiedAt"] = "DOUBLE PRECISION" - fields["_createdBy"] = "TEXT" - fields["_modifiedBy"] = "TEXT" validColumns = set(fields.keys()) where_parts: List[str] = [] values: List[Any] = [] @@ -1190,10 +1194,6 @@ class DatabaseConnector: """ table = model_class.__name__ fields = _get_model_fields(model_class) - fields["_createdAt"] = "DOUBLE PRECISION" - fields["_modifiedAt"] = "DOUBLE PRECISION" - fields["_createdBy"] = "TEXT" - fields["_modifiedBy"] = "TEXT" if column not in fields: return [] diff --git a/modules/datamodels/datamodelBase.py b/modules/datamodels/datamodelBase.py new file mode 100644 index 00000000..862f177b --- /dev/null +++ b/modules/datamodels/datamodelBase.py @@ -0,0 +1,68 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +"""Base Pydantic model with system-managed fields (DB + API + UI metadata).""" + +from typing import Optional + +from pydantic import BaseModel, Field + +from modules.shared.attributeUtils import registerModelLabels + + +class PowerOnModel(BaseModel): + sysCreatedAt: Optional[float] = Field( + default=None, + description="Record creation timestamp (UTC, set by system)", + json_schema_extra={ + "frontend_type": "timestamp", + "frontend_readonly": True, + "frontend_required": False, + "frontend_visible": False, + "system": True, + }, + ) + sysCreatedBy: Optional[str] = Field( + default=None, + description="User ID who created this record (set by system)", + json_schema_extra={ + "frontend_type": "text", + "frontend_readonly": True, + "frontend_required": False, + "frontend_visible": False, + "system": True, + }, + ) + sysModifiedAt: Optional[float] = Field( + default=None, + description="Record last modification timestamp (UTC, set by system)", + json_schema_extra={ + "frontend_type": "timestamp", + "frontend_readonly": True, + "frontend_required": False, + "frontend_visible": False, + "system": True, + }, + ) + sysModifiedBy: Optional[str] = Field( + default=None, + description="User ID who last modified this record (set by system)", + json_schema_extra={ + "frontend_type": "text", + "frontend_readonly": True, + "frontend_required": False, + "frontend_visible": False, + "system": True, + }, + ) + + +registerModelLabels( + "PowerOnModel", + {"en": "Base Record", "de": "Basisdatensatz"}, + { + "sysCreatedAt": {"en": "Created At", "de": "Erstellt am", "fr": "Cree le"}, + "sysCreatedBy": {"en": "Created By", "de": "Erstellt von", "fr": "Cree par"}, + "sysModifiedAt": {"en": "Modified At", "de": "Geaendert am", "fr": "Modifie le"}, + "sysModifiedBy": {"en": "Modified By", "de": "Geaendert von", "fr": "Modifie par"}, + }, +) diff --git a/modules/datamodels/datamodelBilling.py b/modules/datamodels/datamodelBilling.py index 995ac75d..a61faa59 100644 --- a/modules/datamodels/datamodelBilling.py +++ b/modules/datamodels/datamodelBilling.py @@ -6,6 +6,7 @@ from typing import List, Dict, Any, Optional from enum import Enum from datetime import date, datetime, timezone from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels import uuid @@ -48,7 +49,7 @@ class PeriodTypeEnum(str, Enum): YEAR = "YEAR" -class BillingAccount(BaseModel): +class BillingAccount(PowerOnModel): """Billing account for mandate or user-mandate combination.""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), description="Primary key" @@ -78,7 +79,7 @@ registerModelLabels( ) 
-class BillingTransaction(BaseModel): +class BillingTransaction(PowerOnModel): """Single billing transaction (credit, debit, adjustment).""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), description="Primary key" diff --git a/modules/datamodels/datamodelChat.py b/modules/datamodels/datamodelChat.py index 7002187a..7154e57e 100644 --- a/modules/datamodels/datamodelChat.py +++ b/modules/datamodels/datamodelChat.py @@ -5,12 +5,13 @@ from typing import List, Dict, Any, Optional from enum import Enum from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels from modules.shared.timeUtils import getUtcTimestamp import uuid -class ChatLog(BaseModel): +class ChatLog(PowerOnModel): """Log entries for chat workflows. User-owned, no mandate context.""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), description="Primary key" @@ -56,7 +57,7 @@ registerModelLabels( ) -class ChatDocument(BaseModel): +class ChatDocument(PowerOnModel): """Documents attached to chat messages. User-owned, no mandate context.""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), description="Primary key" @@ -163,7 +164,7 @@ registerModelLabels( ) -class ChatMessage(BaseModel): +class ChatMessage(PowerOnModel): """Messages in chat workflows. User-owned, no mandate context.""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), description="Primary key" @@ -260,7 +261,7 @@ registerModelLabels( ) -class ChatWorkflow(BaseModel): +class ChatWorkflow(PowerOnModel): """Chat workflow container. User-owned, no mandate context.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) featureInstanceId: Optional[str] = Field(None, description="Feature instance ID for multi-tenancy isolation", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) diff --git a/modules/datamodels/datamodelDataSource.py b/modules/datamodels/datamodelDataSource.py index 47578b03..51a324ca 100644 --- a/modules/datamodels/datamodelDataSource.py +++ b/modules/datamodels/datamodelDataSource.py @@ -8,12 +8,12 @@ Google Drive folder, FTP directory, etc.) for agent-accessible data containers. 
from typing import Dict, Any, Optional from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels -from modules.shared.timeUtils import getUtcTimestamp import uuid -class DataSource(BaseModel): +class DataSource(PowerOnModel): """Configured external data source linked to a UserConnection.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") connectionId: str = Field(description="FK to UserConnection") @@ -29,7 +29,6 @@ class DataSource(BaseModel): userId: str = Field(default="", description="Owner user ID") autoSync: bool = Field(default=False, description="Automatically sync on schedule") lastSynced: Optional[float] = Field(default=None, description="Last sync timestamp") - createdAt: float = Field(default_factory=getUtcTimestamp, description="Creation timestamp") scope: str = Field( default="personal", description="Data visibility scope: personal, featureInstance, mandate, global", @@ -62,7 +61,6 @@ registerModelLabels( "userId": {"en": "User ID", "de": "Benutzer-ID", "fr": "ID utilisateur"}, "autoSync": {"en": "Auto Sync", "de": "Auto-Sync", "fr": "Synchro auto"}, "lastSynced": {"en": "Last Synced", "de": "Letzter Sync", "fr": "Dernier sync"}, - "createdAt": {"en": "Created At", "de": "Erstellt am", "fr": "Créé le"}, "scope": {"en": "Scope", "de": "Sichtbarkeit"}, "neutralize": {"en": "Neutralize", "de": "Neutralisieren"}, }, diff --git a/modules/datamodels/datamodelFeatureDataSource.py b/modules/datamodels/datamodelFeatureDataSource.py index 5aa834eb..80ceb03c 100644 --- a/modules/datamodels/datamodelFeatureDataSource.py +++ b/modules/datamodels/datamodelFeatureDataSource.py @@ -8,12 +8,12 @@ so the agent can query structured feature data (e.g. TrusteePosition rows). 
from typing import Optional from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels -from modules.shared.timeUtils import getUtcTimestamp import uuid -class FeatureDataSource(BaseModel): +class FeatureDataSource(PowerOnModel): """A feature-instance table attached as data source in the AI workspace.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") featureInstanceId: str = Field(description="FK to FeatureInstance") @@ -24,7 +24,6 @@ class FeatureDataSource(BaseModel): mandateId: str = Field(default="", description="Mandate scope") userId: str = Field(default="", description="Owner user ID") workspaceInstanceId: str = Field(description="Workspace instance where this source is used") - createdAt: float = Field(default_factory=getUtcTimestamp, description="Creation timestamp") scope: str = Field( default="personal", description="Data visibility scope: personal, featureInstance, mandate, global", @@ -55,6 +54,5 @@ registerModelLabels( "mandateId": {"en": "Mandate", "de": "Mandant", "fr": "Mandat"}, "userId": {"en": "User", "de": "Benutzer", "fr": "Utilisateur"}, "workspaceInstanceId": {"en": "Workspace", "de": "Workspace", "fr": "Espace de travail"}, - "createdAt": {"en": "Created At", "de": "Erstellt am", "fr": "Créé le"}, }, ) diff --git a/modules/datamodels/datamodelFeatures.py b/modules/datamodels/datamodelFeatures.py index 0a5dc441..3134a18e 100644 --- a/modules/datamodels/datamodelFeatures.py +++ b/modules/datamodels/datamodelFeatures.py @@ -5,11 +5,12 @@ import uuid from typing import Optional, Dict, Any from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels from modules.datamodels.datamodelUtils import TextMultilingual -class Feature(BaseModel): +class Feature(PowerOnModel): """ Feature-Definition (global, z.B. 'trustee', 'chatbot'). Features sind die verfügbaren Funktionalitäten der Plattform. @@ -40,7 +41,7 @@ registerModelLabels( ) -class FeatureInstance(BaseModel): +class FeatureInstance(PowerOnModel): """ Instanz eines Features in einem Mandanten. Ein Mandant kann mehrere Instanzen desselben Features haben. 
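The recurring pattern in the datamodel diffs above: each model's base class switches from BaseModel to PowerOnModel, and hand-rolled creation/modification fields are dropped in favor of the four inherited sys* fields. For reference, a converted model reduces to roughly the following sketch (ExampleItem is an illustrative name, not part of the patch):

import uuid
from pydantic import Field
from modules.datamodels.datamodelBase import PowerOnModel

class ExampleItem(PowerOnModel):
    # No local createdAt/createdBy: sysCreatedAt, sysCreatedBy, sysModifiedAt
    # and sysModifiedBy are inherited from PowerOnModel and managed by the DB layer.
    id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key")
    name: str = Field(description="Display name")

item = ExampleItem(name="demo")
assert item.sysCreatedAt is None  # stamped by the connector on save, not at construction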
diff --git a/modules/datamodels/datamodelFileFolder.py b/modules/datamodels/datamodelFileFolder.py index b7a19915..23cd197b 100644 --- a/modules/datamodels/datamodelFileFolder.py +++ b/modules/datamodels/datamodelFileFolder.py @@ -4,18 +4,17 @@ from typing import Optional from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels -from modules.shared.timeUtils import getUtcTimestamp import uuid -class FileFolder(BaseModel): +class FileFolder(PowerOnModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) name: str = Field(description="Folder name", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True}) parentId: Optional[str] = Field(default=None, description="Parent folder ID (null = root)", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False}) mandateId: Optional[str] = Field(default=None, description="Mandate context", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) featureInstanceId: Optional[str] = Field(default=None, description="Feature instance context", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) - createdAt: float = Field(default_factory=getUtcTimestamp, description="Creation timestamp", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}) registerModelLabels( @@ -27,6 +26,5 @@ registerModelLabels( "parentId": {"en": "Parent Folder", "fr": "Dossier parent"}, "mandateId": {"en": "Mandate ID", "fr": "ID du mandat"}, "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance"}, - "createdAt": {"en": "Created At", "fr": "Créé le"}, }, ) diff --git a/modules/datamodels/datamodelFiles.py b/modules/datamodels/datamodelFiles.py index f95a0ef1..b8a44d2c 100644 --- a/modules/datamodels/datamodelFiles.py +++ b/modules/datamodels/datamodelFiles.py @@ -3,15 +3,14 @@ """File-related datamodels: FileItem, FilePreview, FileData.""" from typing import Dict, Any, List, Optional, Union -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels -from modules.shared.timeUtils import getUtcTimestamp import uuid import base64 -class FileItem(BaseModel): - model_config = ConfigDict(extra='allow') # Preserve system fields (_createdBy, _createdAt, etc.) 
+class FileItem(PowerOnModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) mandateId: Optional[str] = Field(default="", description="ID of the mandate this file belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) featureInstanceId: Optional[str] = Field(default="", description="ID of the feature instance this file belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "frontend_fk_source": "/api/features/instances", "frontend_fk_display_field": "label"}) @@ -19,7 +18,6 @@ class FileItem(BaseModel): mimeType: str = Field(description="MIME type of the file", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) fileHash: str = Field(description="Hash of the file", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) fileSize: int = Field(description="Size of the file in bytes", json_schema_extra={"frontend_type": "integer", "frontend_readonly": True, "frontend_required": False}) - creationDate: float = Field(default_factory=getUtcTimestamp, description="Date when the file was created (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}) tags: Optional[List[str]] = Field(default=None, description="Tags for categorization and search", json_schema_extra={"frontend_type": "tags", "frontend_readonly": False, "frontend_required": False}) folderId: Optional[str] = Field(default=None, description="ID of the parent folder", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False}) description: Optional[str] = Field(default=None, description="User-provided description of the file", json_schema_extra={"frontend_type": "textarea", "frontend_readonly": False, "frontend_required": False}) @@ -51,7 +49,6 @@ registerModelLabels( "mimeType": {"en": "MIME Type", "fr": "Type MIME"}, "fileHash": {"en": "File Hash", "fr": "Hash du fichier"}, "fileSize": {"en": "File Size", "fr": "Taille du fichier"}, - "creationDate": {"en": "Creation Date", "fr": "Date de création"}, "tags": {"en": "Tags", "fr": "Tags"}, "folderId": {"en": "Folder ID", "fr": "ID du dossier"}, "description": {"en": "Description", "fr": "Description"}, @@ -88,7 +85,7 @@ registerModelLabels( }, ) -class FileData(BaseModel): +class FileData(PowerOnModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") data: str = Field(description="File data content") base64Encoded: bool = Field(description="Whether the data is base64 encoded") diff --git a/modules/datamodels/datamodelInvitation.py b/modules/datamodels/datamodelInvitation.py index 472318af..709e5021 100644 --- a/modules/datamodels/datamodelInvitation.py +++ b/modules/datamodels/datamodelInvitation.py @@ -9,11 +9,11 @@ import uuid import secrets from typing import Optional, List from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels -from modules.shared.timeUtils import getUtcTimestamp -class Invitation(BaseModel): +class Invitation(PowerOnModel): """ Einladungs-Token für neue User. Ermöglicht Self-Service Onboarding zu Mandanten und Feature-Instanzen. 
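The next hunk removes Invitation.createdBy and Invitation.createdAt; creator identity now lives in the inherited sysCreatedBy, which is also what ownership checks compare against (see the AutomationObjects change later in this patch). A minimal sketch of such a check, with a hypothetical helper name:

def isOwnedBy(record: dict, userId: str) -> bool:
    # sysCreatedBy is stamped by the connector and replaces per-model createdBy fields
    return bool(userId) and record.get("sysCreatedBy") == userId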
@@ -56,15 +56,6 @@ class Invitation(BaseModel): description="Email address to send invitation link (optional)", json_schema_extra={"frontend_type": "email", "frontend_readonly": False, "frontend_required": False} ) - createdBy: str = Field( - description="User ID of the person who created the invitation", - json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True} - ) - createdAt: float = Field( - default_factory=getUtcTimestamp, - description="When the invitation was created (UTC timestamp)", - json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False} - ) expiresAt: float = Field( description="When the invitation expires (UTC timestamp)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": True} @@ -121,8 +112,6 @@ registerModelLabels( "roleIds": {"en": "Roles", "de": "Rollen", "fr": "Rôles"}, "targetUsername": {"en": "Target Username", "de": "Ziel-Benutzername", "fr": "Nom d'utilisateur cible"}, "email": {"en": "Email (optional)", "de": "E-Mail (optional)", "fr": "Email (optionnel)"}, - "createdBy": {"en": "Created By", "de": "Erstellt von", "fr": "Créé par"}, - "createdAt": {"en": "Created At", "de": "Erstellt am", "fr": "Créé le"}, "expiresAt": {"en": "Expires At", "de": "Gültig bis", "fr": "Expire le"}, "usedBy": {"en": "Used By", "de": "Verwendet von", "fr": "Utilisé par"}, "usedAt": {"en": "Used At", "de": "Verwendet am", "fr": "Utilisé le"}, diff --git a/modules/datamodels/datamodelKnowledge.py b/modules/datamodels/datamodelKnowledge.py index e9dcc857..3742a84b 100644 --- a/modules/datamodels/datamodelKnowledge.py +++ b/modules/datamodels/datamodelKnowledge.py @@ -12,12 +12,13 @@ Vector fields use json_schema_extra={"db_type": "vector(1536)"} for pgvector. from typing import Dict, Any, List, Optional from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels from modules.shared.timeUtils import getUtcTimestamp import uuid -class FileContentIndex(BaseModel): +class FileContentIndex(PowerOnModel): """Structural index of a file's content objects. Created without AI. Lives in the Instance Layer; optionally promoted to Shared Layer via isShared.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key (typically = fileId)") @@ -73,7 +74,7 @@ registerModelLabels( ) -class ContentChunk(BaseModel): +class ContentChunk(PowerOnModel): """Persisted content chunk with embedding vector. Reusable across workflows. Scalar content object (or chunk thereof) with pgvector embedding.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") @@ -111,7 +112,7 @@ registerModelLabels( ) -class RoundMemory(BaseModel): +class RoundMemory(PowerOnModel): """Persistent per-round memory for agent tool results, file refs, and decisions. 
Stored after each agent round so that RAG can retrieve relevant context @@ -135,7 +136,6 @@ class RoundMemory(BaseModel): description="Embedding of summary for semantic retrieval", json_schema_extra={"db_type": "vector(1536)"}, ) - createdAt: float = Field(default_factory=getUtcTimestamp, description="Creation timestamp") registerModelLabels( @@ -151,12 +151,11 @@ registerModelLabels( "fullData": {"en": "Full Data", "fr": "Données complètes"}, "fileIds": {"en": "File IDs", "fr": "IDs de fichier"}, "embedding": {"en": "Embedding", "fr": "Vecteur d'embedding"}, - "createdAt": {"en": "Created At", "fr": "Créé le"}, }, ) -class WorkflowMemory(BaseModel): +class WorkflowMemory(PowerOnModel): """Workflow-scoped key-value cache for entities and facts. Extracted during agent rounds, persisted for cross-round and cross-workflow reuse.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") @@ -166,7 +165,6 @@ class WorkflowMemory(BaseModel): key: str = Field(description="Key identifier (e.g. 'entity:companyName')") value: str = Field(description="Extracted value") source: str = Field(default="extraction", description="Origin: extraction, tool, conversation, summary") - createdAt: float = Field(default_factory=getUtcTimestamp, description="Creation timestamp") embedding: Optional[List[float]] = Field( default=None, description="Optional embedding for semantic lookup", json_schema_extra={"db_type": "vector(1536)"} @@ -184,7 +182,6 @@ registerModelLabels( "key": {"en": "Key", "fr": "Clé"}, "value": {"en": "Value", "fr": "Valeur"}, "source": {"en": "Source", "fr": "Source"}, - "createdAt": {"en": "Created At", "fr": "Créé le"}, "embedding": {"en": "Embedding", "fr": "Vecteur d'embedding"}, }, ) diff --git a/modules/datamodels/datamodelMembership.py b/modules/datamodels/datamodelMembership.py index 5e8b8814..ce753d15 100644 --- a/modules/datamodels/datamodelMembership.py +++ b/modules/datamodels/datamodelMembership.py @@ -9,10 +9,11 @@ Rollen werden über Junction Tables verknüpft für saubere CASCADE DELETE. import uuid from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels -class UserMandate(BaseModel): +class UserMandate(PowerOnModel): """ User-Mitgliedschaft in einem Mandanten. Kein User gehört direkt zu einem Mandanten - Zugehörigkeit wird über dieses Model gesteuert. @@ -50,7 +51,7 @@ registerModelLabels( ) -class FeatureAccess(BaseModel): +class FeatureAccess(PowerOnModel): """ User-Zugriff auf eine Feature-Instanz. Definiert welche User auf welche Feature-Instanzen zugreifen können. @@ -88,7 +89,7 @@ registerModelLabels( ) -class UserMandateRole(BaseModel): +class UserMandateRole(PowerOnModel): """ Junction Table: UserMandate zu Role. Ermöglicht CASCADE DELETE auf Datenbankebene. @@ -119,7 +120,7 @@ registerModelLabels( ) -class FeatureAccessRole(BaseModel): +class FeatureAccessRole(PowerOnModel): """ Junction Table: FeatureAccess zu Role. Ermöglicht CASCADE DELETE auf Datenbankebene. 
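With per-model timestamp fields removed across these datamodels, stamping happens once, in DatabaseConnector.save() (see the connectorDbPostgre.py hunks above). Condensed to a sketch, and simplifying away the SQL and error handling, the logic is:

from typing import Optional

def stampSystemFields(record: dict, userId: Optional[str], now: float) -> None:
    # model_dump() always emits the sysCreatedAt key (often None), so a falsy
    # check, not "key in record", detects the first persist.
    if not record.get("sysCreatedAt"):
        record["sysCreatedAt"] = now
        if userId:
            record["sysCreatedBy"] = userId
    elif not record.get("sysCreatedBy") and userId:
        record["sysCreatedBy"] = userId
    record["sysModifiedAt"] = now  # modified fields refresh on every save
    if userId:
        record["sysModifiedBy"] = userId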
diff --git a/modules/datamodels/datamodelMessaging.py b/modules/datamodels/datamodelMessaging.py index 1c2206b7..ebacc9d4 100644 --- a/modules/datamodels/datamodelMessaging.py +++ b/modules/datamodels/datamodelMessaging.py @@ -6,8 +6,8 @@ import uuid from typing import Optional from enum import Enum from pydantic import BaseModel, Field, ConfigDict +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels -from modules.shared.timeUtils import getUtcTimestamp class MessagingChannel(str, Enum): @@ -26,7 +26,7 @@ class DeliveryStatus(str, Enum): FAILED = "failed" -class MessagingSubscription(BaseModel): +class MessagingSubscription(PowerOnModel): """Data model for messaging subscriptions""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), @@ -64,26 +64,6 @@ class MessagingSubscription(BaseModel): description="Whether the subscription is enabled", json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False} ) - creationDate: float = Field( - default_factory=getUtcTimestamp, - description="When the subscription was created (UTC timestamp in seconds)", - json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False} - ) - lastModified: float = Field( - default_factory=getUtcTimestamp, - description="When the subscription was last modified (UTC timestamp in seconds)", - json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False} - ) - createdBy: Optional[str] = Field( - default=None, - description="User ID who created the subscription", - json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False} - ) - modifiedBy: Optional[str] = Field( - default=None, - description="User ID who last modified the subscription", - json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False} - ) model_config = ConfigDict(use_enum_values=True) @@ -100,10 +80,6 @@ registerModelLabels( "description": {"en": "Description", "fr": "Description"}, "isSystemSubscription": {"en": "System Subscription", "fr": "Abonnement système"}, "enabled": {"en": "Enabled", "fr": "Activé"}, - "creationDate": {"en": "Creation Date", "fr": "Date de création"}, - "lastModified": {"en": "Last Modified", "fr": "Dernière modification"}, - "createdBy": {"en": "Created By", "fr": "Créé par"}, - "modifiedBy": {"en": "Modified By", "fr": "Modifié par"}, }, ) @@ -155,16 +131,6 @@ class MessagingSubscriptionRegistration(BaseModel): description="Whether this registration is enabled", json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False} ) - creationDate: float = Field( - default_factory=getUtcTimestamp, - description="When the registration was created (UTC timestamp in seconds)", - json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False} - ) - lastModified: float = Field( - default_factory=getUtcTimestamp, - description="When the registration was last modified (UTC timestamp in seconds)", - json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False} - ) model_config = ConfigDict(use_enum_values=True) @@ -181,8 +147,6 @@ registerModelLabels( "channel": {"en": "Channel", "fr": "Canal"}, "channelConfig": {"en": "Channel Config", "fr": "Configuration du canal"}, "enabled": {"en": "Enabled", "fr": "Activé"}, - "creationDate": {"en": "Creation 
Date", "fr": "Date de création"}, - "lastModified": {"en": "Last Modified", "fr": "Dernière modification"}, }, ) @@ -248,11 +212,6 @@ class MessagingDelivery(BaseModel): description="When the delivery was sent (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False} ) - creationDate: float = Field( - default_factory=getUtcTimestamp, - description="When the delivery record was created (UTC timestamp in seconds)", - json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False} - ) model_config = ConfigDict(use_enum_values=True) @@ -270,7 +229,6 @@ registerModelLabels( "status": {"en": "Status", "fr": "Statut"}, "errorMessage": {"en": "Error Message", "fr": "Message d'erreur"}, "sentAt": {"en": "Sent At", "fr": "Envoyé le"}, - "creationDate": {"en": "Creation Date", "fr": "Date de création"}, }, ) @@ -349,4 +307,3 @@ class MessagingSubscriptionExecutionResult(BaseModel): description="Error message if execution failed", json_schema_extra={"frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False} ) - model_config = ConfigDict(extra="allow") # Allow additional fields for custom results diff --git a/modules/datamodels/datamodelNotification.py b/modules/datamodels/datamodelNotification.py index b1475767..f5af0f55 100644 --- a/modules/datamodels/datamodelNotification.py +++ b/modules/datamodels/datamodelNotification.py @@ -9,8 +9,8 @@ import uuid from typing import Optional, List from enum import Enum from pydantic import BaseModel, Field, ConfigDict +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels -from modules.shared.timeUtils import getUtcTimestamp class NotificationType(str, Enum): @@ -43,7 +43,7 @@ class NotificationAction(BaseModel): ) -class UserNotification(BaseModel): +class UserNotification(PowerOnModel): """ In-app notification for a user. Supports actionable notifications with accept/decline buttons. 
@@ -137,11 +137,6 @@ class UserNotification(BaseModel): ) # Timestamps - createdAt: float = Field( - default_factory=getUtcTimestamp, - description="When the notification was created (UTC timestamp)", - json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False} - ) readAt: Optional[float] = Field( default=None, description="When the notification was read (UTC timestamp)", @@ -177,7 +172,6 @@ registerModelLabels( "actions": {"en": "Actions", "de": "Aktionen", "fr": "Actions"}, "actionTaken": {"en": "Action Taken", "de": "Durchgeführte Aktion", "fr": "Action effectuée"}, "actionResult": {"en": "Action Result", "de": "Aktions-Ergebnis", "fr": "Résultat de l'action"}, - "createdAt": {"en": "Created At", "de": "Erstellt am", "fr": "Créé le"}, "readAt": {"en": "Read At", "de": "Gelesen am", "fr": "Lu le"}, "actionedAt": {"en": "Actioned At", "de": "Bearbeitet am", "fr": "Traité le"}, "expiresAt": {"en": "Expires At", "de": "Gültig bis", "fr": "Expire le"}, diff --git a/modules/datamodels/datamodelRbac.py b/modules/datamodels/datamodelRbac.py index 978c3be6..b9e0cb91 100644 --- a/modules/datamodels/datamodelRbac.py +++ b/modules/datamodels/datamodelRbac.py @@ -13,6 +13,7 @@ import uuid from typing import Optional from enum import Enum from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels from modules.datamodels.datamodelUtils import TextMultilingual from modules.datamodels.datamodelUam import AccessLevel @@ -25,7 +26,7 @@ class AccessRuleContext(str, Enum): RESOURCE = "RESOURCE" # System resources (AI models, actions, etc.) -class Role(BaseModel): +class Role(PowerOnModel): """ Data model for RBAC roles. @@ -90,7 +91,7 @@ registerModelLabels( ) -class AccessRule(BaseModel): +class AccessRule(PowerOnModel): """ Data model for access control rules. diff --git a/modules/datamodels/datamodelSecurity.py b/modules/datamodels/datamodelSecurity.py index 5caafe1b..dc8c26e6 100644 --- a/modules/datamodels/datamodelSecurity.py +++ b/modules/datamodels/datamodelSecurity.py @@ -11,6 +11,7 @@ Multi-Tenant Design: from typing import Optional, Any from pydantic import BaseModel, Field, ConfigDict, model_validator +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels from modules.shared.timeUtils import getUtcTimestamp from .datamodelUam import AuthAuthority @@ -30,7 +31,7 @@ class TokenPurpose(str, Enum): DATA_CONNECTION = "dataConnection" -class Token(BaseModel): +class Token(PowerOnModel): """ Authentication Token model. 
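Token.createdAt goes away in the next hunk, like the other per-model timestamps. Where an API response still needs a createdAt key, it is derived from the system field, as routeFeatureAutomation2.py does further down in this patch; roughly (toApiPayload is a hypothetical name):

def toApiPayload(record: dict) -> dict:
    # keep the public "createdAt" key stable while storage moves to sysCreatedAt
    return {**record, "createdAt": record.get("sysCreatedAt")}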
@@ -55,9 +56,6 @@ class Token(BaseModel): description="When the token expires (UTC timestamp in seconds)" ) tokenRefresh: Optional[str] = None - createdAt: Optional[float] = Field( - None, description="When the token was created (UTC timestamp in seconds)" - ) status: TokenStatus = Field( default=TokenStatus.ACTIVE, description="Token status: active/revoked" ) @@ -106,7 +104,6 @@ registerModelLabels( "tokenType": {"en": "Token Type", "de": "Token-Typ", "fr": "Type de jeton"}, "expiresAt": {"en": "Expires At", "de": "Läuft ab am", "fr": "Expire le"}, "tokenRefresh": {"en": "Refresh Token", "de": "Refresh-Token", "fr": "Jeton de rafraîchissement"}, - "createdAt": {"en": "Created At", "de": "Erstellt am", "fr": "Créé le"}, "status": {"en": "Status", "de": "Status", "fr": "Statut"}, "revokedAt": {"en": "Revoked At", "de": "Widerrufen am", "fr": "Révoqué le"}, "revokedBy": {"en": "Revoked By", "de": "Widerrufen von", "fr": "Révoqué par"}, @@ -116,7 +113,7 @@ registerModelLabels( ) -class AuthEvent(BaseModel): +class AuthEvent(PowerOnModel): """Authentication event for audit logging.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the auth event", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) userId: str = Field(description="ID of the user this event belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}) diff --git a/modules/datamodels/datamodelSubscription.py b/modules/datamodels/datamodelSubscription.py index 8f5fd824..fa9f2c87 100644 --- a/modules/datamodels/datamodelSubscription.py +++ b/modules/datamodels/datamodelSubscription.py @@ -10,6 +10,7 @@ from typing import Dict, List, Optional from enum import Enum from datetime import datetime, timezone from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels import uuid @@ -124,7 +125,7 @@ registerModelLabels( # Instance: MandateSubscription # ============================================================================ -class MandateSubscription(BaseModel): +class MandateSubscription(PowerOnModel): """A subscription instance bound to a specific mandate. See wiki/concepts/Subscription-State-Machine.md for state transitions.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") diff --git a/modules/datamodels/datamodelUam.py b/modules/datamodels/datamodelUam.py index 3e1250c7..5a057639 100644 --- a/modules/datamodels/datamodelUam.py +++ b/modules/datamodels/datamodelUam.py @@ -13,6 +13,7 @@ import uuid from typing import Optional, List, Dict from enum import Enum from pydantic import BaseModel, Field, EmailStr, field_validator, computed_field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels from modules.shared.timeUtils import getUtcTimestamp @@ -65,7 +66,7 @@ class MandateType(str, Enum): COMPANY = "company" -class Mandate(BaseModel): +class Mandate(PowerOnModel): """ Mandate (Mandant/Tenant) model. Ein Mandant ist ein isolierter Bereich für Daten und Berechtigungen. 
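The UAM models above now rely on connector-side stamping; per the _get_cached_connector docstring earlier in this patch, the acting userId travels in a contextvar so that concurrent requests sharing one cached connector each stamp their own sysCreatedBy/sysModifiedBy. A sketch of that pattern with illustrative names (not the patch's actual variables):

import contextvars
from typing import Optional

currentUserId: contextvars.ContextVar[Optional[str]] = contextvars.ContextVar(
    "currentUserId", default=None
)

def updateContext(userId: str) -> None:
    currentUserId.set(userId)  # request-scoped; does not leak across concurrent requests

def effectiveUserId(instanceUserId: Optional[str]) -> Optional[str]:
    # prefer the request-scoped value; fall back to the connector's own userId
    return currentUserId.get() or instanceUserId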
@@ -145,7 +146,7 @@ registerModelLabels( ) -class UserConnection(BaseModel): +class UserConnection(PowerOnModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the connection", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) userId: str = Field(description="ID of the user this connection belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) authority: AuthAuthority = Field(description="Authentication authority", json_schema_extra={"frontend_type": "select", "frontend_readonly": True, "frontend_required": False, "frontend_options": "/api/connections/authorities/options"}) @@ -202,7 +203,7 @@ registerModelLabels( ) -class User(BaseModel): +class User(PowerOnModel): """ User model. @@ -289,6 +290,11 @@ class User(BaseModel): description="Primary authentication authority", json_schema_extra={"frontend_type": "select", "frontend_readonly": True, "frontend_required": False, "frontend_options": "/api/connections/authorities/options"} ) + roleLabels: List[str] = Field( + default_factory=list, + description="Role labels (from DB or enriched when loading users)", + json_schema_extra={"frontend_type": "multiselect", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}, + ) registerModelLabels( @@ -303,6 +309,7 @@ registerModelLabels( "enabled": {"en": "Enabled", "de": "Aktiviert", "fr": "Activé"}, "isSysAdmin": {"en": "System Admin", "de": "System-Admin", "fr": "Admin système"}, "authenticationAuthority": {"en": "Auth Authority", "de": "Authentifizierung", "fr": "Autorité d'authentification"}, + "roleLabels": {"en": "Role Labels", "de": "Rollen-Labels", "fr": "Libellés de rôles"}, }, ) @@ -325,7 +332,7 @@ registerModelLabels( ) -class UserVoicePreferences(BaseModel): +class UserVoicePreferences(PowerOnModel): """User-level voice/language preferences, shared across all features.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") userId: str = Field(description="User ID") diff --git a/modules/datamodels/datamodelUtils.py b/modules/datamodels/datamodelUtils.py index 614d6592..1088cb31 100644 --- a/modules/datamodels/datamodelUtils.py +++ b/modules/datamodels/datamodelUtils.py @@ -3,13 +3,13 @@ """Utility datamodels: Prompt, TextMultilingual.""" from typing import Dict, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, Field, field_validator +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels import uuid -class Prompt(BaseModel): - model_config = ConfigDict(extra='allow') # Preserve system fields (_createdBy, _createdAt, etc.) 
+class Prompt(PowerOnModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) mandateId: str = Field(default="", description="ID of the mandate this prompt belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) isSystem: bool = Field(default=False, description="System prompt visible to all users (read-only for non-SysAdmin)", json_schema_extra={"frontend_type": "boolean", "frontend_readonly": True, "frontend_required": False}) diff --git a/modules/features/automation/datamodelFeatureAutomation.py b/modules/features/automation/datamodelFeatureAutomation.py index 732f3163..8ea4a300 100644 --- a/modules/features/automation/datamodelFeatureAutomation.py +++ b/modules/features/automation/datamodelFeatureAutomation.py @@ -4,6 +4,7 @@ from typing import List, Dict, Any, Optional from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels from modules.datamodels.datamodelUtils import TextMultilingual import uuid @@ -48,7 +49,7 @@ registerModelLabels( ) -class AutomationTemplate(BaseModel): +class AutomationTemplate(PowerOnModel): """Automation-Vorlage ohne scharfe Placeholder-Werte (DB-persistiert). System-Templates (isSystem=True): Nur durch SysAdmin aenderbar. Alle User koennen lesen. @@ -82,9 +83,6 @@ class AutomationTemplate(BaseModel): description="Feature instance ID (null for system templates, set for instance-scoped templates)", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False} ) - # System fields (_createdAt, _createdBy, etc.) 
werden automatisch vom DB-Connector gesetzt - - registerModelLabels( "AutomationTemplate", {"en": "Automation Template", "ge": "Automation-Vorlage", "fr": "Modèle d'automatisation"}, diff --git a/modules/features/automation/interfaceFeatureAutomation.py b/modules/features/automation/interfaceFeatureAutomation.py index 4091bc28..a4f90a51 100644 --- a/modules/features/automation/interfaceFeatureAutomation.py +++ b/modules/features/automation/interfaceFeatureAutomation.py @@ -22,6 +22,13 @@ from modules.shared.configuration import APP_CONFIG logger = logging.getLogger(__name__) + +def _automationDefinitionPayload(data: Dict[str, Any]) -> Dict[str, Any]: + """Strip connector/enrichment keys; only fields defined on AutomationDefinition.""" + allowed = AutomationDefinition.model_fields.keys() + return {k: v for k, v in (data or {}).items() if k in allowed} + + # Singleton factory for Automation instances _automationInterfaces = {} @@ -100,7 +107,7 @@ class AutomationObjects: if recordId: record = self.db.getRecordset(model, recordFilter={"id": recordId}) if record: - return record[0].get("_createdBy") == self.userId + return record[0].get("sysCreatedBy") == self.userId else: return False # Record not found = no access return True # No recordId needed (e.g., for CREATE) @@ -130,7 +137,7 @@ class AutomationObjects: featureInstanceIds = set() for automation in automations: - createdBy = automation.get("_createdBy") + createdBy = automation.get("sysCreatedBy") if createdBy: userIds.add(createdBy) @@ -186,8 +193,8 @@ class AutomationObjects: # Enrich each automation with the fetched data # SECURITY: Never show a fallback name — if lookup fails, show empty string for automation in automations: - createdBy = automation.get("_createdBy") - automation["_createdByUserName"] = usersMap.get(createdBy, "") if createdBy else "" + createdBy = automation.get("sysCreatedBy") + automation["sysCreatedByUserName"] = usersMap.get(createdBy, "") if createdBy else "" mandateId = automation.get("mandateId") automation["mandateName"] = mandatesMap.get(mandateId, "") if mandateId else "" @@ -295,7 +302,7 @@ class AutomationObjects: Args: automationId: ID of the automation to get - includeSystemFields: If True, returns raw dict with system fields (_createdBy, etc). + includeSystemFields: If True, returns raw dict with system fields (sysCreatedBy, etc). If False (default), returns Pydantic model without system fields. """ try: @@ -330,7 +337,7 @@ class AutomationObjects: return AutomationWithSystemFields(automation) # Clean metadata fields and return Pydantic model - cleanedRecord = {k: v for k, v in automation.items() if not k.startswith("_")} + cleanedRecord = _automationDefinitionPayload(automation) return AutomationDefinition(**cleanedRecord) except Exception as e: logger.error(f"Error getting automation definition: {str(e)}") @@ -365,7 +372,7 @@ class AutomationObjects: # Ensure database connector has correct userId context if not self.userId: - logger.error(f"createAutomationDefinition: userId is not set! Cannot set _createdBy. currentUser={self.currentUser}") + logger.error(f"createAutomationDefinition: userId is not set! Cannot set sysCreatedBy. 
currentUser={self.currentUser}") elif hasattr(self.db, 'updateContext'): try: self.db.updateContext(self.userId) @@ -386,7 +393,7 @@ class AutomationObjects: self._notifyAutomationChanged() # Clean metadata fields and return Pydantic model - cleanedRecord = {k: v for k, v in createdAutomation.items() if not k.startswith("_")} + cleanedRecord = _automationDefinitionPayload(createdAutomation) return AutomationDefinition(**cleanedRecord) except Exception as e: logger.error(f"Error creating automation definition: {str(e)}") @@ -446,7 +453,7 @@ class AutomationObjects: self._notifyAutomationChanged() # Clean metadata fields and return Pydantic model - cleanedRecord = {k: v for k, v in updatedAutomation.items() if not k.startswith("_")} + cleanedRecord = _automationDefinitionPayload(updatedAutomation) return AutomationDefinition(**cleanedRecord) except Exception as e: logger.error(f"Error updating automation definition: {str(e)}") @@ -561,7 +568,7 @@ class AutomationObjects: # Collect unique user IDs userIds = set() for template in templates: - createdBy = template.get("_createdBy") + createdBy = template.get("sysCreatedBy") if createdBy: userIds.add(createdBy) @@ -585,8 +592,8 @@ class AutomationObjects: # Apply to templates — SECURITY: no fallback, empty if not found for template in templates: - createdBy = template.get("_createdBy") - template["_createdByUserName"] = userNameMap.get(createdBy, "") if createdBy else "" + createdBy = template.get("sysCreatedBy") + template["sysCreatedByUserName"] = userNameMap.get(createdBy, "") if createdBy else "" except Exception as e: logger.warning(f"Could not enrich templates with user names: {e}") diff --git a/modules/features/automation/routeFeatureAutomation.py b/modules/features/automation/routeFeatureAutomation.py index 48f53eea..c6343b25 100644 --- a/modules/features/automation/routeFeatureAutomation.py +++ b/modules/features/automation/routeFeatureAutomation.py @@ -77,8 +77,8 @@ def get_automations( # If pagination was requested, result is PaginatedResult # If no pagination, result is List[Dict] - # Note: Using JSONResponse to bypass Pydantic validation which would filter out _createdBy - # The enriched fields (_createdByUserName, mandateName) are not in the Pydantic model + # Note: Using JSONResponse to bypass Pydantic validation which would filter out sysCreatedBy + # The enriched fields (sysCreatedByUserName, mandateName) are not in the Pydantic model from fastapi.responses import JSONResponse if paginationParams: diff --git a/modules/features/automation2/datamodelFeatureAutomation2.py b/modules/features/automation2/datamodelFeatureAutomation2.py index f505c7d0..99d3b292 100644 --- a/modules/features/automation2/datamodelFeatureAutomation2.py +++ b/modules/features/automation2/datamodelFeatureAutomation2.py @@ -4,6 +4,7 @@ from typing import Dict, Any, List, Optional from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels import uuid @@ -52,7 +53,7 @@ registerModelLabels( ) -class Automation2WorkflowRun(BaseModel): +class Automation2WorkflowRun(PowerOnModel): id: str = Field( default_factory=lambda: str(uuid.uuid4()), description="Primary key", @@ -98,7 +99,7 @@ registerModelLabels( ) -class Automation2HumanTask(BaseModel): +class Automation2HumanTask(PowerOnModel): id: str = Field( default_factory=lambda: str(uuid.uuid4()), description="Primary key", diff --git a/modules/features/automation2/routeFeatureAutomation2.py 
b/modules/features/automation2/routeFeatureAutomation2.py index 996c3cb6..5b087f83 100644 --- a/modules/features/automation2/routeFeatureAutomation2.py +++ b/modules/features/automation2/routeFeatureAutomation2.py @@ -359,7 +359,7 @@ def get_workflows( active_run = None last_started_at = None for r in runs: - ts = r.get("_createdAt") + ts = r.get("sysCreatedAt") if ts and (last_started_at is None or ts > last_started_at): last_started_at = ts if r.get("status") in ("running", "paused"): @@ -375,7 +375,7 @@ def get_workflows( "runStatus": active_run.get("status") if active_run else None, "stuckAtNodeId": stuck_at_node_id, "stuckAtNodeLabel": stuck_at_node_label or stuck_at_node_id or "", - "createdAt": wf.get("_createdAt"), + "createdAt": wf.get("sysCreatedAt"), "lastStartedAt": last_started_at, }) return {"workflows": enriched} @@ -536,7 +536,7 @@ def get_tasks( context: RequestContext = Depends(getRequestContext), ) -> dict: """Get tasks - by default those assigned to current user, or all if no assignee filter. - Enriches each task with workflowLabel and createdAt (_createdAt). + Enriches each task with workflowLabel and createdAt (from sysCreatedAt). """ mandateId = _validateInstanceAccess(instanceId, context) a2 = getAutomation2Interface(context.user, mandateId, instanceId) @@ -549,7 +549,7 @@ def get_tasks( enriched.append({ **t, "workflowLabel": wf.get("label", t.get("workflowId", "")) if wf else t.get("workflowId", ""), - "createdAt": t.get("_createdAt"), + "createdAt": t.get("sysCreatedAt"), }) return {"tasks": enriched} diff --git a/modules/features/chatbot/interfaceFeatureChatbot.py b/modules/features/chatbot/interfaceFeatureChatbot.py index 4a03bec9..151a96ce 100644 --- a/modules/features/chatbot/interfaceFeatureChatbot.py +++ b/modules/features/chatbot/interfaceFeatureChatbot.py @@ -20,6 +20,7 @@ from modules.datamodels.datamodelRbac import AccessRuleContext from modules.datamodels.datamodelUam import AccessLevel from modules.datamodels.datamodelChat import UserInputRequest +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.timeUtils import getUtcTimestamp, parseTimestamp # ============================================================================= @@ -27,7 +28,7 @@ from modules.shared.timeUtils import getUtcTimestamp, parseTimestamp # ============================================================================= -class ChatbotDocument(BaseModel): +class ChatbotDocument(PowerOnModel): """Documents attached to chatbot messages.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") messageId: str = Field(description="Foreign key to message") @@ -41,7 +42,7 @@ class ChatbotDocument(BaseModel): actionId: Optional[str] = Field(None, description="ID of the action that created this document") -class ChatbotMessage(BaseModel): +class ChatbotMessage(PowerOnModel): """Messages in chatbot conversations. 
Must match bridge format in memory.py.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") conversationId: str = Field(description="Foreign key to conversation") @@ -64,7 +65,7 @@ class ChatbotMessage(BaseModel): actionProgress: Optional[str] = Field(None, description="Action progress status") -class ChatbotLog(BaseModel): +class ChatbotLog(PowerOnModel): """Log entries for chatbot conversations.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") conversationId: str = Field(description="Foreign key to conversation") @@ -85,7 +86,7 @@ class ChatbotWorkflowModeEnum(str, Enum): WORKFLOW_CHATBOT = "Chatbot" -class ChatbotConversation(BaseModel): +class ChatbotConversation(PowerOnModel): """Chatbot conversation container. Per feature-instance isolation via featureInstanceId.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") featureInstanceId: str = Field(description="Feature instance ID for per-instance isolation") @@ -328,9 +329,8 @@ class ChatObjects: objectFields[fieldName] = value else: # Field not in model - treat as scalar if simple, otherwise filter out - # BUT: always include metadata fields (_createdBy, _createdAt, etc.) as they're handled by connector + # Underscore-prefixed keys (e.g. UI meta) pass through; sys* live on PowerOnModel subclasses if fieldName.startswith("_"): - # Metadata fields should be passed through to connector simpleFields[fieldName] = value elif isinstance(value, (str, int, float, bool, type(None))): simpleFields[fieldName] = value diff --git a/modules/features/commcoach/datamodelCommcoach.py b/modules/features/commcoach/datamodelCommcoach.py index bd94f173..635ba19a 100644 --- a/modules/features/commcoach/datamodelCommcoach.py +++ b/modules/features/commcoach/datamodelCommcoach.py @@ -7,6 +7,8 @@ Pydantic models for coaching contexts, sessions, messages, tasks, scores, and us from typing import Optional, List, Dict, Any from pydantic import BaseModel, Field from enum import Enum + +from modules.datamodels.datamodelBase import PowerOnModel import uuid @@ -73,7 +75,7 @@ class CoachingScoreTrend(str, Enum): # Database Models # ============================================================================ -class CoachingContext(BaseModel): +class CoachingContext(PowerOnModel): """A coaching context/dossier representing a topic the user is working on.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) userId: str = Field(description="Owner user ID (strict ownership)") @@ -91,11 +93,9 @@ class CoachingContext(BaseModel): lastSessionAt: Optional[str] = Field(default=None) rollingOverview: Optional[str] = Field(default=None, description="AI summary of older sessions for long context history") rollingOverviewUpToSessionCount: Optional[int] = Field(default=None, description="Session count covered by rollingOverview") - createdAt: Optional[str] = Field(default=None) - updatedAt: Optional[str] = Field(default=None) -class CoachingSession(BaseModel): +class CoachingSession(PowerOnModel): """A single coaching conversation session within a context.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) contextId: str = Field(description="FK to CoachingContext") @@ -115,11 +115,9 @@ class CoachingSession(BaseModel): emailSent: bool = Field(default=False) startedAt: Optional[str] = Field(default=None) endedAt: Optional[str] = Field(default=None) - createdAt: Optional[str] = Field(default=None) - updatedAt: Optional[str] = Field(default=None) -class 
CoachingMessage(BaseModel): +class CoachingMessage(PowerOnModel): """A single message in a coaching session.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) sessionId: str = Field(description="FK to CoachingSession") @@ -130,10 +128,9 @@ class CoachingMessage(BaseModel): contentType: CoachingMessageContentType = Field(default=CoachingMessageContentType.TEXT) audioRef: Optional[str] = Field(default=None, description="Reference to audio file") metadata: Optional[str] = Field(default=None, description="JSON: token count, voice info, etc.") - createdAt: Optional[str] = Field(default=None) -class CoachingTask(BaseModel): +class CoachingTask(PowerOnModel): """A task/checklist item assigned within a coaching context.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) contextId: str = Field(description="FK to CoachingContext") @@ -146,11 +143,9 @@ class CoachingTask(BaseModel): priority: CoachingTaskPriority = Field(default=CoachingTaskPriority.MEDIUM) dueDate: Optional[str] = Field(default=None) completedAt: Optional[str] = Field(default=None) - createdAt: Optional[str] = Field(default=None) - updatedAt: Optional[str] = Field(default=None) -class CoachingScore(BaseModel): +class CoachingScore(PowerOnModel): """A competence score for a dimension, recorded after a session.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) contextId: str = Field(description="FK to CoachingContext") @@ -161,10 +156,9 @@ class CoachingScore(BaseModel): score: float = Field(ge=0.0, le=100.0) trend: CoachingScoreTrend = Field(default=CoachingScoreTrend.STABLE) evidence: Optional[str] = Field(default=None, description="AI reasoning for the score") - createdAt: Optional[str] = Field(default=None) -class CoachingUserProfile(BaseModel): +class CoachingUserProfile(PowerOnModel): """Per-user coaching profile and preferences.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) userId: str = Field(description="Owner user ID") @@ -178,15 +172,13 @@ class CoachingUserProfile(BaseModel): totalSessions: int = Field(default=0) totalMinutes: int = Field(default=0) lastSessionAt: Optional[str] = Field(default=None) - createdAt: Optional[str] = Field(default=None) - updatedAt: Optional[str] = Field(default=None) # ============================================================================ # Iteration 2: Personas # ============================================================================ -class CoachingPersona(BaseModel): +class CoachingPersona(PowerOnModel): """A roleplay persona for coaching sessions.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) userId: str = Field(description="Owner user ID ('system' for builtins)") @@ -199,15 +191,13 @@ class CoachingPersona(BaseModel): gender: Optional[str] = Field(default=None, description="m or f") category: str = Field(default="builtin", description="'builtin' or 'custom'") isActive: bool = Field(default=True) - createdAt: Optional[str] = Field(default=None) - updatedAt: Optional[str] = Field(default=None) # ============================================================================ # Iteration 2: Badges / Gamification # ============================================================================ -class CoachingBadge(BaseModel): +class CoachingBadge(PowerOnModel): """An achievement badge awarded to a user.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) userId: str = Field(description="Owner user ID") @@ -215,7 +205,6 @@ class CoachingBadge(BaseModel): instanceId: str = Field(description="Feature instance ID") 
badgeKey: str = Field(description="Badge identifier, e.g. 'streak_7'") awardedAt: Optional[str] = Field(default=None) - createdAt: Optional[str] = Field(default=None) # ============================================================================ diff --git a/modules/features/neutralization/datamodelFeatureNeutralizer.py b/modules/features/neutralization/datamodelFeatureNeutralizer.py index 3aea7632..a8ed5981 100644 --- a/modules/features/neutralization/datamodelFeatureNeutralizer.py +++ b/modules/features/neutralization/datamodelFeatureNeutralizer.py @@ -6,6 +6,7 @@ import uuid from enum import Enum from typing import Optional from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels @@ -16,7 +17,7 @@ class DataScope(str, Enum): GLOBAL = "global" -class DataNeutraliserConfig(BaseModel): +class DataNeutraliserConfig(PowerOnModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the configuration", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) mandateId: str = Field(description="ID of the mandate this configuration belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}) featureInstanceId: str = Field(description="ID of the feature instance this configuration belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}) diff --git a/modules/features/realEstate/datamodelFeatureRealEstate.py b/modules/features/realEstate/datamodelFeatureRealEstate.py index 31efbc07..8f136056 100644 --- a/modules/features/realEstate/datamodelFeatureRealEstate.py +++ b/modules/features/realEstate/datamodelFeatureRealEstate.py @@ -7,6 +7,7 @@ Implements a general Swiss architecture planning data model. 
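The model conversions in this patch all follow one recipe: swap BaseModel for PowerOnModel and drop the hand-maintained createdAt/updatedAt style columns. PowerOnModel itself is imported from modules/datamodels/datamodelBase.py but its definition is not part of this diff; the following is only a minimal sketch of what it presumably provides, with the numeric timestamp type inferred from the "sysCreatedAt or 0" sorting further down and everything else an assumption:

    # Sketch only: the real base class lives in modules/datamodels/datamodelBase.py
    # and is not shown in this patch; field types and config are assumptions.
    from typing import Optional

    from pydantic import BaseModel, Field


    class PowerOnModel(BaseModel):
        """Base for DB-backed models: the four system attributes are stamped by
        the DatabaseConnector on create/modify, so subclasses no longer declare
        their own createdAt/updatedAt columns."""

        sysCreatedAt: Optional[float] = Field(default=None, description="Creation timestamp (connector-set)")
        sysCreatedBy: Optional[str] = Field(default=None, description="Creating user ID (connector-set)")
        sysModifiedAt: Optional[float] = Field(default=None, description="Last modification timestamp (connector-set)")
        sysModifiedBy: Optional[str] = Field(default=None, description="Last modifying user ID (connector-set)")

        # Assumed: tolerate extra keys from SQL rows so call sites can validate
        # raw records directly, e.g. User.model_validate(record) or Model(**dict(record)).
        model_config = {"extra": "ignore"}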
from typing import List, Dict, Any, Optional, ForwardRef from enum import Enum from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels from modules.shared.timeUtils import getUtcTimestamp import uuid @@ -178,7 +179,7 @@ class Dokument(BaseModel): ) -class Kontext(BaseModel): +class Kontext(PowerOnModel): """Supporting data object for flexible additional information.""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), @@ -248,7 +249,7 @@ class Land(BaseModel): ) -class Kanton(BaseModel): +class Kanton(PowerOnModel): """Cantonal level administrative entity.""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), @@ -368,7 +369,7 @@ class Gemeinde(BaseModel): ParzelleRef = ForwardRef('Parzelle') -class Parzelle(BaseModel): +class Parzelle(PowerOnModel): """Represents a plot with all building law properties.""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), @@ -594,7 +595,7 @@ class Parzelle(BaseModel): ) -class Projekt(BaseModel): +class Projekt(PowerOnModel): """Core object representing a construction project.""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), diff --git a/modules/features/teamsbot/datamodelTeamsbot.py b/modules/features/teamsbot/datamodelTeamsbot.py index bc17642f..f19b4c6c 100644 --- a/modules/features/teamsbot/datamodelTeamsbot.py +++ b/modules/features/teamsbot/datamodelTeamsbot.py @@ -9,6 +9,8 @@ from pydantic import BaseModel, Field from enum import Enum import uuid +from modules.datamodels.datamodelBase import PowerOnModel + # ============================================================================ # Enums @@ -72,7 +74,7 @@ class TeamsbotTransferMode(str, Enum): # Database Models (stored in PostgreSQL) # ============================================================================ -class TeamsbotSession(BaseModel): +class TeamsbotSession(PowerOnModel): """A Teams Bot meeting session.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Session ID") instanceId: str = Field(description="Feature instance ID (FK)") @@ -90,11 +92,9 @@ class TeamsbotSession(BaseModel): errorMessage: Optional[str] = Field(default=None, description="Error message if status is ERROR") transcriptSegmentCount: int = Field(default=0, description="Number of transcript segments in this session") botResponseCount: int = Field(default=0, description="Number of bot responses in this session") - creationDate: Optional[str] = Field(default=None, description="ISO timestamp of record creation") - lastModified: Optional[str] = Field(default=None, description="ISO timestamp of last modification") -class TeamsbotTranscript(BaseModel): +class TeamsbotTranscript(PowerOnModel): """A single transcript segment from the meeting.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Transcript segment ID") sessionId: str = Field(description="Session ID (FK)") @@ -105,10 +105,9 @@ class TeamsbotTranscript(BaseModel): language: Optional[str] = Field(default=None, description="Detected language code (e.g., de-DE)") isFinal: bool = Field(default=True, description="Whether this is a final or interim result") source: Optional[str] = Field(default=None, description="Source: caption, audioCapture, chat, chatHistory, speakerHint") - creationDate: Optional[str] = Field(default=None, description="ISO timestamp of record creation") -class TeamsbotBotResponse(BaseModel): +class TeamsbotBotResponse(PowerOnModel): """A bot response generated 
during a meeting session.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Response ID") sessionId: str = Field(description="Session ID (FK)") @@ -121,14 +120,13 @@ class TeamsbotBotResponse(BaseModel): processingTime: float = Field(default=0.0, description="Processing time in seconds") priceCHF: float = Field(default=0.0, description="Cost of this AI call in CHF") timestamp: Optional[str] = Field(default=None, description="ISO timestamp of the response") - creationDate: Optional[str] = Field(default=None, description="ISO timestamp of record creation") # ============================================================================ # System Bot Accounts (stored in PostgreSQL, credentials encrypted) # ============================================================================ -class TeamsbotSystemBot(BaseModel): +class TeamsbotSystemBot(PowerOnModel): """A system bot account for authenticated meeting joins. Credentials are stored encrypted in the database, NOT in the UI-visible config. Only mandate admins can manage system bots.""" @@ -138,15 +136,13 @@ class TeamsbotSystemBot(BaseModel): email: str = Field(description="Microsoft account email") encryptedPassword: str = Field(description="Encrypted Microsoft account password") isActive: bool = Field(default=True, description="Whether this bot account is active") - creationDate: Optional[str] = Field(default=None, description="ISO timestamp of creation") - lastModified: Optional[str] = Field(default=None, description="ISO timestamp of last modification") # ============================================================================ # User Account Credentials (stored in PostgreSQL, credentials encrypted) # ============================================================================ -class TeamsbotUserAccount(BaseModel): +class TeamsbotUserAccount(PowerOnModel): """Saved Microsoft credentials for 'Mein Account' joins. Each user can store their own MS credentials per mandate. Password is encrypted; on login only MFA confirmation is needed.""" @@ -156,15 +152,13 @@ class TeamsbotUserAccount(BaseModel): email: str = Field(description="Microsoft account email") encryptedPassword: str = Field(description="Encrypted Microsoft account password") displayName: Optional[str] = Field(default=None, description="Display name derived from MS account") - creationDate: Optional[str] = Field(default=None, description="ISO timestamp of creation") - lastModified: Optional[str] = Field(default=None, description="ISO timestamp of last modification") # ============================================================================ # Per-User Settings (stored in PostgreSQL, per user per instance) # ============================================================================ -class TeamsbotUserSettings(BaseModel): +class TeamsbotUserSettings(PowerOnModel): """Per-user settings for the Teams Bot feature. Each user has their own settings per feature instance. 
These override the instance-level defaults (TeamsbotConfig).""" @@ -182,8 +176,6 @@ class TeamsbotUserSettings(BaseModel): triggerCooldownSeconds: Optional[int] = Field(default=None, description="Trigger cooldown override") contextWindowSegments: Optional[int] = Field(default=None, description="Context window override") debugMode: Optional[bool] = Field(default=None, description="Debug mode override") - creationDate: Optional[str] = Field(default=None, description="ISO timestamp of creation") - lastModified: Optional[str] = Field(default=None, description="ISO timestamp of last modification") # ============================================================================ diff --git a/modules/features/trustee/datamodelFeatureTrustee.py b/modules/features/trustee/datamodelFeatureTrustee.py index 538414a0..0889e361 100644 --- a/modules/features/trustee/datamodelFeatureTrustee.py +++ b/modules/features/trustee/datamodelFeatureTrustee.py @@ -5,11 +5,13 @@ from enum import Enum from typing import Optional from pydantic import BaseModel, Field + +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels import uuid -class TrusteeOrganisation(BaseModel): +class TrusteeOrganisation(PowerOnModel): """Represents trustee organisations (companies) within the Trustee feature.""" id: str = Field( # Unique string label (PK), not UUID description="Unique organisation identifier (label)", @@ -55,7 +57,7 @@ class TrusteeOrganisation(BaseModel): } ) # System attributes are automatically set by DatabaseConnector: - # _createdAt, _modifiedAt, _createdBy, _modifiedBy + # sysCreatedAt, sysModifiedAt, sysCreatedBy, sysModifiedBy (PowerOnModel) registerModelLabels( @@ -71,7 +73,7 @@ registerModelLabels( ) -class TrusteeRole(BaseModel): +class TrusteeRole(PowerOnModel): """Defines roles within the Trustee feature.""" id: str = Field( # Unique string label (PK), not UUID description="Unique role identifier (label)", @@ -122,7 +124,7 @@ registerModelLabels( ) -class TrusteeAccess(BaseModel): +class TrusteeAccess(PowerOnModel): """Defines user access to organisations with specific roles.""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), @@ -207,7 +209,7 @@ registerModelLabels( ) -class TrusteeContract(BaseModel): +class TrusteeContract(PowerOnModel): """Defines customer contracts within organisations.""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), @@ -289,7 +291,7 @@ class TrusteeDocumentTypeEnum(str, Enum): AUTO = "auto" -class TrusteeDocument(BaseModel): +class TrusteeDocument(PowerOnModel): """Contains document references for bookings. Documents reference files in the central Files table via fileId. @@ -413,7 +415,7 @@ registerModelLabels( ) -class TrusteePosition(BaseModel): +class TrusteePosition(PowerOnModel): """Contains booking positions (expense entries). A position can have up to two document references: documentId (Beleg) and bankDocumentId (Bank-Referenz). 
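The "System attributes are automatically set by DatabaseConnector" note above is the contract the rest of this patch leans on: models never write the sys* fields themselves. The connector is not shown here, so this is only an illustrative sketch of the stamping step; stampSystemFields is an invented name, while getUtcTimestamp is the real helper imported elsewhere in this patch:

    # Illustrative sketch of the connector-side stamping described above.
    # stampSystemFields is a hypothetical name, not a function from this codebase.
    from modules.shared.timeUtils import getUtcTimestamp


    def stampSystemFields(record: dict, userId: str, isNew: bool) -> dict:
        """Set creation fields once, modification fields on every write."""
        now = getUtcTimestamp()
        if isNew:
            record.setdefault("sysCreatedAt", now)
            record.setdefault("sysCreatedBy", userId)
        record["sysModifiedAt"] = now
        record["sysModifiedBy"] = userId
        return record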
@@ -696,10 +698,6 @@ class TrusteePosition(BaseModel): } ) - # Allow extra fields like _createdAt from database - model_config = {"extra": "allow"} - - registerModelLabels( "TrusteePosition", {"en": "Position", "fr": "Position", "de": "Position"}, @@ -739,7 +737,7 @@ registerModelLabels( # ── TrusteeData* tables (synced from external accounting apps for analysis) ── -class TrusteeDataAccount(BaseModel): +class TrusteeDataAccount(PowerOnModel): """Chart of accounts synced from external accounting system.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) accountNumber: str = Field(description="Account number (e.g. '1020')") @@ -769,7 +767,7 @@ registerModelLabels( ) -class TrusteeDataJournalEntry(BaseModel): +class TrusteeDataJournalEntry(PowerOnModel): """Journal entry header synced from external accounting system.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) externalId: Optional[str] = Field(default=None, description="ID in the source system") @@ -799,7 +797,7 @@ registerModelLabels( ) -class TrusteeDataJournalLine(BaseModel): +class TrusteeDataJournalLine(PowerOnModel): """Journal entry line (debit/credit) synced from external accounting system.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) journalEntryId: str = Field(description="FK → TrusteeDataJournalEntry.id") @@ -833,7 +831,7 @@ registerModelLabels( ) -class TrusteeDataContact(BaseModel): +class TrusteeDataContact(PowerOnModel): """Customer or vendor synced from external accounting system.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) externalId: Optional[str] = Field(default=None, description="ID in the source system") @@ -873,7 +871,7 @@ registerModelLabels( ) -class TrusteeDataAccountBalance(BaseModel): +class TrusteeDataAccountBalance(PowerOnModel): """Account balance per period, derived from journal lines or directly from accounting system.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) accountNumber: str = Field(description="Account number") @@ -907,7 +905,7 @@ registerModelLabels( ) -class TrusteeAccountingConfig(BaseModel): +class TrusteeAccountingConfig(PowerOnModel): """Per-instance accounting system configuration with encrypted credentials. Each feature instance can connect to exactly one accounting system. @@ -946,7 +944,7 @@ registerModelLabels( ) -class TrusteeAccountingSync(BaseModel): +class TrusteeAccountingSync(PowerOnModel): """Tracks which position was synced to which external system and when. Used for duplicate prevention, audit trail, and retry logic. 
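TrusteeAccountingSync is only declared in this diff; the lookup side of its "duplicate prevention" job is not shown. A hedged sketch of that check, using the getRecordset connector API seen throughout the patch (the positionId/targetSystem filter keys are assumptions, since the model's field list is elided here):

    # Hedged sketch of a duplicate-prevention lookup before re-syncing a position.
    # The filter keys are assumptions; TrusteeAccountingSync's fields are elided above.
    from typing import Any

    from modules.features.trustee.datamodelFeatureTrustee import TrusteeAccountingSync


    def alreadySynced(db: Any, positionId: str, targetSystem: str) -> bool:
        """True if a sync record already exists for this position/system pair."""
        rows = db.getRecordset(
            TrusteeAccountingSync,
            recordFilter={"positionId": positionId, "targetSystem": targetSystem},
        )
        return bool(rows)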
diff --git a/modules/features/trustee/interfaceFeatureTrustee.py b/modules/features/trustee/interfaceFeatureTrustee.py index b9a95005..7ed6fcff 100644 --- a/modules/features/trustee/interfaceFeatureTrustee.py +++ b/modules/features/trustee/interfaceFeatureTrustee.py @@ -1152,7 +1152,7 @@ class TrusteeObjects: logger.warning(f"Document {documentId} not found") return None - createdBy = existing.get("_createdBy") + createdBy = existing.get("sysCreatedBy") # Check system RBAC permission (userreport can only edit their own records) if not self.checkCombinedPermission(TrusteeDocument, "update", recordCreatedBy=createdBy): @@ -1178,7 +1178,7 @@ class TrusteeObjects: logger.warning(f"Document {documentId} not found") return False - createdBy = existing.get("_createdBy") + createdBy = existing.get("sysCreatedBy") if not self.checkCombinedPermission(TrusteeDocument, "delete", recordCreatedBy=createdBy): logger.warning(f"User {self.userId} lacks permission to delete document") @@ -1198,7 +1198,7 @@ class TrusteeObjects: def _toTrusteePositionOrDelete(self, rawRecord: Dict[str, Any], deleteCorrupt: bool = True) -> Optional[TrusteePosition]: """Build TrusteePosition safely; optionally delete irreparably corrupt records.""" - cleanRecord = {k: v for k, v in (rawRecord or {}).items() if not k.startswith("_") or k == "_createdAt"} + cleanRecord = {k: v for k, v in (rawRecord or {}).items() if not k.startswith("_")} if not cleanRecord: return None @@ -1271,7 +1271,7 @@ class TrusteeObjects: """Get all positions with RBAC filtering and optional DB-level pagination. Filtering, sorting, and pagination are handled at the SQL level. - Post-processing cleans internal fields (keeps _createdAt) and validates + Post-processing cleans internal fields (keeps sysCreatedAt) and validates each record via _toTrusteePositionOrDelete (corrupt rows are deleted). NOTE(post-process): totalItems may slightly overcount when corrupt legacy @@ -1288,7 +1288,7 @@ class TrusteeObjects: featureCode=self.FEATURE_CODE ) - keepFields = {'_createdAt'} + keepFields = {'sysCreatedAt'} def _cleanAndValidate(records): items = [] @@ -1369,7 +1369,7 @@ class TrusteeObjects: logger.warning(f"Position {positionId} not found") return None - createdBy = existing.get("_createdBy") + createdBy = existing.get("sysCreatedBy") # Check system RBAC permission (userreport can only edit their own records) if not self.checkCombinedPermission(TrusteePosition, "update", recordCreatedBy=createdBy): @@ -1391,7 +1391,7 @@ class TrusteeObjects: logger.warning(f"Position {positionId} not found") return False - createdBy = existing.get("_createdBy") + createdBy = existing.get("sysCreatedBy") if not self.checkCombinedPermission(TrusteePosition, "delete", recordCreatedBy=createdBy): logger.warning(f"User {self.userId} lacks permission to delete position") diff --git a/modules/features/workspace/datamodelFeatureWorkspace.py b/modules/features/workspace/datamodelFeatureWorkspace.py index 7c718d67..d7c292db 100644 --- a/modules/features/workspace/datamodelFeatureWorkspace.py +++ b/modules/features/workspace/datamodelFeatureWorkspace.py @@ -4,11 +4,12 @@ from typing import Optional from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels import uuid -class WorkspaceUserSettings(BaseModel): +class WorkspaceUserSettings(PowerOnModel): """Per-user workspace settings. 
None values mean 'use instance default'.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) userId: str = Field(description="User ID", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}) diff --git a/modules/features/workspace/mainWorkspace.py b/modules/features/workspace/mainWorkspace.py index c502a82e..5ef9b399 100644 --- a/modules/features/workspace/mainWorkspace.py +++ b/modules/features/workspace/mainWorkspace.py @@ -128,7 +128,7 @@ TEMPLATE_ROLES = [ "accessRules": [ {"context": "UI", "item": None, "view": True}, {"context": "RESOURCE", "item": None, "view": True}, - # DATA: never ALL in shared instances — every role (including admin) sees only _createdBy = self + # DATA: never ALL in shared instances — every role (including admin) sees only sysCreatedBy = self {"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "m"}, ] }, diff --git a/modules/interfaces/interfaceBootstrap.py b/modules/interfaces/interfaceBootstrap.py index 0a3e24ad..0fb48ffe 100644 --- a/modules/interfaces/interfaceBootstrap.py +++ b/modules/interfaces/interfaceBootstrap.py @@ -11,7 +11,7 @@ Multi-Tenant Design: """ import logging -from typing import Optional, Dict +from typing import Optional, Dict, Set, Tuple from passlib.context import CryptContext from modules.connectors.connectorDbPostgre import DatabaseConnector from modules.shared.configuration import APP_CONFIG @@ -38,6 +38,120 @@ pwdContext = CryptContext(schemes=["argon2"], deprecated="auto") # Cache for role IDs (roleLabel -> roleId) _roleIdCache: Dict[str, str] = {} +# Historical PostgreSQL column identifiers (pre-sys*). Used only in _migrateSystemFieldColumns SQL. +_LEGACY_SYS_PAIR_RENAMES: Tuple[Tuple[str, str], ...] 
= ( + ("_createdAt", "sysCreatedAt"), + ("_createdBy", "sysCreatedBy"), + ("_modifiedAt", "sysModifiedAt"), + ("_modifiedBy", "sysModifiedBy"), +) + + +def _getPublicTableColumns(db: DatabaseConnector, tableName: str) -> Set[str]: + """Column names for a quoted PostgreSQL table (exact case in information_schema).""" + try: + with db.connection.cursor() as cursor: + cursor.execute( + """ + SELECT column_name FROM information_schema.columns + WHERE table_schema = 'public' AND table_name = %s + """, + (tableName,), + ) + return {row["column_name"] for row in cursor.fetchall()} + except Exception as e: + logger.warning(f"_getPublicTableColumns failed for {tableName}: {e}") + return set() + + +def _migrateSystemFieldColumns(db: DatabaseConnector) -> None: + """Backfill sys* from older physical columns and business duplicates where sys* IS NULL (idempotent).""" + businessFieldMigrations: Dict[str, Dict[str, str]] = { + "FileFolder": {"createdAt": "sysCreatedAt"}, + "FileItem": {"creationDate": "sysCreatedAt"}, + "Invitation": {"createdAt": "sysCreatedAt", "createdBy": "sysCreatedBy"}, + "FeatureDataSource": {"createdAt": "sysCreatedAt"}, + "DataSource": {"createdAt": "sysCreatedAt"}, + "UserNotification": {"createdAt": "sysCreatedAt"}, + "Token": {"createdAt": "sysCreatedAt"}, + "MessagingSubscription": {"createdBy": "sysCreatedBy", "modifiedBy": "sysModifiedBy"}, + "CoachingContext": {"createdAt": "sysCreatedAt"}, + "CoachingSession": {"createdAt": "sysCreatedAt", "updatedAt": "sysModifiedAt"}, + "CoachingMessage": {"createdAt": "sysCreatedAt"}, + "CoachingTask": {"createdAt": "sysCreatedAt", "updatedAt": "sysModifiedAt"}, + "CoachingScore": {"createdAt": "sysCreatedAt"}, + "CoachingUserProfile": {"createdAt": "sysCreatedAt", "updatedAt": "sysModifiedAt"}, + "CoachingPersona": {"createdAt": "sysCreatedAt", "updatedAt": "sysModifiedAt"}, + "CoachingBadge": {"createdAt": "sysCreatedAt"}, + "TeamsbotSession": {"creationDate": "sysCreatedAt", "lastModified": "sysModifiedAt"}, + "TeamsbotTranscript": {"creationDate": "sysCreatedAt"}, + "TeamsbotBotResponse": {"creationDate": "sysCreatedAt"}, + "TeamsbotSystemBot": {"creationDate": "sysCreatedAt", "lastModified": "sysModifiedAt"}, + "TeamsbotUserAccount": {"creationDate": "sysCreatedAt", "lastModified": "sysModifiedAt"}, + "TeamsbotUserSettings": {"creationDate": "sysCreatedAt", "lastModified": "sysModifiedAt"}, + "_system": { + k: v + for k, v in _LEGACY_SYS_PAIR_RENAMES + if k in ("_createdAt", "_modifiedAt") + }, + } + + try: + db._ensure_connection() + with db.connection.cursor() as cursor: + cursor.execute( + """ + SELECT table_name FROM information_schema.tables + WHERE table_schema = 'public' AND table_type = 'BASE TABLE' + """ + ) + tableNames = [row["table_name"] for row in cursor.fetchall()] + + totalUpdates = 0 + for table in tableNames: + cols = _getPublicTableColumns(db, table) + if not cols: + continue + + for old_col, new_col in _LEGACY_SYS_PAIR_RENAMES: + if old_col in cols and new_col in cols: + try: + with db.connection.cursor() as cursor: + cursor.execute( + f'UPDATE "{table}" SET "{new_col}" = "{old_col}" ' + f'WHERE "{new_col}" IS NULL AND "{old_col}" IS NOT NULL' + ) + totalUpdates += cursor.rowcount + db.connection.commit() + except Exception as e: + db.connection.rollback() + logger.debug(f"Column migrate skip {table}.{old_col}->{new_col}: {e}") + + biz = businessFieldMigrations.get(table) + if biz: + for old_col, new_col in biz.items(): + if old_col in cols and new_col in cols: + try: + with db.connection.cursor() as cursor: 
+ cursor.execute( + f'UPDATE "{table}" SET "{new_col}" = "{old_col}" ' + f'WHERE "{new_col}" IS NULL AND "{old_col}" IS NOT NULL' + ) + totalUpdates += cursor.rowcount + db.connection.commit() + except Exception as e: + db.connection.rollback() + logger.debug(f"Business field migrate skip {table}.{old_col}->{new_col}: {e}") + + if totalUpdates: + logger.info(f"_migrateSystemFieldColumns: backfilled {totalUpdates} cell(s) on {db.dbDatabase}") + except Exception as e: + logger.error(f"_migrateSystemFieldColumns failed: {e}") + try: + db.connection.rollback() + except Exception: + pass + def initBootstrap(db: DatabaseConnector) -> None: """ @@ -50,6 +164,9 @@ def initBootstrap(db: DatabaseConnector) -> None: # Initialize root mandate mandateId = initRootMandate(db) + + # Backfill sys* columns from legacy _* / duplicate business fields (idempotent) + _migrateSystemFieldColumns(db) # Migrate existing mandate records: description -> label _migrateMandateDescriptionToLabel(db) @@ -146,13 +263,13 @@ def initAutomationTemplates(dbApp: DatabaseConnector, adminUserId: Optional[str] """ Seed initial automation templates from subAutomationTemplates.py. Only runs if no templates exist yet (bootstrap). - Creates templates with _createdBy = admin user (SysAdmin privilege). + Creates templates with sysCreatedBy = admin user (SysAdmin privilege). NOTE: AutomationTemplate lives in poweron_automation database, not poweron_app! Args: dbApp: Database connector for poweron_app (used to get admin user if needed) - adminUserId: Admin user ID for _createdBy field + adminUserId: Admin user ID for sysCreatedBy field """ import json from modules.features.automation.subAutomationTemplates import AUTOMATION_TEMPLATES diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py index 183bedb6..2a6b0f78 100644 --- a/modules/interfaces/interfaceDbApp.py +++ b/modules/interfaces/interfaceDbApp.py @@ -187,12 +187,8 @@ class AppObjects: # Complex objects that should be filtered out objectFields[fieldName] = value else: - # Field not in model - treat as scalar if simple, otherwise filter out - # BUT: always include metadata fields (_createdBy, _createdAt, etc.) 
as they're handled by connector - if fieldName.startswith("_"): - # Metadata fields should be passed through to connector - simpleFields[fieldName] = value - elif isinstance(value, (str, int, float, bool, type(None))): + # Field not in model - pass through scalars; nested objects go to objectFields + if isinstance(value, (str, int, float, bool, type(None))): simpleFields[fieldName] = value else: objectFields[fieldName] = value @@ -528,7 +524,7 @@ class AppObjects: items = [] for record in result["items"]: - cleanedUser = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedUser = dict(record) if cleanedUser.get("roleLabels") is None: cleanedUser["roleLabels"] = [] items.append(User(**cleanedUser)) @@ -560,7 +556,7 @@ class AppObjects: # Return first matching user (should be unique) userDict = users[0] # Filter out database-specific fields - cleanedUser = {k: v for k, v in userDict.items() if not k.startswith("_")} + cleanedUser = dict(userDict) # Ensure roleLabels is always a list, not None if cleanedUser.get("roleLabels") is None: cleanedUser["roleLabels"] = [] @@ -586,7 +582,7 @@ class AppObjects: # User already filtered by RBAC, just clean fields user_dict = users[0] - cleanedUser = {k: v for k, v in user_dict.items() if not k.startswith("_")} + cleanedUser = dict(user_dict) # Ensure roleLabels is always a list, not None if cleanedUser.get("roleLabels") is None: cleanedUser["roleLabels"] = [] @@ -648,12 +644,10 @@ class AppObjects: if not self._verifyPassword(password, userRecord["hashedPassword"]): raise ValueError("Invalid password") - # Return clean User object (without password hash and internal fields) - cleanedUser = {k: v for k, v in userRecord.items() if not k.startswith("_") and k != "hashedPassword" and k != "resetToken" and k != "resetTokenExpires"} - # Ensure roleLabels is always a list - if cleanedUser.get("roleLabels") is None: - cleanedUser["roleLabels"] = [] - return User(**cleanedUser) + user = User.model_validate(userRecord) + if user.roleLabels is None: + return user.model_copy(update={"roleLabels": []}) + return user def createUser( self, @@ -877,7 +871,7 @@ class AppObjects: result = [] for userRecord in users: - cleanedUser = {k: v for k, v in userRecord.items() if not k.startswith("_")} + cleanedUser = dict(userRecord) if cleanedUser.get("roleLabels") is None: cleanedUser["roleLabels"] = [] result.append(User(**cleanedUser)) @@ -917,7 +911,7 @@ class AppObjects: ) if users: - cleanedUser = {k: v for k, v in users[0].items() if not k.startswith("_")} + cleanedUser = dict(users[0]) if cleanedUser.get("roleLabels") is None: cleanedUser["roleLabels"] = [] return User(**cleanedUser) @@ -978,7 +972,7 @@ class AppObjects: ) if users: - cleanedUser = {k: v for k, v in users[0].items() if not k.startswith("_")} + cleanedUser = dict(users[0]) if cleanedUser.get("roleLabels") is None: cleanedUser["roleLabels"] = [] return User(**cleanedUser) @@ -1041,7 +1035,7 @@ class AppObjects: logger.warning(f"Reset token expired for user {userRecord.get('id')}") return None - cleanedUser = {k: v for k, v in userRecord.items() if not k.startswith("_")} + cleanedUser = dict(userRecord) if cleanedUser.get("roleLabels") is None: cleanedUser["roleLabels"] = [] return User(**cleanedUser) @@ -1329,7 +1323,7 @@ class AppObjects: # Filter out database-specific fields filteredMandates = [] for mandate in allMandates: - cleanedMandate = {k: v for k, v in mandate.items() if not k.startswith("_")} + cleanedMandate = dict(mandate) filteredMandates.append(cleanedMandate) # If no 
pagination requested, return all items @@ -1378,7 +1372,7 @@ class AppObjects: # Filter out database-specific fields filteredMandates = [] for mandate in mandates: - cleanedMandate = {k: v for k, v in mandate.items() if not k.startswith("_")} + cleanedMandate = dict(mandate) filteredMandates.append(cleanedMandate) if not filteredMandates: return None @@ -1794,7 +1788,7 @@ class AppObjects: ) if not records: return None - cleanedRecord = {k: v for k, v in records[0].items() if not k.startswith("_")} + cleanedRecord = dict(records[0]) return UserMandate(**cleanedRecord) except Exception as e: logger.error(f"Error getting UserMandate: {e}") @@ -1817,7 +1811,7 @@ class AppObjects: ) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(UserMandate(**cleanedRecord)) return result except Exception as e: @@ -1869,7 +1863,7 @@ class AppObjects: self._ensureUserBillingAccount(userId, mandateId) self._syncSubscriptionQuantity(mandateId) - cleanedRecord = {k: v for k, v in createdRecord.items() if not k.startswith("_")} + cleanedRecord = dict(createdRecord) return UserMandate(**cleanedRecord) except Exception as e: logger.error(f"Error creating UserMandate: {e}") @@ -1999,7 +1993,7 @@ class AppObjects: ) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(UserMandate(**cleanedRecord)) return result except Exception as e: @@ -2023,7 +2017,7 @@ class AppObjects: ) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(UserMandateRole(**cleanedRecord)) return result except Exception as e: @@ -2120,7 +2114,7 @@ class AppObjects: recordFilter={"userMandateId": userMandateId, "roleId": roleId} ) if existing: - cleanedRecord = {k: v for k, v in existing[0].items() if not k.startswith("_")} + cleanedRecord = dict(existing[0]) return UserMandateRole(**cleanedRecord) userMandateRole = UserMandateRole( @@ -2128,7 +2122,7 @@ class AppObjects: roleId=roleId ) createdRecord = self.db.recordCreate(UserMandateRole, userMandateRole.model_dump()) - cleanedRecord = {k: v for k, v in createdRecord.items() if not k.startswith("_")} + cleanedRecord = dict(createdRecord) return UserMandateRole(**cleanedRecord) except Exception as e: logger.error(f"Error adding role to UserMandate: {e}") @@ -2193,7 +2187,7 @@ class AppObjects: ) if not records: return None - cleanedRecord = {k: v for k, v in records[0].items() if not k.startswith("_")} + cleanedRecord = dict(records[0]) return FeatureAccess(**cleanedRecord) except Exception as e: logger.error(f"Error getting FeatureAccess: {e}") @@ -2216,7 +2210,7 @@ class AppObjects: ) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(FeatureAccess(**cleanedRecord)) return result except Exception as e: @@ -2240,7 +2234,7 @@ class AppObjects: ) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(FeatureAccess(**cleanedRecord)) return result except Exception as e: @@ -2289,7 +2283,7 @@ class AppObjects: ) self.db.recordCreate(FeatureAccessRole, featureAccessRole.model_dump()) - cleanedRecord = {k: v for k, v in createdRecord.items() if not k.startswith("_")} + cleanedRecord = 
dict(createdRecord) return FeatureAccess(**cleanedRecord) except Exception as e: logger.error(f"Error creating FeatureAccess: {e}") @@ -2427,7 +2421,7 @@ class AppObjects: try: records = self.db.getRecordset(Invitation, recordFilter={"id": invitationId}) if records: - cleanedRecord = {k: v for k, v in records[0].items() if not k.startswith("_")} + cleanedRecord = dict(records[0]) return Invitation(**cleanedRecord) return None except Exception as e: @@ -2447,7 +2441,7 @@ class AppObjects: try: records = self.db.getRecordset(Invitation, recordFilter={"token": token}) if records: - cleanedRecord = {k: v for k, v in records[0].items() if not k.startswith("_")} + cleanedRecord = dict(records[0]) return Invitation(**cleanedRecord) return None except Exception as e: @@ -2468,7 +2462,7 @@ class AppObjects: records = self.db.getRecordset(Invitation, recordFilter={"mandateId": mandateId}) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(Invitation(**cleanedRecord)) return result except Exception as e: @@ -2486,10 +2480,10 @@ class AppObjects: List of Invitation objects """ try: - records = self.db.getRecordset(Invitation, recordFilter={"createdBy": creatorId}) + records = self.db.getRecordset(Invitation, recordFilter={"sysCreatedBy": creatorId}) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(Invitation(**cleanedRecord)) return result except Exception as e: @@ -2510,7 +2504,7 @@ class AppObjects: records = self.db.getRecordset(Invitation, recordFilter={"usedBy": usedById}) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(Invitation(**cleanedRecord)) return result except Exception as e: @@ -2531,7 +2525,7 @@ class AppObjects: records = self.db.getRecordset(Invitation, recordFilter={"targetUsername": targetUsername}) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(Invitation(**cleanedRecord)) return result except Exception as e: @@ -2558,13 +2552,10 @@ class AppObjects: items = [] for record in result["items"]: - cleanedRecord = { - k: v for k, v in record.items() - if not k.startswith("_") and k not in ["hashedPassword", "resetToken", "resetTokenExpires"] - } - if cleanedRecord.get("roleLabels") is None: - cleanedRecord["roleLabels"] = [] - items.append(User(**cleanedRecord)) + user = User.model_validate(record) + if user.roleLabels is None: + user = user.model_copy(update={"roleLabels": []}) + items.append(user) if pagination is None: return items @@ -2593,7 +2584,7 @@ class AppObjects: try: records = self.db.getRecordset(UserMandate, recordFilter={"id": userMandateId}) if records: - cleanedRecord = {k: v for k, v in records[0].items() if not k.startswith("_")} + cleanedRecord = dict(records[0]) return UserMandate(**cleanedRecord) return None except Exception as e: @@ -2614,7 +2605,7 @@ class AppObjects: records = self.db.getRecordset(UserMandateRole, recordFilter={"roleId": roleId}) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(UserMandateRole(**cleanedRecord)) return result except Exception as e: @@ -2634,7 +2625,7 @@ class AppObjects: try: records = 
self.db.getRecordset(FeatureInstance, recordFilter={"id": instanceId}) if records: - cleanedRecord = {k: v for k, v in records[0].items() if not k.startswith("_")} + cleanedRecord = dict(records[0]) return FeatureInstance(**cleanedRecord) return None except Exception as e: @@ -2654,7 +2645,7 @@ class AppObjects: try: records = self.db.getRecordset(Feature, recordFilter={"code": featureCode}) if records: - cleanedRecord = {k: v for k, v in records[0].items() if not k.startswith("_")} + cleanedRecord = dict(records[0]) return Feature(**cleanedRecord) return None except Exception as e: @@ -2679,7 +2670,7 @@ class AppObjects: records = self.db.getRecordset(FeatureInstance, recordFilter=recordFilter) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(FeatureInstance(**cleanedRecord)) return result except Exception as e: @@ -2703,7 +2694,7 @@ class AppObjects: try: records = self.db.getRecordset(UserNotification, recordFilter={"id": notificationId}) if records: - cleanedRecord = {k: v for k, v in records[0].items() if not k.startswith("_")} + cleanedRecord = dict(records[0]) return UserNotification(**cleanedRecord) return None except Exception as e: @@ -2734,10 +2725,10 @@ class AppObjects: records = self.db.getRecordset(UserNotification, recordFilter=recordFilter) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(UserNotification(**cleanedRecord)) - # Sort by createdAt descending - result.sort(key=lambda x: x.createdAt or 0, reverse=True) + # Sort by sysCreatedAt descending + result.sort(key=lambda x: x.sysCreatedAt or 0, reverse=True) if limit: result = result[:limit] return result @@ -2762,7 +2753,7 @@ class AppObjects: try: records = self.db.getRecordset(AccessRule, recordFilter={"id": ruleId}) if records: - cleanedRecord = {k: v for k, v in records[0].items() if not k.startswith("_")} + cleanedRecord = dict(records[0]) return AccessRule(**cleanedRecord) return None except Exception as e: @@ -2783,7 +2774,7 @@ class AppObjects: records = self.db.getRecordset(AccessRule, recordFilter={"roleId": roleId}) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(AccessRule(**cleanedRecord)) return result except Exception as e: @@ -2804,7 +2795,7 @@ class AppObjects: records = self.db.getRecordset(Role, recordFilter={"featureInstanceId": featureInstanceId}) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(Role(**cleanedRecord)) return result except Exception as e: @@ -2829,7 +2820,7 @@ class AppObjects: records = self.db.getRecordset(Role, recordFilter=recordFilter) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(Role(**cleanedRecord)) return result except Exception as e: @@ -3028,7 +3019,7 @@ class AppObjects: ) result = [] for token_dict in tokens: - cleanedRecord = {k: v for k, v in token_dict.items() if not k.startswith("_")} + cleanedRecord = dict(token_dict) result.append(Token(**cleanedRecord)) return result except Exception as e: @@ -3049,7 +3040,7 @@ class AppObjects: ) result = [] for token_dict in tokens: - cleanedRecord = {k: v for k, v in token_dict.items() if not 
k.startswith("_")} + cleanedRecord = dict(token_dict) result.append(Token(**cleanedRecord)) return result except Exception as e: @@ -3363,7 +3354,7 @@ class AppObjects: # Filter out database-specific fields filteredRules = [] for rule in rules: - cleanedRule = {k: v for k, v in rule.items() if not k.startswith("_")} + cleanedRule = dict(rule) filteredRules.append(cleanedRule) # If no pagination requested, return all items @@ -3547,7 +3538,7 @@ class AppObjects: Role, recordFilter={"mandateId": mandateId, "featureInstanceId": None} ) - return [Role(**{k: v for k, v in r.items() if not k.startswith("_")}) for r in roles] + return [Role(**dict(r)) for r in roles] except Exception as e: logger.error(f"Error getting roles for mandate {mandateId}: {e}") return [] @@ -3568,7 +3559,7 @@ class AppObjects: items = [] for record in result["items"]: - cleanedRole = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRole = dict(record) items.append(Role(**cleanedRole)) if pagination is None: diff --git a/modules/interfaces/interfaceDbBilling.py b/modules/interfaces/interfaceDbBilling.py index 2db71bb4..343e2215 100644 --- a/modules/interfaces/interfaceDbBilling.py +++ b/modules/interfaces/interfaceDbBilling.py @@ -674,7 +674,7 @@ class BillingObjects: if startDate or endDate: filtered = [] for t in results: - createdAt = t.get("_createdAt") + createdAt = t.get("sysCreatedAt") if createdAt: tDate = createdAt.date() if isinstance(createdAt, datetime) else createdAt if startDate and tDate < startDate: @@ -684,7 +684,7 @@ class BillingObjects: filtered.append(t) results = filtered - results.sort(key=lambda x: x.get("_createdAt", ""), reverse=True) + results.sort(key=lambda x: x.get("sysCreatedAt", ""), reverse=True) return results[offset:offset + limit] except Exception as e: @@ -739,7 +739,7 @@ class BillingObjects: transactions = self.getTransactions(account["id"], limit=limit) allTransactions.extend(transactions) - allTransactions.sort(key=lambda x: x.get("_createdAt", ""), reverse=True) + allTransactions.sort(key=lambda x: x.get("sysCreatedAt", ""), reverse=True) return allTransactions[:limit] # ========================================================================= @@ -1244,7 +1244,7 @@ class BillingObjects: except Exception as e: logger.error(f"Error getting transactions for user: {e}") - allTransactions.sort(key=lambda x: x.get("_createdAt", ""), reverse=True) + allTransactions.sort(key=lambda x: x.get("sysCreatedAt", ""), reverse=True) return allTransactions[:limit] # ========================================================================= @@ -1361,7 +1361,7 @@ class BillingObjects: logger.error(f"Error getting mandate transactions: {e}") # Sort by creation date descending and limit - allTransactions.sort(key=lambda x: x.get("_createdAt", ""), reverse=True) + allTransactions.sort(key=lambda x: x.get("sysCreatedAt", ""), reverse=True) return allTransactions[:limit] # ========================================================================= @@ -1549,5 +1549,5 @@ class BillingObjects: logger.error(f"Error getting user transactions for mandates: {e}") # Sort by creation date descending and limit - allTransactions.sort(key=lambda x: x.get("_createdAt", ""), reverse=True) + allTransactions.sort(key=lambda x: x.get("sysCreatedAt", ""), reverse=True) return allTransactions[:limit] diff --git a/modules/interfaces/interfaceDbChat.py b/modules/interfaces/interfaceDbChat.py index b0d4aff3..192cbad4 100644 --- a/modules/interfaces/interfaceDbChat.py +++ 
b/modules/interfaces/interfaceDbChat.py @@ -251,9 +251,8 @@ class ChatObjects: objectFields[fieldName] = value else: # Field not in model - treat as scalar if simple, otherwise filter out - # BUT: always include metadata fields (_createdBy, _createdAt, etc.) as they're handled by connector + # Underscore-prefixed keys (e.g. UI meta) pass through; sys* live on PowerOnModel subclasses if fieldName.startswith("_"): - # Metadata fields should be passed through to connector simpleFields[fieldName] = value elif isinstance(value, (str, int, float, bool, type(None))): simpleFields[fieldName] = value @@ -885,7 +884,7 @@ class ChatObjects: "role": msg.get("role", "assistant"), "status": msg.get("status", "step"), "sequenceNr": msg.get("sequenceNr", 0), - "publishedAt": msg.get("publishedAt") or msg.get("_createdAt") or msg.get("timestamp") or 0, + "publishedAt": msg.get("publishedAt") or msg.get("sysCreatedAt") or msg.get("timestamp") or 0, "success": msg.get("success"), "actionId": msg.get("actionId"), "actionMethod": msg.get("actionMethod"), @@ -1268,7 +1267,7 @@ class ChatObjects: # CASCADE DELETE: Delete all related data first # 1. Delete message documents (but NOT the files themselves) - # Bypass RBAC -- workflow access already verified, child records may have different _createdBy + # Bypass RBAC -- workflow access already verified, child records may have different sysCreatedBy existing_docs = self.db.getRecordset(ChatDocument, recordFilter={"messageId": messageId}) for doc in existing_docs: self.db.recordDelete(ChatDocument, doc["id"]) @@ -1296,7 +1295,7 @@ class ChatObjects: # Get documents for this message from normalized table - # Bypass RBAC -- workflow access already verified, child records may have different _createdBy + # Bypass RBAC -- workflow access already verified, child records may have different sysCreatedBy documents = self.db.getRecordset(ChatDocument, recordFilter={"messageId": messageId}) if not documents: diff --git a/modules/interfaces/interfaceDbManagement.py b/modules/interfaces/interfaceDbManagement.py index 58fd6926..28842958 100644 --- a/modules/interfaces/interfaceDbManagement.py +++ b/modules/interfaces/interfaceDbManagement.py @@ -175,12 +175,7 @@ class ComponentObjects: # Complex objects that should be filtered out objectFields[fieldName] = value else: - # Field not in model - treat as scalar if simple, otherwise filter out - # BUT: always include metadata fields (_createdBy, _createdAt, etc.) 
as they're handled by connector - if fieldName.startswith("_"): - # Metadata fields should be passed through to connector - simpleFields[fieldName] = value - elif isinstance(value, (str, int, float, bool, type(None))): + if isinstance(value, (str, int, float, bool, type(None))): simpleFields[fieldName] = value else: objectFields[fieldName] = value @@ -609,7 +604,7 @@ class ComponentObjects: """ isSysAdmin = self._isSysAdmin() for prompt in prompts: - isOwner = prompt.get("_createdBy") == self.userId + isOwner = prompt.get("sysCreatedBy") == self.userId prompt["_permissions"] = { "canUpdate": isOwner or isSysAdmin, "canDelete": isOwner or isSysAdmin @@ -621,13 +616,13 @@ class ComponentObjects: Visibility rules: - SysAdmin: ALL prompts - - Regular user: own prompts (_createdBy) + system prompts (isSystem=True) + - Regular user: own prompts (sysCreatedBy) + system prompts (isSystem=True) """ if self._isSysAdmin(): return self.db.getRecordset(Prompt) # Get own prompts - ownPrompts = self.db.getRecordset(Prompt, recordFilter={"_createdBy": self.userId}) + ownPrompts = self.db.getRecordset(Prompt, recordFilter={"sysCreatedBy": self.userId}) # Get system prompts systemPrompts = self.db.getRecordset(Prompt, recordFilter={"isSystem": True}) @@ -716,7 +711,7 @@ class ComponentObjects: # Visibility check for non-SysAdmin: must be owner or system prompt if not self._isSysAdmin(): - isOwner = prompt.get("_createdBy") == self.userId + isOwner = prompt.get("sysCreatedBy") == self.userId isSystem = prompt.get("isSystem", False) if not isOwner and not isSystem: return None @@ -747,7 +742,7 @@ class ComponentObjects: raise ValueError(f"Prompt {promptId} not found") # Permission check: owner or SysAdmin - isOwner = (getattr(prompt, '_createdBy', None) == self.userId) + isOwner = (getattr(prompt, 'sysCreatedBy', None) == self.userId) if not self._isSysAdmin() and not isOwner: raise PermissionError(f"No permission to update prompt {promptId}") @@ -784,7 +779,7 @@ class ComponentObjects: return False # Permission check: owner or SysAdmin - isOwner = (getattr(prompt, '_createdBy', None) == self.userId) + isOwner = (getattr(prompt, 'sysCreatedBy', None) == self.userId) if not self._isSysAdmin() and not isOwner: raise PermissionError(f"No permission to delete prompt {promptId}") @@ -798,7 +793,7 @@ class ComponentObjects: def checkForDuplicateFile(self, fileHash: str, fileName: str) -> Optional[FileItem]: """Checks if a file with the same hash AND fileName already exists for the current user. - Duplicate = same user (_createdBy) + same fileHash + same fileName. + Duplicate = same user (sysCreatedBy) + same fileHash + same fileName. Same hash with different name is allowed (intentional copy by user). Uses direct DB query (not RBAC) because files are isolated per user. """ @@ -809,7 +804,7 @@ class ComponentObjects: matchingFiles = self.db.getRecordset( FileItem, recordFilter={ - "_createdBy": self.userId, + "sysCreatedBy": self.userId, "fileHash": fileHash, "fileName": fileName } @@ -908,7 +903,7 @@ class ComponentObjects: def _getFilesByCurrentUser(self, recordFilter: Dict[str, Any] = None) -> List[Dict[str, Any]]: """Files are always user-scoped. Returns only files owned by the current user, regardless of role (including SysAdmin). 
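Per the docstring above, file records are the one place where ownership is enforced by the query itself rather than by the RBAC layer: the filter pins sysCreatedBy, so even SysAdmin sees only their own rows. Stripped down, the pattern is (a sketch with a generic model argument, standing in for the concrete FileItem/FileFolder calls that follow):

    # Sketch of the user-scoped read used for FileItem/FileFolder in this patch:
    # ownership is baked into the filter instead of being applied by the RBAC layer.
    from typing import Any, Dict, List, Optional


    def getOwnRecords(db: Any, model: type, userId: str,
                      extraFilter: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
        """Return only rows created by userId, for every role including SysAdmin."""
        recordFilter: Dict[str, Any] = {"sysCreatedBy": userId or ""}
        if extraFilter:
            recordFilter.update(extraFilter)
        return db.getRecordset(model, recordFilter=recordFilter)

getRecordsetWithRBAC, further down in this patch, is the contrast case: there the sysCreatedBy condition is derived from MY/GROUP access levels instead of being hard-coded.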
This bypasses RBAC intentionally.""" - filterDict = {"_createdBy": self.userId} + filterDict = {"sysCreatedBy": self.userId} if recordFilter: filterDict.update(recordFilter) return self.db.getRecordset(FileItem, recordFilter=filterDict) @@ -927,7 +922,7 @@ class ComponentObjects: If pagination is provided: PaginatedResult with items and metadata """ # User-scoping filter: every user only sees their own files (bypasses RBAC SysAdmin override) - recordFilter = {"_createdBy": self.userId} + recordFilter = {"sysCreatedBy": self.userId} def _convertFileItems(files): fileItems = [] @@ -974,7 +969,7 @@ class ComponentObjects: def getFile(self, fileId: str) -> Optional[FileItem]: """Returns a file by ID if it belongs to the current user (user-scoped).""" - # Files are always user-scoped: filter by _createdBy (bypasses RBAC SysAdmin override) + # Files are always user-scoped: filter by sysCreatedBy (bypasses RBAC SysAdmin override) filteredFiles = self._getFilesByCurrentUser(recordFilter={"id": fileId}) if not filteredFiles: @@ -1151,7 +1146,7 @@ class ComponentObjects: self.db._ensure_connection() with self.db.connection.cursor() as cursor: cursor.execute( - 'SELECT "id" FROM "FileItem" WHERE "id" = ANY(%s) AND "_createdBy" = %s', + 'SELECT "id" FROM "FileItem" WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s', (uniqueIds, self.userId or ""), ) accessibleIds = [row["id"] for row in cursor.fetchall()] @@ -1162,7 +1157,7 @@ class ComponentObjects: cursor.execute('DELETE FROM "FileData" WHERE "id" = ANY(%s)', (accessibleIds,)) cursor.execute( - 'DELETE FROM "FileItem" WHERE "id" = ANY(%s) AND "_createdBy" = %s', + 'DELETE FROM "FileItem" WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s', (accessibleIds, self.userId or ""), ) deletedFiles = cursor.rowcount @@ -1207,12 +1202,12 @@ class ComponentObjects: def getFolder(self, folderId: str) -> Optional[Dict[str, Any]]: """Returns a folder by ID if it belongs to the current user.""" - folders = self.db.getRecordset(FileFolder, recordFilter={"id": folderId, "_createdBy": self.userId or ""}) + folders = self.db.getRecordset(FileFolder, recordFilter={"id": folderId, "sysCreatedBy": self.userId or ""}) return folders[0] if folders else None def listFolders(self, parentId: Optional[str] = None) -> List[Dict[str, Any]]: """List folders for current user, optionally filtered by parentId.""" - recordFilter = {"_createdBy": self.userId or ""} + recordFilter = {"sysCreatedBy": self.userId or ""} if parentId is not None: recordFilter["parentId"] = parentId return self.db.getRecordset(FileFolder, recordFilter=recordFilter) @@ -1261,7 +1256,7 @@ class ComponentObjects: self.db._ensure_connection() with self.db.connection.cursor() as cursor: cursor.execute( - 'SELECT "id" FROM "FileItem" WHERE "id" = ANY(%s) AND "_createdBy" = %s', + 'SELECT "id" FROM "FileItem" WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s', (uniqueIds, self.userId or ""), ) accessibleIds = [row["id"] for row in cursor.fetchall()] @@ -1270,8 +1265,8 @@ class ComponentObjects: raise FileNotFoundError(f"Files not found or not accessible: {missingIds}") cursor.execute( - 'UPDATE "FileItem" SET "folderId" = %s, "_modifiedAt" = %s, "_modifiedBy" = %s ' - 'WHERE "id" = ANY(%s) AND "_createdBy" = %s', + 'UPDATE "FileItem" SET "folderId" = %s, "sysModifiedAt" = %s, "sysModifiedBy" = %s ' + 'WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s', (targetFolderId, getUtcTimestamp(), self.userId or "", accessibleIds, self.userId or ""), ) movedFiles = cursor.rowcount @@ -1300,7 +1295,7 @@ class ComponentObjects: existingInTarget = 
self.db.getRecordset( FileFolder, - recordFilter={"parentId": targetParentId or "", "_createdBy": self.userId or ""}, + recordFilter={"parentId": targetParentId or "", "sysCreatedBy": self.userId or ""}, ) existingNames = {f.get("name"): f.get("id") for f in existingInTarget} movingNames: Dict[str, str] = {} @@ -1321,8 +1316,8 @@ class ComponentObjects: self.db._ensure_connection() with self.db.connection.cursor() as cursor: cursor.execute( - 'UPDATE "FileFolder" SET "parentId" = %s, "_modifiedAt" = %s, "_modifiedBy" = %s ' - 'WHERE "id" = ANY(%s) AND "_createdBy" = %s', + 'UPDATE "FileFolder" SET "parentId" = %s, "sysModifiedAt" = %s, "sysModifiedBy" = %s ' + 'WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s', (targetParentId, getUtcTimestamp(), self.userId or "", uniqueIds, self.userId or ""), ) movedFolders = cursor.rowcount @@ -1340,7 +1335,7 @@ class ComponentObjects: if not folder: raise FileNotFoundError(f"Folder {folderId} not found") - childFolders = self.db.getRecordset(FileFolder, recordFilter={"parentId": folderId, "_createdBy": self.userId or ""}) + childFolders = self.db.getRecordset(FileFolder, recordFilter={"parentId": folderId, "sysCreatedBy": self.userId or ""}) childFiles = self._getFilesByCurrentUser(recordFilter={"folderId": folderId}) if not recursive and (childFolders or childFiles): @@ -1389,7 +1384,7 @@ class ComponentObjects: self.db._ensure_connection() with self.db.connection.cursor() as cursor: cursor.execute( - 'SELECT "id" FROM "FileFolder" WHERE "id" = ANY(%s) AND "_createdBy" = %s', + 'SELECT "id" FROM "FileFolder" WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s', (uniqueIds, self.userId or ""), ) rootAccessibleIds = [row["id"] for row in cursor.fetchall()] @@ -1402,12 +1397,12 @@ class ComponentObjects: WITH RECURSIVE folder_tree AS ( SELECT "id" FROM "FileFolder" - WHERE "id" = ANY(%s) AND "_createdBy" = %s + WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s UNION ALL SELECT child."id" FROM "FileFolder" child INNER JOIN folder_tree ft ON child."parentId" = ft."id" - WHERE child."_createdBy" = %s + WHERE child."sysCreatedBy" = %s ) SELECT DISTINCT "id" FROM folder_tree """, @@ -1416,7 +1411,7 @@ class ComponentObjects: allFolderIds = [row["id"] for row in cursor.fetchall()] cursor.execute( - 'SELECT "id" FROM "FileItem" WHERE "folderId" = ANY(%s) AND "_createdBy" = %s', + 'SELECT "id" FROM "FileItem" WHERE "folderId" = ANY(%s) AND "sysCreatedBy" = %s', (allFolderIds, self.userId or ""), ) allFileIds = [row["id"] for row in cursor.fetchall()] @@ -1424,7 +1419,7 @@ class ComponentObjects: if allFileIds: cursor.execute('DELETE FROM "FileData" WHERE "id" = ANY(%s)', (allFileIds,)) cursor.execute( - 'DELETE FROM "FileItem" WHERE "id" = ANY(%s) AND "_createdBy" = %s', + 'DELETE FROM "FileItem" WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s', (allFileIds, self.userId or ""), ) deletedFiles = cursor.rowcount @@ -1432,7 +1427,7 @@ class ComponentObjects: deletedFiles = 0 cursor.execute( - 'DELETE FROM "FileFolder" WHERE "id" = ANY(%s) AND "_createdBy" = %s', + 'DELETE FROM "FileFolder" WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s', (allFolderIds, self.userId or ""), ) deletedFolders = cursor.rowcount diff --git a/modules/interfaces/interfaceFeatures.py b/modules/interfaces/interfaceFeatures.py index 56311f01..6616218d 100644 --- a/modules/interfaces/interfaceFeatures.py +++ b/modules/interfaces/interfaceFeatures.py @@ -57,7 +57,7 @@ class FeatureInterface: records = self.db.getRecordset(Feature, recordFilter={"code": featureCode}) if not records: return None - cleanedRecord = 
{k: v for k, v in records[0].items() if not k.startswith("_")} + cleanedRecord = dict(records[0]) return Feature(**cleanedRecord) except Exception as e: logger.error(f"Error getting feature {featureCode}: {e}") @@ -74,7 +74,7 @@ class FeatureInterface: records = self.db.getRecordset(Feature) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(Feature(**cleanedRecord)) return result except Exception as e: @@ -120,7 +120,7 @@ class FeatureInterface: records = self.db.getRecordset(FeatureInstance, recordFilter={"id": instanceId}) if not records: return None - cleanedRecord = {k: v for k, v in records[0].items() if not k.startswith("_")} + cleanedRecord = dict(records[0]) return FeatureInstance(**cleanedRecord) except Exception as e: logger.error(f"Error getting feature instance {instanceId}: {e}") @@ -144,7 +144,7 @@ class FeatureInterface: records = self.db.getRecordset(FeatureInstance, recordFilter=recordFilter) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(FeatureInstance(**cleanedRecord)) return result except Exception as e: @@ -199,7 +199,7 @@ class FeatureInterface: if copyTemplateRoles: self._copyTemplateRoles(featureCode, mandateId, instanceId) - cleanedRecord = {k: v for k, v in createdInstance.items() if not k.startswith("_")} + cleanedRecord = dict(createdInstance) return FeatureInstance(**cleanedRecord) except Exception as e: @@ -435,7 +435,7 @@ class FeatureInterface: updated = self.db.recordModify(FeatureInstance, instanceId, filteredData) if updated: - cleanedRecord = {k: v for k, v in updated.items() if not k.startswith("_")} + cleanedRecord = dict(updated) return FeatureInstance(**cleanedRecord) return None except Exception as e: @@ -484,7 +484,7 @@ class FeatureInterface: records = self.db.getRecordset(Role, recordFilter=recordFilter) result = [] for record in records: - cleanedRecord = {k: v for k, v in record.items() if not k.startswith("_")} + cleanedRecord = dict(record) result.append(Role(**cleanedRecord)) return result except Exception as e: diff --git a/modules/interfaces/interfaceRbac.py b/modules/interfaces/interfaceRbac.py index e65cd5ab..947a6e2d 100644 --- a/modules/interfaces/interfaceRbac.py +++ b/modules/interfaces/interfaceRbac.py @@ -17,7 +17,7 @@ Data Namespace Structure: GROUP-Berechtigung: - data.uam.*: GROUP filtert nach Mandant (via UserMandate) -- data.chat.*, data.files.*, data.automation.*: GROUP = MY (benutzer-eigen); bei gesetztem featureInstanceId zusätzlich _createdBy +- data.chat.*, data.files.*, data.automation.*: GROUP = MY (benutzer-eigen); bei gesetztem featureInstanceId zusätzlich sysCreatedBy - data.feature.*: GROUP filtert nach mandateId/featureInstanceId """ @@ -146,7 +146,7 @@ def getRecordsetWithRBAC( mandateId: Explicit mandate context (from request header). Required for GROUP access. featureInstanceId: Explicit feature instance context enrichPermissions: If True, adds _permissions field to each record with row-level - permissions { canUpdate, canDelete } based on RBAC rules and _createdBy + permissions { canUpdate, canDelete } based on RBAC rules and sysCreatedBy featureCode: Optional feature code for feature-specific tables (e.g., "trustee"). If None, table is treated as a system table. @@ -657,7 +657,7 @@ def buildRbacWhereClause( # shared featureInstance (stale RBAC rules or merged roles). Same as MY. 
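# Editorial sketch (not part of the patch; IDs hypothetical): for a table that maps
# to the "chat" namespace, the featureInstance fallback above is expected to reduce
# to a plain creator filter, matching the test expectations further below:
#   >>> buildRbacWhereClause(permissions, user, "SomeChatTable", featureInstanceId="fi-1")
#   {'condition': '"sysCreatedBy" = %s', 'values': ['user-1']}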
namespaceAll = TABLE_NAMESPACE.get(table, "system") if featureInstanceId and namespaceAll == "chat": - userIdFieldAll = "_createdBy" + userIdFieldAll = "sysCreatedBy" if table == "UserInDB": userIdFieldAll = "id" elif table == "UserConnection": @@ -671,7 +671,7 @@ def buildRbacWhereClause( return {"condition": " AND ".join(baseConditions), "values": baseValues} return None - # My records - filter by _createdBy or userId field + # My records - filter by sysCreatedBy or userId field if readLevel == AccessLevel.MY: # Try common field names for creator userIdField = None @@ -680,7 +680,7 @@ def buildRbacWhereClause( elif table == "UserConnection": userIdField = "userId" else: - userIdField = "_createdBy" + userIdField = "sysCreatedBy" conditions = list(baseConditions) values = list(baseValues) @@ -707,7 +707,7 @@ def buildRbacWhereClause( if featureInstanceId and readLevel == AccessLevel.GROUP: conditions = list(baseConditions) values = list(baseValues) - conditions.append('"_createdBy" = %s') + conditions.append('"sysCreatedBy" = %s') values.append(currentUser.id) return {"condition": " AND ".join(conditions), "values": values} return {"condition": " AND ".join(baseConditions), "values": baseValues} @@ -829,7 +829,7 @@ def _enrichRecordsWithPermissions( Logic: - AccessLevel.ALL ('a'): User can update/delete all records - - AccessLevel.MY ('m'): User can only update/delete records where _createdBy == userId + - AccessLevel.MY ('m'): User can only update/delete records where sysCreatedBy == userId - AccessLevel.GROUP ('g'): Same as MY for now (group-level ownership) - AccessLevel.NONE ('n'): User cannot update/delete any records @@ -846,7 +846,7 @@ def _enrichRecordsWithPermissions( for record in records: recordCopy = dict(record) - createdBy = record.get("_createdBy") + createdBy = record.get("sysCreatedBy") # Determine canUpdate canUpdate = _checkRowPermission(permissions.update, userId, createdBy) @@ -873,7 +873,7 @@ def _checkRowPermission( Args: accessLevel: The permission level (ALL, MY, GROUP, NONE) userId: Current user's ID - recordCreatedBy: The _createdBy value of the record + recordCreatedBy: The sysCreatedBy value of the record Returns: True if user has permission, False otherwise @@ -884,9 +884,9 @@ def _checkRowPermission( if accessLevel == AccessLevel.ALL: return True - # MY and GROUP: Check ownership via _createdBy + # MY and GROUP: Check ownership via sysCreatedBy if accessLevel in (AccessLevel.MY, AccessLevel.GROUP): - # If record has no _createdBy, allow access (can't verify ownership) + # If record has no sysCreatedBy, allow access (can't verify ownership) if not recordCreatedBy: return True # If no userId, can't verify - deny diff --git a/modules/migration/migrateRootUsers.py b/modules/migration/migrateRootUsers.py index a048e614..69d1b7af 100644 --- a/modules/migration/migrateRootUsers.py +++ b/modules/migration/migrateRootUsers.py @@ -80,7 +80,7 @@ def _migrateDataRecords(db, oldInstanceId: str, newInstanceId: str, userId: str) cursor.execute( f'UPDATE "{tableName}" ' f'SET "featureInstanceId" = %s ' - f'WHERE "featureInstanceId" = %s AND "_createdBy" = %s', + f'WHERE "featureInstanceId" = %s AND "sysCreatedBy" = %s', (newInstanceId, oldInstanceId, userId), ) count = cursor.rowcount diff --git a/modules/routes/routeAdminAutomationEvents.py b/modules/routes/routeAdminAutomationEvents.py index 47d3ac9c..553c66d3 100644 --- a/modules/routes/routeAdminAutomationEvents.py +++ b/modules/routes/routeAdminAutomationEvents.py @@ -112,12 +112,12 @@ def 
_buildEnrichedAutomationEvents(currentUser: User) -> List[Dict[str, Any]]: if automation: if isinstance(automation, dict): job["name"] = automation.get("label", "") - job["createdBy"] = _resolveUsername(automation.get("_createdBy", "")) + job["createdBy"] = _resolveUsername(automation.get("sysCreatedBy", "")) job["mandate"] = _resolveMandateLabel(automation.get("mandateId", "")) job["featureInstance"] = _resolveFeatureLabel(automation.get("featureInstanceId", "")) else: job["name"] = getattr(automation, "label", "") - job["createdBy"] = _resolveUsername(getattr(automation, "_createdBy", "")) + job["createdBy"] = _resolveUsername(getattr(automation, "sysCreatedBy", "")) job["mandate"] = _resolveMandateLabel(getattr(automation, "mandateId", "")) job["featureInstance"] = _resolveFeatureLabel(getattr(automation, "featureInstanceId", "")) else: diff --git a/modules/routes/routeAdminAutomationLogs.py b/modules/routes/routeAdminAutomationLogs.py index 8b4d897b..479d0df3 100644 --- a/modules/routes/routeAdminAutomationLogs.py +++ b/modules/routes/routeAdminAutomationLogs.py @@ -91,14 +91,14 @@ def _buildFlattenedExecutionLogs(currentUser: User) -> List[Dict[str, Any]]: automationLabel = automation.get("label", "") mandateId = automation.get("mandateId", "") featureInstanceId = automation.get("featureInstanceId", "") - createdBy = automation.get("_createdBy", "") + createdBy = automation.get("sysCreatedBy", "") logs = automation.get("executionLogs") or [] else: automationId = getattr(automation, "id", "") automationLabel = getattr(automation, "label", "") mandateId = getattr(automation, "mandateId", "") featureInstanceId = getattr(automation, "featureInstanceId", "") - createdBy = getattr(automation, "_createdBy", "") + createdBy = getattr(automation, "sysCreatedBy", "") logs = getattr(automation, "executionLogs", None) or [] mandateName = _resolveMandateLabel(mandateId) diff --git a/modules/routes/routeAdminRbacRules.py b/modules/routes/routeAdminRbacRules.py index 3778d227..16336fae 100644 --- a/modules/routes/routeAdminRbacRules.py +++ b/modules/routes/routeAdminRbacRules.py @@ -1477,7 +1477,7 @@ def cleanup_duplicate_access_rules( for sig, rules in rulesBySignature.items(): if len(rules) > 1: # Sort by creation time (keep oldest) - rules.sort(key=lambda r: r.get("_createdAt", 0)) + rules.sort(key=lambda r: r.get("sysCreatedAt", 0)) keepRule = rules[0] deleteRules = rules[1:] diff --git a/modules/routes/routeBilling.py b/modules/routes/routeBilling.py index 04412752..88ec0cc6 100644 --- a/modules/routes/routeBilling.py +++ b/modules/routes/routeBilling.py @@ -564,7 +564,7 @@ def getTransactions( aicoreProvider=t.get("aicoreProvider"), aicoreModel=t.get("aicoreModel"), createdByUserId=t.get("createdByUserId"), - createdAt=t.get("_createdAt"), + createdAt=t.get("sysCreatedAt"), mandateId=t.get("mandateId"), mandateName=t.get("mandateName") )) @@ -1421,7 +1421,7 @@ def _enrichTransactionRows(transactions) -> List[Dict[str, Any]]: aicoreProvider=t.get("aicoreProvider"), aicoreModel=t.get("aicoreModel"), createdByUserId=t.get("createdByUserId"), - createdAt=t.get("_createdAt") + createdAt=t.get("sysCreatedAt") ) result.append(row.model_dump()) @@ -1465,7 +1465,7 @@ def _buildTransactionsList(ctx: RequestContext, targetMandateId: str) -> List[Di aicoreProvider=t.get("aicoreProvider"), aicoreModel=t.get("aicoreModel"), createdByUserId=t.get("createdByUserId"), - createdAt=t.get("_createdAt") + createdAt=t.get("sysCreatedAt") ) result.append(row.model_dump()) @@ -1641,7 +1641,7 @@ def 
getMandateViewTransactions( aicoreProvider=t.get("aicoreProvider"), aicoreModel=t.get("aicoreModel"), createdByUserId=t.get("createdByUserId"), - createdAt=t.get("_createdAt"), + createdAt=t.get("sysCreatedAt"), mandateId=t.get("mandateId"), mandateName=t.get("mandateName") )) @@ -1796,7 +1796,7 @@ def getUserViewStatistics( skippedNotDebit = 0 for t in allTransactions: - createdAt = t.get("_createdAt") + createdAt = t.get("sysCreatedAt") if not createdAt: skippedNoDate += 1 continue @@ -1972,7 +1972,7 @@ def getUserViewTransactions( "aicoreProvider": t.get("aicoreProvider"), "aicoreModel": t.get("aicoreModel"), "createdByUserId": t.get("createdByUserId"), - "createdAt": t.get("_createdAt"), + "createdAt": t.get("sysCreatedAt"), "mandateId": t.get("mandateId"), "mandateName": t.get("mandateName"), "userId": t.get("userId"), @@ -2069,7 +2069,7 @@ def getUserViewTransactionsFilterValues( "aicoreProvider": t.get("aicoreProvider"), "aicoreModel": t.get("aicoreModel"), "createdByUserId": t.get("createdByUserId"), - "createdAt": t.get("_createdAt"), + "createdAt": t.get("sysCreatedAt"), "mandateId": t.get("mandateId"), "mandateName": t.get("mandateName"), "userId": t.get("userId"), diff --git a/modules/routes/routeDataFiles.py b/modules/routes/routeDataFiles.py index 5f71bb47..e95da174 100644 --- a/modules/routes/routeDataFiles.py +++ b/modules/routes/routeDataFiles.py @@ -266,7 +266,7 @@ def get_file_filter_values( pass try: - recordFilter = {"_createdBy": managementInterface.userId} + recordFilter = {"sysCreatedBy": managementInterface.userId} values = managementInterface.db.getDistinctColumnValues( FileItem, column, crossFilterPagination, recordFilter ) diff --git a/modules/security/rbac.py b/modules/security/rbac.py index f1d83252..9199e73b 100644 --- a/modules/security/rbac.py +++ b/modules/security/rbac.py @@ -261,7 +261,7 @@ class RbacClass: # No mandate context: load roles from ALL user's mandates. # Required for user-owned namespaces (files, chat, automation) that # are accessed without mandate context (e.g., /api/files/ endpoints). - # Data isolation is still enforced by _createdBy WHERE clause. + # Data isolation is still enforced by sysCreatedBy WHERE clause. 
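# Editorial sketch (hypothetical data): the recordset call below collects every
# enabled membership for the user, so roles merge across all mandates while rows
# in user-owned namespaces stay scoped by the sysCreatedBy filter noted above:
#   >>> sorted(um["mandateId"] for um in allUserMandates)
#   ['mandate-acme', 'mandate-root']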
allUserMandates = self.dbApp.getRecordset( UserMandate, recordFilter={"userId": user.id, "enabled": True} diff --git a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py index f9e72ea6..539d3672 100644 --- a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py +++ b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py @@ -441,13 +441,13 @@ def _buildWorkflowHintItems( import time as _time now = _time.time() - others.sort(key=lambda w: w.get("_createdAt") or w.get("startedAt") or 0, reverse=True) + others.sort(key=lambda w: w.get("sysCreatedAt") or w.get("startedAt") or 0, reverse=True) others = others[:10] items = [] for wf in others: name = wf.get("name") or "(unnamed)" - createdAt = wf.get("_createdAt") or wf.get("startedAt") or 0 + createdAt = wf.get("sysCreatedAt") or wf.get("startedAt") or 0 ageSec = now - createdAt if createdAt else 0 if ageSec < 3600: ageStr = f"{int(ageSec / 60)}m ago" @@ -3188,7 +3188,7 @@ def _registerCoreTools(registry: ToolRegistry, services): allWorkflows = chatInterface.getWorkflows() or [] allWorkflows.sort( - key=lambda w: w.get("_createdAt") or w.get("startedAt") or 0, + key=lambda w: w.get("sysCreatedAt") or w.get("startedAt") or 0, reverse=True, ) allWorkflows = allWorkflows[:50] @@ -3197,7 +3197,7 @@ def _registerCoreTools(registry: ToolRegistry, services): for wf in allWorkflows: wfId = wf.get("id", "") name = wf.get("name") or "(unnamed)" - createdAt = wf.get("_createdAt") or wf.get("startedAt") or 0 + createdAt = wf.get("sysCreatedAt") or wf.get("startedAt") or 0 lastActivity = wf.get("lastActivity") or createdAt msgs = chatInterface.getMessages(wfId) or [] @@ -3275,7 +3275,7 @@ def _registerCoreTools(registry: ToolRegistry, services): items.append({ "role": raw.get("role", ""), "message": content, - "publishedAt": raw.get("publishedAt") or raw.get("_createdAt") or 0, + "publishedAt": raw.get("publishedAt") or raw.get("sysCreatedAt") or 0, }) header = f"Workflow {targetWorkflowId}: {len(allMsgs)} total messages" diff --git a/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py b/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py index 14a01557..7d85edcc 100644 --- a/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py +++ b/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py @@ -124,7 +124,7 @@ class KnowledgeService: _fileScope = _get("scope") if _fileScope: index.scope = _fileScope - _fileCreatedBy = _get("_createdBy") + _fileCreatedBy = _get("sysCreatedBy") if _fileCreatedBy: index.userId = str(_fileCreatedBy) except Exception: diff --git a/modules/shared/attributeUtils.py b/modules/shared/attributeUtils.py index 863d7f36..239e214d 100644 --- a/modules/shared/attributeUtils.py +++ b/modules/shared/attributeUtils.py @@ -74,6 +74,18 @@ def getModelLabels(modelName: str, language: str = "en") -> Dict[str, str]: } +def _mergedAttributeLabels(modelClass: Type[BaseModel], userLanguage: str) -> Dict[str, str]: + """Merge attribute labels from model MRO (base classes first, subclass overrides).""" + try: + baseIdx = modelClass.__mro__.index(BaseModel) + except ValueError: + return getModelLabels(modelClass.__name__, userLanguage) + merged: Dict[str, str] = {} + for cls in reversed(modelClass.__mro__[:baseIdx]): + merged.update(getModelLabels(cls.__name__, userLanguage)) + return merged + + def getModelLabel(modelName: str, language: str = "en") -> str: """ Get the label for a 
model in the specified language. @@ -106,7 +118,7 @@ def getModelAttributeDefinitions(modelClass: Type[BaseModel] = None, userLanguag attributes = [] model_name = modelClass.__name__ - labels = getModelLabels(model_name, userLanguage) + labels = _mergedAttributeLabels(modelClass, userLanguage) model_label = getModelLabel(model_name, userLanguage) # Pydantic v2 only diff --git a/modules/shared/dbMultiTenantOptimizations.py b/modules/shared/dbMultiTenantOptimizations.py index f3c2de98..95ad6cae 100644 --- a/modules/shared/dbMultiTenantOptimizations.py +++ b/modules/shared/dbMultiTenantOptimizations.py @@ -74,7 +74,7 @@ _INDEXES = [ # Invitation indexes ("Invitation", "idx_invitation_mandate", ["mandateId"]), - ("Invitation", "idx_invitation_createdby", ["createdBy"]), + ("Invitation", "idx_invitation_syscreatedby", ["sysCreatedBy"]), ] # Unique indexes (separate list) diff --git a/modules/shared/gdprDeletion.py b/modules/shared/gdprDeletion.py index 034b627a..99e09313 100644 --- a/modules/shared/gdprDeletion.py +++ b/modules/shared/gdprDeletion.py @@ -35,8 +35,8 @@ USER_COLUMNS = [ "createdBy", "usedBy", "revokedBy", - "_createdBy", - "_modifiedBy", + "sysCreatedBy", + "sysModifiedBy", ] @@ -284,12 +284,12 @@ def _anonymizeRecords( # Build WHERE clause for primary key whereClause = " AND ".join([f'"{pk}" = %s' for pk in pkColumns]) - # Check if table has _modifiedAt column + # Check if table has sysModifiedAt column columns = _getTableColumns(dbConnector, tableName) - hasModifiedAt = "_modifiedAt" in columns + hasModifiedAt = "sysModifiedAt" in columns if hasModifiedAt: - query = f'UPDATE "{tableName}" SET "{columnName}" = %s, "_modifiedAt" = %s WHERE {whereClause}' + query = f'UPDATE "{tableName}" SET "{columnName}" = %s, "sysModifiedAt" = %s WHERE {whereClause}' params = [anonymousValue, getUtcTimestamp()] else: query = f'UPDATE "{tableName}" SET "{columnName}" = %s WHERE {whereClause}' diff --git a/modules/workflows/automation/mainWorkflow.py b/modules/workflows/automation/mainWorkflow.py index 19473c01..dc387926 100644 --- a/modules/workflows/automation/mainWorkflow.py +++ b/modules/workflows/automation/mainWorkflow.py @@ -76,7 +76,7 @@ async def executeAutomation(automationId: str, automation, creatorUser: User, se Args: automationId: ID of automation to execute - automation: Pre-loaded automation object (with system fields like _createdBy) + automation: Pre-loaded automation object (with system fields like sysCreatedBy) creatorUser: The user who created the automation (workflow runs in this context) services: Services instance (used for interfaceDbApp etc.) 
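Editor's note: the next hunk reads the creator off the stored automation via the
sysCreatedBy system field. A minimal standalone sketch of that resolution pattern,
assuming nothing beyond what the hunk shows (the function name and ValueError are
illustrative, not the patched API):

def resolveCreatorUserId(automation) -> str:
    """Return the creator's user ID from a loaded automation (dict or object)."""
    if isinstance(automation, dict):
        creatorUserId = automation.get("sysCreatedBy")
    else:
        creatorUserId = getattr(automation, "sysCreatedBy", None)
    if not creatorUserId:
        # Mirrors the patched guard: without a creator there is no execution context.
        raise ValueError("automation has no creator user (sysCreatedBy missing)")
    return creatorUserId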
@@ -302,10 +302,10 @@ def createAutomationEventHandler(automationId: str, eventUser): logger.warning(f"Automation {automationId} not found or not active, skipping execution") return - # Get creator user ID from automation's _createdBy system field - creatorUserId = getattr(automation, "_createdBy", None) + # Get creator user ID from automation's sysCreatedBy system field + creatorUserId = getattr(automation, "sysCreatedBy", None) if not creatorUserId: - logger.error(f"Automation {automationId} has no creator user (_createdBy missing)") + logger.error(f"Automation {automationId} has no creator user (sysCreatedBy missing)") return # Get creator user from database (using SysAdmin access) diff --git a/scripts/script_db_export_migration.py b/scripts/script_db_export_migration.py index 9fc8a910..e5961e23 100644 --- a/scripts/script_db_export_migration.py +++ b/scripts/script_db_export_migration.py @@ -24,7 +24,7 @@ Optionen: Die Struktur-Datei wird automatisch als _structure.json erstellt --pretty, -p JSON formatiert ausgeben (für bessere Lesbarkeit) --exclude Komma-getrennte Liste von Tabellen, die ausgeschlossen werden sollen - --include-meta System-Metadaten (_createdAt, _modifiedAt, etc.) beibehalten + --include-meta System-Metadaten (sysCreatedAt, sysModifiedAt, etc.) beibehalten --db Nur bestimmte Datenbank(en) exportieren (komma-getrennt) """ @@ -245,7 +245,12 @@ def _getTableData(conn, tableName: str, includeMeta: bool = False) -> List[Dict[ # Optional: System-Metadaten entfernen if not includeMeta: - metaFields = ["_createdAt", "_modifiedAt", "_createdBy", "_modifiedBy"] + metaFields = [ + "sysCreatedAt", + "sysModifiedAt", + "sysCreatedBy", + "sysModifiedBy", + ] for field in metaFields: record.pop(field, None) @@ -789,7 +794,7 @@ Beispiele: parser.add_argument( "--include-meta", - help="System-Metadaten (_createdAt, etc.) 
beibehalten", + help="System-Metadaten (sysCreatedAt, sysModifiedAt, sysCreatedBy, sysModifiedBy) beibehalten", action="store_true" ) diff --git a/tests/integration/rbac/test_rbac_database.py b/tests/integration/rbac/test_rbac_database.py index 1c081953..72eb1b26 100644 --- a/tests/integration/rbac/test_rbac_database.py +++ b/tests/integration/rbac/test_rbac_database.py @@ -50,7 +50,6 @@ class TestRbacDatabaseFiltering: id="test_user_all", username="testuser", roleLabels=["sysadmin"], - mandateId="test_mandate_all" ) whereClause = db.buildRbacWhereClause(permissions, user, "SomeTable") @@ -73,13 +72,12 @@ class TestRbacDatabaseFiltering: id="test_user_my", username="testuser", roleLabels=["user"], - mandateId="test_mandate_my" ) whereClause = db.buildRbacWhereClause(permissions, user, "SomeTable") assert whereClause is not None - assert whereClause["condition"] == '"_createdBy" = %s' + assert whereClause["condition"] == '"sysCreatedBy" = %s' assert whereClause["values"] == ["test_user_my"] def testBuildRbacWhereClauseGroupAccess(self, db): @@ -93,17 +91,19 @@ class TestRbacDatabaseFiltering: delete=AccessLevel.GROUP ) + mandate_id = "test_mandate_group" user = User( id="test_user_group", username="testuser", roleLabels=["admin"], - mandateId="test_mandate_group" ) - whereClause = db.buildRbacWhereClause(permissions, user, "SomeTable") + whereClause = db.buildRbacWhereClause( + permissions, user, "SomeTable", mandateId=mandate_id + ) assert whereClause is not None - assert whereClause["condition"] == '"mandateId" = %s' + assert whereClause["condition"] == '("mandateId" = %s OR "mandateId" IS NULL)' assert whereClause["values"] == ["test_mandate_group"] def testBuildRbacWhereClauseNoAccess(self, db): @@ -121,7 +121,6 @@ class TestRbacDatabaseFiltering: id="test_user_none", username="testuser", roleLabels=["viewer"], - mandateId="test_mandate_none" ) whereClause = db.buildRbacWhereClause(permissions, user, "SomeTable") @@ -145,7 +144,6 @@ class TestRbacDatabaseFiltering: id="test_user_in_db", username="testuser", roleLabels=["user"], - mandateId="test_mandate_in_db" ) whereClause = db.buildRbacWhereClause(permissions, user, "UserInDB") @@ -156,56 +154,84 @@ class TestRbacDatabaseFiltering: assert whereClause["values"] == ["test_user_in_db"] def testBuildRbacWhereClauseUserConnectionTable(self, db): - """Test WHERE clause building for UserConnection table with GROUP access.""" - # Create test users in the same mandate for GROUP access testing - from modules.datamodels.datamodelUam import UserInDB - testMandateId = "test_mandate_group" - - # Create test users - user1 = UserInDB( - id="test_user1", - username="testuser1", - mandateId=testMandateId - ) - user2 = UserInDB( - id="test_user2", - username="testuser2", - mandateId=testMandateId - ) - + """GROUP on UserConnection resolves member userIds via UserMandate (multi-tenant).""" + from modules.datamodels.datamodelUam import UserInDB, Mandate + from modules.datamodels.datamodelMembership import UserMandate + + testMandateId = "rbac_test_mandate_uc" + user1Id = "rbac_test_user_uc1" + user2Id = "rbac_test_user_uc2" + userMandateIds = [] + try: - user1Data = user1.model_dump() - user1Data["id"] = user1.id - user2Data = user2.model_dump() - user2Data["id"] = user2.id - db.recordCreate(UserInDB, user1Data) - db.recordCreate(UserInDB, user2Data) - + mandate = Mandate( + id=testMandateId, + name="RBAC test mandate", + label="RBAC test", + ) + mandatePayload = mandate.model_dump() + mandatePayload["id"] = mandate.id + db.recordCreate(Mandate, 
mandatePayload) + + for uid, uname in ( + (user1Id, "rbac_uc_user1"), + (user2Id, "rbac_uc_user2"), + ): + u = UserInDB( + id=uid, + username=uname, + email=f"{uid}@example.com", + hashedPassword="not-used", + ) + payload = u.model_dump() + payload["id"] = u.id + db.recordCreate(UserInDB, payload) + + for uid in (user1Id, user2Id): + um = UserMandate(userId=uid, mandateId=testMandateId, enabled=True) + umPayload = um.model_dump() + umPayload["id"] = um.id + createdUm = db.recordCreate(UserMandate, umPayload) + if createdUm and createdUm.get("id"): + userMandateIds.append(createdUm["id"]) + else: + userMandateIds.append(um.id) + permissions = UserPermissions( view=True, read=AccessLevel.GROUP, create=AccessLevel.GROUP, update=AccessLevel.GROUP, - delete=AccessLevel.GROUP + delete=AccessLevel.GROUP, ) - + user = User( - id="test_user1", - username="testuser1", + id=user1Id, + username="rbac_uc_user1", roleLabels=["admin"], - mandateId=testMandateId ) - - whereClause = db.buildRbacWhereClause(permissions, user, "UserConnection") - + + whereClause = db.buildRbacWhereClause( + permissions, user, "UserConnection", mandateId=testMandateId + ) + assert whereClause is not None + assert whereClause["condition"] != "1 = 0" assert "userId" in whereClause["condition"] assert "IN" in whereClause["condition"] - assert len(whereClause["values"]) >= 2 + assert set(whereClause["values"]) == {user1Id, user2Id} finally: - # Cleanup test users + for umId in userMandateIds: + try: + db.recordDelete(UserMandate, umId) + except Exception: + pass + for uid in (user1Id, user2Id): + try: + db.recordDelete(UserInDB, uid) + except Exception: + pass try: - db.recordDelete(UserInDB, "test_user1") - db.recordDelete(UserInDB, "test_user2") - except: + db.recordDelete(Mandate, testMandateId) + except Exception: pass From 20d2bf215fdea22246a5fa7085a3788ce85beccb Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Sat, 28 Mar 2026 18:28:35 +0100 Subject: [PATCH 07/33] fixes --- modules/auth/tokenManager.py | 2 +- .../features/automation2/mainAutomation2.py | 13 +++++++++++ modules/interfaces/interfaceDbApp.py | 13 ++++------- .../services/serviceChat/mainServiceChat.py | 3 ++- modules/shared/dbMultiTenantOptimizations.py | 22 +++++++++++++++++++ 5 files changed, 42 insertions(+), 11 deletions(-) diff --git a/modules/auth/tokenManager.py b/modules/auth/tokenManager.py index 5740a2ac..940de055 100644 --- a/modules/auth/tokenManager.py +++ b/modules/auth/tokenManager.py @@ -181,7 +181,7 @@ class TokenManager: # Only allow a new refresh if at least 10 minutes passed since the token was created/refreshed try: nowTs = getUtcTimestamp() - createdTs = parseTimestamp(oldToken.createdAt, default=0.0) + createdTs = parseTimestamp(oldToken.sysCreatedAt, default=0.0) secondsSinceLastRefresh = nowTs - createdTs if secondsSinceLastRefresh < 10 * 60: logger.info( diff --git a/modules/features/automation2/mainAutomation2.py b/modules/features/automation2/mainAutomation2.py index 9ec97eca..08038e68 100644 --- a/modules/features/automation2/mainAutomation2.py +++ b/modules/features/automation2/mainAutomation2.py @@ -77,6 +77,19 @@ TEMPLATE_ROLES = [ {"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "m"}, ] }, + { + "roleLabel": "automation2-admin", + "description": { + "en": "Automation2 Admin - Full UI and API for the instance; data remains user-scoped (MY)", + "de": "Automation2 Admin - Volle UI und API für die Instanz; Daten weiterhin benutzerspezifisch (MY)", + "fr": "Administrateur 
Automation2 - UI et API complets pour l'instance; donnees limitees a l'utilisateur (MY)", + }, + "accessRules": [ + {"context": "UI", "item": None, "view": True}, + {"context": "RESOURCE", "item": None, "view": True}, + {"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "m"}, + ], + }, ] diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py index 2a6b0f78..ee1dc379 100644 --- a/modules/interfaces/interfaceDbApp.py +++ b/modules/interfaces/interfaceDbApp.py @@ -2858,8 +2858,8 @@ class AppObjects: # Ensure token has required fields if not token.id: token.id = str(uuid.uuid4()) - if not token.createdAt: - token.createdAt = getUtcTimestamp() + if not token.sysCreatedAt: + token.sysCreatedAt = getUtcTimestamp() # If replace_existing is True, delete old access tokens for this user and authority first if replace_existing: @@ -2892,12 +2892,7 @@ class AppObjects: ) # Continue with saving the new token even if deletion fails - # Convert to dict and ensure all fields are properly set token_dict = token.model_dump() - # Ensure userId is set to current user - # Convert to dict and ensure all fields are properly set - token_dict = token.model_dump() - # Ensure userId is set to current user token_dict["userId"] = self.currentUser.id # Save to database @@ -2936,8 +2931,8 @@ class AppObjects: # Ensure token has required fields if not token.id: token.id = str(uuid.uuid4()) - if not token.createdAt: - token.createdAt = getUtcTimestamp() + if not token.sysCreatedAt: + token.sysCreatedAt = getUtcTimestamp() # Convert to dict and ensure all fields are properly set token_dict = token.model_dump() diff --git a/modules/serviceCenter/services/serviceChat/mainServiceChat.py b/modules/serviceCenter/services/serviceChat/mainServiceChat.py index b05b0c64..40769fae 100644 --- a/modules/serviceCenter/services/serviceChat/mainServiceChat.py +++ b/modules/serviceCenter/services/serviceChat/mainServiceChat.py @@ -333,7 +333,8 @@ class ChatService: token_status = "expired" else: # Check if this token was recently refreshed (within last 5 minutes) - time_since_creation = current_time - token.createdAt if hasattr(token, 'createdAt') else 0 + createdTs = getattr(token, "sysCreatedAt", None) + time_since_creation = (current_time - createdTs) if createdTs else 0 if time_since_creation < 300: # 5 minutes token_status = "valid (refreshed)" else: diff --git a/modules/shared/dbMultiTenantOptimizations.py b/modules/shared/dbMultiTenantOptimizations.py index 95ad6cae..c178c376 100644 --- a/modules/shared/dbMultiTenantOptimizations.py +++ b/modules/shared/dbMultiTenantOptimizations.py @@ -21,6 +21,18 @@ from typing import Optional, List logger = logging.getLogger(__name__) +def _ensureUamTablesMatchModels(dbConnector) -> None: + """Run connector schema sync so sys* columns exist before we CREATE INDEX on them.""" + if not hasattr(dbConnector, "_ensureTableExists"): + return + try: + from modules.datamodels.datamodelInvitation import Invitation + + dbConnector._ensureTableExists(Invitation) + except Exception as e: + logger.debug(f"_ensureUamTablesMatchModels: {e}") + + def _getConnection(dbConnector): """Get a connection from the DatabaseConnector. 
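Editor's note: the pre-index ensure step added in the next hunk exists because
CREATE INDEX fails when the sys* column has not yet been added by schema sync on
older databases. A condensed sketch of the required ordering, assuming a psycopg2
cursor (the index name and Invitation import path are taken from the hunks above):

def ensureInvitationIndex(dbConnector, cursor) -> None:
    # Sync model-defined columns (including sys*) before indexing them;
    # creating an index on a missing column would raise on legacy schemas.
    from modules.datamodels.datamodelInvitation import Invitation

    if hasattr(dbConnector, "_ensureTableExists"):
        dbConnector._ensureTableExists(Invitation)
    cursor.execute(
        'CREATE INDEX IF NOT EXISTS "idx_invitation_syscreatedby" '
        'ON "Invitation" ("sysCreatedBy")'
    )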
@@ -176,6 +188,11 @@ def applyMultiTenantOptimizations(dbConnector, tables: Optional[List[str]] = Non except Exception as autoErr: logger.debug(f"Could not set autocommit: {autoErr}") + try: + _ensureUamTablesMatchModels(dbConnector) + except Exception as preIdxErr: + logger.debug(f"Pre-index table ensure: {preIdxErr}") + try: with conn.cursor() as cursor: # Apply indexes @@ -214,6 +231,11 @@ def applyIndexesOnly(dbConnector, tables: Optional[List[str]] = None) -> int: originalAutocommit = conn.autocommit conn.autocommit = True + try: + _ensureUamTablesMatchModels(dbConnector) + except Exception as preIdxErr: + logger.debug(f"Pre-index table ensure: {preIdxErr}") + try: with conn.cursor() as cursor: return _applyIndexes(cursor, tables) From 1883f8cd6a633160617bf6736820d932f1822603 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Sat, 28 Mar 2026 21:46:55 +0100 Subject: [PATCH 08/33] fixed sys attributes --- modules/connectors/connectorDbPostgre.py | 148 +++++++++++++- modules/interfaces/interfaceBootstrap.py | 180 +++++++----------- .../methodOutlook/helpers/folderManagement.py | 88 ++++++++- 3 files changed, 301 insertions(+), 115 deletions(-) diff --git a/modules/connectors/connectorDbPostgre.py b/modules/connectors/connectorDbPostgre.py index e168467b..bf8fce44 100644 --- a/modules/connectors/connectorDbPostgre.py +++ b/modules/connectors/connectorDbPostgre.py @@ -5,7 +5,7 @@ import re import psycopg2 import psycopg2.extras import logging -from typing import List, Dict, Any, Optional, Union, get_origin, get_args, Type +from typing import List, Dict, Any, Optional, Union, get_origin, get_args, Type, Set, Tuple import uuid from pydantic import BaseModel, Field import threading @@ -158,6 +158,83 @@ def _parseRecordFields(record: Dict[str, Any], fields: Dict[str, str], context: logger.warning(f"Could not parse JSONB field {fieldName}, keeping as string ({context})") +# Legacy system columns (underscore-prefixed internal names) -> PowerOn sys* columns. +_LEGACY_UNDERSCORE_TO_SYS: Tuple[Tuple[str, str], ...] 
= ( + ("_createdAt", "sysCreatedAt"), + ("_createdBy", "sysCreatedBy"), + ("_modifiedAt", "sysModifiedAt"), + ("_modifiedBy", "sysModifiedBy"), +) + + +def _quotePgIdent(name: str) -> str: + return '"' + str(name).replace('"', '""') + '"' + + +def _resolveColumnCaseInsensitive(cols: Set[str], logicalName: str) -> Optional[str]: + """Match information_schema column_name to logical CamelCase (PG folds unquoted legacy names to lowercase).""" + if not logicalName or not cols: + return None + for c in cols: + if c.lower() == logicalName.lower(): + return c + return None + + +def _pgColumnDataType(cursor, tablePg: str, colPg: str) -> Optional[str]: + cursor.execute( + """ + SELECT data_type FROM information_schema.columns + WHERE table_schema = 'public' AND table_name = %s AND column_name = %s + """, + (tablePg, colPg), + ) + row = cursor.fetchone() + return row["data_type"] if row else None + + +def _legacySourceToSysSqlExpr(srcIdent: str, srcType: Optional[str], tgtType: Optional[str]) -> str: + """Build RHS for UPDATE sys* = expr from legacy _* column (handles text/timestamp -> double precision).""" + s = _quotePgIdent(srcIdent) + sl = (srcType or "").lower() + tl = (tgtType or "").lower() + if "double" in tl or tl == "real" or tl == "numeric": + if any(x in sl for x in ("double precision", "real", "numeric", "integer", "bigint", "smallint")): + return f"{s}::double precision" + if "timestamp" in sl or sl == "date": + return f"EXTRACT(EPOCH FROM {s}::timestamptz)" + if "text" in sl or "character" in sl or sl == "uuid": + return ( + f"CASE WHEN trim({s}::text) ~ '^[+-]?[0-9]+(\\.[0-9]*)?([eE][+-]?[0-9]+)?$' " + f"THEN trim({s}::text)::double precision " + f"ELSE EXTRACT(EPOCH FROM trim({s}::text)::timestamptz) END" + ) + return s + return s + + +def _listPublicBaseTableNames(cursor) -> List[str]: + cursor.execute( + """ + SELECT table_name FROM information_schema.tables + WHERE table_schema = 'public' AND table_type = 'BASE TABLE' + ORDER BY table_name + """ + ) + return [row["table_name"] for row in cursor.fetchall()] + + +def _listTableColumnNames(cursor, tableName: str) -> Set[str]: + cursor.execute( + """ + SELECT column_name FROM information_schema.columns + WHERE table_schema = 'public' AND table_name = %s + """, + (tableName,), + ) + return {row["column_name"] for row in cursor.fetchall()} + + # Cache connectors by (host, database, port) to avoid duplicate inits for same database. # Thread safety: _connector_cache_lock protects cache access. userId is request-scoped via # contextvars to avoid races when concurrent requests share the same connector. @@ -374,6 +451,63 @@ class DatabaseConnector: logger.warning(f"Connection lost, reconnecting: {e}") self._connect() + def migrateLegacyUnderscoreSysColumns(self) -> int: + """ + Scan all public base tables on this connection's database. Where both a legacy + _createdAt / _createdBy / _modifiedAt / _modifiedBy column (any case) and the + matching sys* column exist, copy into sys* rows where sys* IS NULL and legacy IS NOT NULL. + Idempotent; safe to run on every bootstrap. 
+ """ + self._ensure_connection() + total = 0 + try: + with self.connection.cursor() as cursor: + tableNames = _listPublicBaseTableNames(cursor) + for table in tableNames: + with self.connection.cursor() as cursor: + cols = _listTableColumnNames(cursor, table) + for legacyLogical, sysLogical in _LEGACY_UNDERSCORE_TO_SYS: + src = _resolveColumnCaseInsensitive(cols, legacyLogical) + tgt = _resolveColumnCaseInsensitive(cols, sysLogical) + if not src or not tgt or src == tgt: + continue + try: + with self.connection.cursor() as cursor: + srcType = _pgColumnDataType(cursor, table, src) + tgtType = _pgColumnDataType(cursor, table, tgt) + expr = _legacySourceToSysSqlExpr(src, srcType, tgtType) + tq = _quotePgIdent(table) + tr = _quotePgIdent(tgt) + sr = _quotePgIdent(src) + sql = ( + f"UPDATE {tq} SET {tr} = {expr} " + f"WHERE {tr} IS NULL AND {sr} IS NOT NULL" + ) + cursor.execute(sql) + n = cursor.rowcount + self.connection.commit() + total += n + except Exception as e: + try: + self.connection.rollback() + except Exception: + pass + logger.debug( + f"migrateLegacyUnderscoreSysColumns skip {self.dbDatabase}.{table} " + f"{src}->{tgt}: {e}" + ) + except Exception as e: + logger.error(f"migrateLegacyUnderscoreSysColumns failed on {self.dbDatabase}: {e}") + try: + self.connection.rollback() + except Exception: + pass + if total: + logger.info( + f"migrateLegacyUnderscoreSysColumns: {total} cell(s) in {self.dbDatabase}" + ) + return total + def _initializeSystemTable(self): """Initializes the system table if it doesn't exist yet.""" try: @@ -710,6 +844,10 @@ class DatabaseConnector: logger.error(f"Error loading record {recordId} from table {table}: {e}") return None + def getRecord(self, model_class: type, recordId: str) -> Optional[Dict[str, Any]]: + """Load one row by primary key (routes / services; wraps _loadRecord).""" + return self._loadRecord(model_class, str(recordId)) + def _saveRecord( self, model_class: type, recordId: str, record: Dict[str, Any] ) -> bool: @@ -730,8 +868,9 @@ class DatabaseConnector: effective_user_id = self.userId currentTime = getUtcTimestamp() # Set sysCreatedAt/sysCreatedBy on first persist; always refresh modified fields. - # Use falsy check: model_dump() always includes sysCreatedAt key (often None). - if not record.get("sysCreatedAt"): + # Treat None and 0 as unset (legacy rows / bad defaults); model_dump often has sysCreatedAt=None. 
+ createdTs = record.get("sysCreatedAt") + if createdTs is None or createdTs == 0 or createdTs == 0.0: record["sysCreatedAt"] = currentTime if effective_user_id: record["sysCreatedBy"] = effective_user_id @@ -1030,6 +1169,9 @@ class DatabaseConnector: continue colType = fields.get(key, "TEXT") logger.debug(f"_buildPaginationClauses: filter key='{key}' val={val!r} type(val)={type(val).__name__} colType={colType}") + if val is None: + where_parts.append(f'"{key}" IS NULL') + continue if isinstance(val, dict): op = val.get("operator", "equals") v = val.get("value", "") diff --git a/modules/interfaces/interfaceBootstrap.py b/modules/interfaces/interfaceBootstrap.py index 0fb48ffe..98b70466 100644 --- a/modules/interfaces/interfaceBootstrap.py +++ b/modules/interfaces/interfaceBootstrap.py @@ -11,9 +11,9 @@ Multi-Tenant Design: """ import logging -from typing import Optional, Dict, Set, Tuple +from typing import Optional, Dict, Tuple from passlib.context import CryptContext -from modules.connectors.connectorDbPostgre import DatabaseConnector +from modules.connectors.connectorDbPostgre import DatabaseConnector, _get_cached_connector from modules.shared.configuration import APP_CONFIG from modules.datamodels.datamodelUam import ( Mandate, @@ -38,119 +38,79 @@ pwdContext = CryptContext(schemes=["argon2"], deprecated="auto") # Cache für Role-IDs (roleLabel -> roleId) _roleIdCache: Dict[str, str] = {} -# Historical PostgreSQL column identifiers (pre-sys*). Used only in _migrateSystemFieldColumns SQL. -_LEGACY_SYS_PAIR_RENAMES: Tuple[Tuple[str, str], ...] = ( - ("_createdAt", "sysCreatedAt"), - ("_createdBy", "sysCreatedBy"), - ("_modifiedAt", "sysModifiedAt"), - ("_modifiedBy", "sysModifiedBy"), +# PowerOn logical databases to scan (same set as gateway/scripts/script_db_export_migration.py). +_POWERON_DATABASE_NAMES: Tuple[str, ...] = ( + "poweron_app", + "poweron_chat", + "poweron_chatbot", + "poweron_management", + "poweron_realestate", + "poweron_trustee", + "poweron_automation", ) -def _getPublicTableColumns(db: DatabaseConnector, tableName: str) -> Set[str]: - """Column names for a quoted PostgreSQL table (exact case in information_schema).""" +def _configPrefixForPoweronDatabase(dbName: str) -> str: + return { + "poweron_app": "DB_APP", + "poweron_chat": "DB_CHAT", + "poweron_chatbot": "DB_CHATBOT", + "poweron_management": "DB_MANAGEMENT", + "poweron_realestate": "DB_REALESTATE", + "poweron_trustee": "DB_TRUSTEE", + # Same as initAutomationTemplates: default DB_* (not a separate DB_AUTOMATION_* prefix). 
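# Editorial example of the resulting lookups (key names follow the
# f"{prefix}_HOST" pattern used in _openConnectorForPoweronDatabase below):
#   "poweron_trustee"    -> DB_TRUSTEE_HOST / DB_TRUSTEE_USER / DB_TRUSTEE_PASSWORD_SECRET
#   "poweron_automation" -> plain DB_HOST / DB_USER / DB_PASSWORD_SECRET fallback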
+ "poweron_automation": "DB", + }.get(dbName, "DB") + + +def _openConnectorForPoweronDatabase(dbName: str) -> Optional[DatabaseConnector]: + """Connect to a named PowerOn database using DB_* / DB_APP_* style config (shared with export script).""" + prefix = _configPrefixForPoweronDatabase(dbName) + host = APP_CONFIG.get(f"{prefix}_HOST") or APP_CONFIG.get("DB_HOST", "localhost") + user = APP_CONFIG.get(f"{prefix}_USER") or APP_CONFIG.get("DB_USER") + password = APP_CONFIG.get(f"{prefix}_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD_SECRET") + portRaw = APP_CONFIG.get(f"{prefix}_PORT") or APP_CONFIG.get("DB_PORT", 5432) try: - with db.connection.cursor() as cursor: - cursor.execute( - """ - SELECT column_name FROM information_schema.columns - WHERE table_schema = 'public' AND table_name = %s - """, - (tableName,), - ) - return {row["column_name"] for row in cursor.fetchall()} - except Exception as e: - logger.warning(f"_getPublicTableColumns failed for {tableName}: {e}") - return set() - - -def _migrateSystemFieldColumns(db: DatabaseConnector) -> None: - """Backfill sys* from older physical columns and business duplicates where sys* IS NULL (idempotent).""" - businessFieldMigrations: Dict[str, Dict[str, str]] = { - "FileFolder": {"createdAt": "sysCreatedAt"}, - "FileItem": {"creationDate": "sysCreatedAt"}, - "Invitation": {"createdAt": "sysCreatedAt", "createdBy": "sysCreatedBy"}, - "FeatureDataSource": {"createdAt": "sysCreatedAt"}, - "DataSource": {"createdAt": "sysCreatedAt"}, - "UserNotification": {"createdAt": "sysCreatedAt"}, - "Token": {"createdAt": "sysCreatedAt"}, - "MessagingSubscription": {"createdBy": "sysCreatedBy", "modifiedBy": "sysModifiedBy"}, - "CoachingContext": {"createdAt": "sysCreatedAt"}, - "CoachingSession": {"createdAt": "sysCreatedAt", "updatedAt": "sysModifiedAt"}, - "CoachingMessage": {"createdAt": "sysCreatedAt"}, - "CoachingTask": {"createdAt": "sysCreatedAt", "updatedAt": "sysModifiedAt"}, - "CoachingScore": {"createdAt": "sysCreatedAt"}, - "CoachingUserProfile": {"createdAt": "sysCreatedAt", "updatedAt": "sysModifiedAt"}, - "CoachingPersona": {"createdAt": "sysCreatedAt", "updatedAt": "sysModifiedAt"}, - "CoachingBadge": {"createdAt": "sysCreatedAt"}, - "TeamsbotSession": {"creationDate": "sysCreatedAt", "lastModified": "sysModifiedAt"}, - "TeamsbotTranscript": {"creationDate": "sysCreatedAt"}, - "TeamsbotBotResponse": {"creationDate": "sysCreatedAt"}, - "TeamsbotSystemBot": {"creationDate": "sysCreatedAt", "lastModified": "sysModifiedAt"}, - "TeamsbotUserAccount": {"creationDate": "sysCreatedAt", "lastModified": "sysModifiedAt"}, - "TeamsbotUserSettings": {"creationDate": "sysCreatedAt", "lastModified": "sysModifiedAt"}, - "_system": { - k: v - for k, v in _LEGACY_SYS_PAIR_RENAMES - if k in ("_createdAt", "_modifiedAt") - }, - } - + port = int(portRaw) + except (TypeError, ValueError): + port = 5432 + if not user or not password: + logger.debug( + f"bootstrap: skip legacy _* -> sys* migration for {dbName} (missing credentials for {prefix})" + ) + return None try: - db._ensure_connection() - with db.connection.cursor() as cursor: - cursor.execute( - """ - SELECT table_name FROM information_schema.tables - WHERE table_schema = 'public' AND table_type = 'BASE TABLE' - """ - ) - tableNames = [row["table_name"] for row in cursor.fetchall()] - - totalUpdates = 0 - for table in tableNames: - cols = _getPublicTableColumns(db, table) - if not cols: - continue - - for old_col, new_col in _LEGACY_SYS_PAIR_RENAMES: - if old_col in cols and new_col in cols: - try: - 
with db.connection.cursor() as cursor: - cursor.execute( - f'UPDATE "{table}" SET "{new_col}" = "{old_col}" ' - f'WHERE "{new_col}" IS NULL AND "{old_col}" IS NOT NULL' - ) - totalUpdates += cursor.rowcount - db.connection.commit() - except Exception as e: - db.connection.rollback() - logger.debug(f"Column migrate skip {table}.{old_col}->{new_col}: {e}") - - biz = businessFieldMigrations.get(table) - if biz: - for old_col, new_col in biz.items(): - if old_col in cols and new_col in cols: - try: - with db.connection.cursor() as cursor: - cursor.execute( - f'UPDATE "{table}" SET "{new_col}" = "{old_col}" ' - f'WHERE "{new_col}" IS NULL AND "{old_col}" IS NOT NULL' - ) - totalUpdates += cursor.rowcount - db.connection.commit() - except Exception as e: - db.connection.rollback() - logger.debug(f"Business field migrate skip {table}.{old_col}->{new_col}: {e}") - - if totalUpdates: - logger.info(f"_migrateSystemFieldColumns: backfilled {totalUpdates} cell(s) on {db.dbDatabase}") + return _get_cached_connector( + dbHost=host, + dbDatabase=dbName, + dbUser=user, + dbPassword=password, + dbPort=port, + userId=None, + ) except Exception as e: - logger.error(f"_migrateSystemFieldColumns failed: {e}") + logger.warning(f"bootstrap: cannot open {dbName} for legacy _* -> sys* migration: {e}") + return None + + +def migrateLegacyUnderscoreSysColumnsAllPoweronDatabases() -> None: + """ + Run DatabaseConnector.migrateLegacyUnderscoreSysColumns on every configured PowerOn database. + Actual table scan and SQL live in the connector module. + """ + grandTotal = 0 + for dbName in _POWERON_DATABASE_NAMES: + conn = _openConnectorForPoweronDatabase(dbName) + if not conn: + continue try: - db.connection.rollback() - except Exception: - pass + grandTotal += conn.migrateLegacyUnderscoreSysColumns() + except Exception as e: + logger.warning(f"bootstrap: migrateLegacyUnderscoreSysColumns failed for {dbName}: {e}") + if grandTotal: + logger.info( + f"bootstrap: legacy _* -> sys* migration total {grandTotal} cell(s) across PowerOn databases" + ) def initBootstrap(db: DatabaseConnector) -> None: @@ -165,8 +125,8 @@ def initBootstrap(db: DatabaseConnector) -> None: # Initialize root mandate mandateId = initRootMandate(db) - # Backfill sys* columns from legacy _* / duplicate business fields (idempotent) - _migrateSystemFieldColumns(db) + # Copy legacy _createdAt/_createdBy/_modifiedAt/_modifiedBy into sys* on all PowerOn DBs (connector routine) + migrateLegacyUnderscoreSysColumnsAllPoweronDatabases() # Migrate existing mandate records: description -> label _migrateMandateDescriptionToLabel(db) diff --git a/modules/workflows/methods/methodOutlook/helpers/folderManagement.py b/modules/workflows/methods/methodOutlook/helpers/folderManagement.py index 47309a8b..2bbb8195 100644 --- a/modules/workflows/methods/methodOutlook/helpers/folderManagement.py +++ b/modules/workflows/methods/methodOutlook/helpers/folderManagement.py @@ -8,10 +8,81 @@ Handles folder ID resolution and folder name lookups. import logging import requests -from typing import Dict, Any, Optional +from typing import Dict, Any, Optional, Tuple logger = logging.getLogger(__name__) +# Microsoft Graph well-known folder path segments (always English in the URL; works for any mailbox UI language). 
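# Editorial doctest-style sketch of the resolver defined below (inputs hypothetical):
#   _wellKnownSegmentForName("Posteingang")       -> "inbox"
#   _wellKnownSegmentForName("Éléments envoyés")  -> "sentitems"   (via the alias table)
#   _wellKnownSegmentForName("Projects 2026")     -> None          (caller falls back to /mailFolders)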
+# See https://learn.microsoft.com/en-us/graph/api/resources/mailfolder +_graphWellKnownSegments = frozenset( + { + "inbox", + "drafts", + "sentitems", + "deleteditems", + "junkemail", + "outbox", + "archive", + "clutter", + "conflicts", + "conversationhistory", + "msgfolderroot", + "recoverableitemsdeletions", + "scheduled", + "searchfolders", + "syncissues", + } +) + +# Map common user/tool labels (any language) -> Graph well-known segment +_wellKnownAliases: Tuple[Tuple[str, str], ...] = ( + ("inbox", "inbox"), + ("posteingang", "inbox"), + ("postfach", "inbox"), + ("boîte de réception", "inbox"), + ("boite de reception", "inbox"), + ("drafts", "drafts"), + ("draft", "drafts"), + ("entwürfe", "drafts"), + ("entwurfe", "drafts"), + ("brouillons", "drafts"), + ("brouillon", "drafts"), + ("sent items", "sentitems"), + ("sentitems", "sentitems"), + ("gesendete elemente", "sentitems"), + ("éléments envoyés", "sentitems"), + ("elements envoyes", "sentitems"), + ("deleted items", "deleteditems"), + ("deleteditems", "deleteditems"), + ("gelöschte elemente", "deleteditems"), + ("geloschte elemente", "deleteditems"), + ("éléments supprimés", "deleteditems"), + ("junk email", "junkemail"), + ("junkemail", "junkemail"), + ("junk-e-mail", "junkemail"), + ("junk e-mail", "junkemail"), + ("courrier indésirable", "junkemail"), + ("outbox", "outbox"), + ("postausgang", "outbox"), + ("out box", "outbox"), + ("archive", "archive"), + ("archiv", "archive"), +) + + +def _wellKnownSegmentForName(folderName: str) -> Optional[str]: + """Return Graph mailFolder segment if folderName is a known default folder alias.""" + if not folderName or not str(folderName).strip(): + return None + key = str(folderName).strip().lower() + if key in _graphWellKnownSegments: + return key + for alias, segment in _wellKnownAliases: + if key == alias: + return segment + return None + + class FolderManagementHelper: """Helper for folder management operations""" @@ -42,8 +113,21 @@ class FolderManagementHelper: "Authorization": f"Bearer {connection['accessToken']}", "Content-Type": "application/json" } + + # Resolve default folders by Graph well-known name (locale-independent; avoids missing "Inbox" on paginated /mailFolders lists) + wk = _wellKnownSegmentForName(folder_name) + if wk: + wk_url = f"{graph_url}/me/mailFolders/{wk}" + wk_resp = requests.get(wk_url, headers=headers) + if wk_resp.status_code == 200: + wid = wk_resp.json().get("id") + if wid: + return wid + logger.debug( + f"Well-known folder '{wk}' lookup failed ({wk_resp.status_code}); falling back to folder list" + ) - # Get mail folders + # Get mail folders (first page only; subfolders / pagination may omit Inbox) api_url = f"{graph_url}/me/mailFolders" response = requests.get(api_url, headers=headers) From 1f42c015d658d2a19461c45abc76e8b655c57e08 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Sat, 28 Mar 2026 22:29:15 +0100 Subject: [PATCH 09/33] fixes commcoach --- .../features/commcoach/serviceCommcoach.py | 52 ++++++++++++++++++- .../features/commcoach/serviceCommcoachAi.py | 16 ++++-- 2 files changed, 61 insertions(+), 7 deletions(-) diff --git a/modules/features/commcoach/serviceCommcoach.py b/modules/features/commcoach/serviceCommcoach.py index 36fc6e16..5e5aa810 100644 --- a/modules/features/commcoach/serviceCommcoach.py +++ b/modules/features/commcoach/serviceCommcoach.py @@ -33,6 +33,7 @@ from .serviceCommcoachContextRetrieval import ( buildSessionSummariesForPrompt, findSessionByDate, searchSessionsByTopic, + searchSessionsByTopicRag, 
_parseDateFromMessage, PREVIOUS_SESSION_SUMMARIES_COUNT, ROLLING_OVERVIEW_SESSION_THRESHOLD, @@ -1035,10 +1036,29 @@ class CommcoachService: result["rollingOverview"] = rollingOverview elif intent == RetrievalIntent.RECALL_TOPIC: - retrieved = searchSessionsByTopic(completedSessions, userContent) + retrieved = list(searchSessionsByTopic(completedSessions, userContent)) + queryVector = await self._embedUserQuery(userContent) + if queryVector: + ragHits = searchSessionsByTopicRag( + userContent, + self.userId, + self.instanceId, + mandateId=self.mandateId, + queryVector=queryVector, + ) + for hit in ragHits: + content = (hit.get("content") or "").strip() + if not content: + continue + retrieved.append({ + "summary": content[:450], + "date": "", + "source": "rag", + "ragSourceLabel": hit.get("fileName") or "Mandantenwissen", + }) result["retrievedByTopic"] = retrieved if retrieved: - logger.info(f"Topic recall: found {len(retrieved)} sessions for query") + logger.info(f"Topic recall: {len(retrieved)} item(s) (sessions + optional RAG)") result["previousSessionSummaries"] = buildSessionSummariesForPrompt( allSessions, excludeSessionId=sessionId, limit=PREVIOUS_SESSION_SUMMARIES_COUNT ) @@ -1101,3 +1121,31 @@ class CommcoachService: ) ) return await aiService.callAi(aiRequest) + + async def _embedUserQuery(self, text: str) -> Optional[List[float]]: + """Embedding for mandate-wide RAG (same ServiceCenter AI service as coaching calls).""" + snippet = (text or "").strip()[:2000] + if not snippet: + return None + from modules.serviceCenter import getService + from modules.serviceCenter.context import ServiceCenterContext + + serviceContext = ServiceCenterContext( + user=self.currentUser, + mandate_id=self.mandateId, + feature_instance_id=self.instanceId, + ) + aiService = getService("ai", serviceContext) + await aiService.ensureAiObjectsInitialized() + try: + response = await aiService.callEmbedding([snippet]) + except Exception as e: + logger.warning(f"CommCoach RAG embedding failed: {e}") + return None + if not response or response.errorCount > 0: + return None + embs = (response.metadata or {}).get("embeddings") or [] + vec = embs[0] if embs else None + if isinstance(vec, list) and len(vec) > 0: + return vec + return None diff --git a/modules/features/commcoach/serviceCommcoachAi.py b/modules/features/commcoach/serviceCommcoachAi.py index 7ba52f58..97deb373 100644 --- a/modules/features/commcoach/serviceCommcoachAi.py +++ b/modules/features/commcoach/serviceCommcoachAi.py @@ -229,12 +229,18 @@ WICHTIG: Antworte NUR mit dem JSON-Objekt. 
Kein Text vor oder nach dem JSON.""" prompt += f"\n{retrievedSession.get('summary', '')[:500]}" if retrievedByTopic: - prompt += "\n\nRelevante Sessions zum angefragten Thema:" - for s in retrievedByTopic[:3]: - summary = s.get("summary", "") + prompt += "\n\nRelevante Sessions und Mandantenwissen zum angefragten Thema:" + for s in retrievedByTopic[:5]: + summary = s.get("summary", s.get("content", "")) + if not summary: + continue dateStr = s.get("date", "") - if summary: - prompt += f"\n- [{dateStr}] {summary[:300]}" + if s.get("source") == "rag": + label = s.get("ragSourceLabel") or "Mandantenwissen" + prompt += f"\n- [Wissen: {label}] {summary[:320]}" + else: + prefix = f"[{dateStr}] " if dateStr else "" + prompt += f"\n- {prefix}{summary[:300]}" if openTasks: prompt += "\n\nOffene Aufgaben:" From 1fdf238aafb1140b5fc06c9ec8c26898a2fec3a2 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Sat, 28 Mar 2026 23:54:11 +0100 Subject: [PATCH 10/33] cleaned mandate and unified mandate to be standard type --- app.py | 4 +- modules/datamodels/datamodelBilling.py | 64 +--- modules/datamodels/datamodelSubscription.py | 18 +- modules/datamodels/datamodelUam.py | 29 -- modules/features/chatbot/service.py | 26 +- .../workspace/routeFeatureWorkspace.py | 50 +++ modules/interfaces/interfaceBootstrap.py | 46 +-- modules/interfaces/interfaceDbApp.py | 83 ++--- modules/interfaces/interfaceDbBilling.py | 309 +++--------------- modules/interfaces/interfaceDbChat.py | 26 ++ modules/interfaces/interfaceDbSubscription.py | 34 ++ modules/migration/migrateRootUsers.py | 3 +- modules/routes/routeBilling.py | 102 +----- modules/routes/routeSecurityLocal.py | 108 +++--- modules/routes/routeStore.py | 5 +- modules/routes/routeSubscription.py | 15 +- modules/serviceCenter/context.py | 1 + .../services/serviceAgent/mainServiceAgent.py | 6 + .../services/serviceAi/mainServiceAi.py | 18 +- .../serviceBilling/mainServiceBilling.py | 71 +--- .../services/serviceBilling/stripeCheckout.py | 2 +- tests/test_phase123_basic.py | 11 +- 22 files changed, 366 insertions(+), 665 deletions(-) diff --git a/app.py b/app.py index 63a18f94..80a9505c 100644 --- a/app.py +++ b/app.py @@ -374,7 +374,7 @@ async def lifespan(app: FastAPI): if settingsCreated > 0: logger.info(f"Billing startup: Created {settingsCreated} missing mandate billing settings") - # Step 2: Ensure all users have billing accounts (for PREPAY_USER mandates) + # Step 2: Ensure all users have billing audit accounts accountsCreated = billingInterface.ensureAllUserAccountsExist() if accountsCreated > 0: logger.info(f"Billing startup: Created {accountsCreated} missing user accounts") @@ -500,7 +500,7 @@ app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler) async def _insufficientBalanceHandler(request: Request, exc: Exception): - """HTTP 402 with structured billing hint (PREPAY_USER vs PREPAY_MANDATE).""" + """HTTP 402 with structured billing hint.""" payload = exc.toClientDict() if hasattr(exc, "toClientDict") else {"error": "INSUFFICIENT_BALANCE", "message": str(exc)} return JSONResponse(status_code=402, content={"detail": payload}) diff --git a/modules/datamodels/datamodelBilling.py b/modules/datamodels/datamodelBilling.py index a61faa59..a0bb4f88 100644 --- a/modules/datamodels/datamodelBilling.py +++ b/modules/datamodels/datamodelBilling.py @@ -11,22 +11,6 @@ from modules.shared.attributeUtils import registerModelLabels import uuid -class BillingModelEnum(str, Enum): - """Billing model types (prepaid only; legacy UNLIMITED in DB maps to 
PREPAY_MANDATE).""" - PREPAY_MANDATE = "PREPAY_MANDATE" # Prepaid budget shared by all users in mandate - PREPAY_USER = "PREPAY_USER" # Prepaid budget per user within mandate - - -# Nur fuer initRootMandateBilling (Root-Mandant PREPAY_USER + Startguthaben in Settings). -DEFAULT_USER_CREDIT_CHF = 5.0 - - -class AccountTypeEnum(str, Enum): - """Account type for billing accounts.""" - MANDATE = "MANDATE" # Account for entire mandate - USER = "USER" # Account for specific user within mandate - - class TransactionTypeEnum(str, Enum): """Transaction types for billing.""" CREDIT = "CREDIT" # Credit/top-up (positive) @@ -55,8 +39,7 @@ class BillingAccount(PowerOnModel): default_factory=lambda: str(uuid.uuid4()), description="Primary key" ) mandateId: str = Field(..., description="Foreign key to Mandate") - userId: Optional[str] = Field(None, description="Foreign key to User (only for PREPAY_USER)") - accountType: AccountTypeEnum = Field(..., description="Account type: MANDATE or USER") + userId: Optional[str] = Field(None, description="Foreign key to User (None = mandate pool account, set = user audit account)") balance: float = Field(default=0.0, description="Current balance in CHF") warningThreshold: float = Field(default=0.0, description="Warning threshold in CHF") lastWarningAt: Optional[datetime] = Field(None, description="Last warning sent timestamp") @@ -70,7 +53,6 @@ registerModelLabels( "id": {"en": "ID", "de": "ID"}, "mandateId": {"en": "Mandate ID", "de": "Mandanten-ID"}, "userId": {"en": "User ID", "de": "Benutzer-ID"}, - "accountType": {"en": "Account Type", "de": "Kontotyp"}, "balance": {"en": "Balance (CHF)", "de": "Guthaben (CHF)"}, "warningThreshold": {"en": "Warning Threshold (CHF)", "de": "Warnschwelle (CHF)"}, "lastWarningAt": {"en": "Last Warning", "de": "Letzte Warnung"}, @@ -130,27 +112,28 @@ registerModelLabels( class BillingSettings(BaseModel): - """Billing settings per mandate.""" + """Billing settings per mandate. Only PREPAY_MANDATE model.""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), description="Primary key" ) mandateId: str = Field(..., description="Foreign key to Mandate (UNIQUE)") - billingModel: BillingModelEnum = Field(..., description="Billing model") - - # Configuration - defaultUserCredit: float = Field( - default=0.0, - description="Automatic initial credit (CHF) for PREPAY_USER only when a user is newly added to the root mandate; other mandates use 0 on join.", - ) + warningThresholdPercent: float = Field(default=10.0, description="Warning threshold as percentage") # Stripe stripeCustomerId: Optional[str] = Field(None, description="Stripe Customer ID (cus_xxx) — one per mandate") - # Notifications (e.g. 
mandate owner / finance — also used when PREPAY_MANDATE pool is exhausted) + # Auto-Recharge for AI budget + autoRechargeEnabled: bool = Field(default=False, description="Auto-buy AI budget when low") + rechargeAmountCHF: float = Field(default=10.0, description="Amount per auto-recharge (CHF, prepaid via Stripe)") + rechargeMaxPerMonth: int = Field(default=3, description="Max auto-recharges per month") + rechargesThisMonth: int = Field(default=0, description="Counter: auto-recharges used this month") + monthResetAt: Optional[datetime] = Field(None, description="When rechargesThisMonth was last reset") + + # Notifications notifyEmails: List[str] = Field( default_factory=list, - description="Email addresses for billing alerts (mandate pool exhausted, warnings, etc.)", + description="Email addresses for billing alerts (pool exhausted, warnings, etc.)", ) notifyOnWarning: bool = Field(default=True, description="Send email when warning threshold is reached") @@ -161,16 +144,14 @@ registerModelLabels( { "id": {"en": "ID", "de": "ID"}, "mandateId": {"en": "Mandate ID", "de": "Mandanten-ID"}, - "billingModel": {"en": "Billing Model", "de": "Abrechnungsmodell"}, - "defaultUserCredit": { - "en": "Root start credit (CHF)", - "de": "Startguthaben nur Root-Mandant (CHF)", - }, "warningThresholdPercent": {"en": "Warning Threshold (%)", "de": "Warnschwelle (%)"}, "stripeCustomerId": {"en": "Stripe Customer ID", "de": "Stripe-Kunden-ID"}, + "autoRechargeEnabled": {"en": "Auto-Recharge", "de": "Auto-Nachladung"}, + "rechargeAmountCHF": {"en": "Recharge Amount (CHF)", "de": "Nachladebetrag (CHF)"}, + "rechargeMaxPerMonth": {"en": "Max Recharges/Month", "de": "Max. Nachladungen/Monat"}, "notifyEmails": { "en": "Billing notification emails (owner / admin)", - "de": "E-Mails für Billing-Alerts (Inhaber/Admin)", + "de": "E-Mails fuer Billing-Alerts (Inhaber/Admin)", }, "notifyOnWarning": {"en": "Notify on Warning", "de": "Bei Warnung benachrichtigen"}, }, @@ -239,7 +220,6 @@ class BillingBalanceResponse(BaseModel): """Response model for balance endpoint.""" mandateId: str mandateName: str - billingModel: BillingModelEnum balance: float currency: str = "CHF" warningThreshold: float @@ -270,20 +250,8 @@ class BillingCheckResult(BaseModel): reason: Optional[str] = None currentBalance: Optional[float] = None requiredAmount: Optional[float] = None - billingModel: Optional[BillingModelEnum] = None upgradeRequired: Optional[bool] = None subscriptionUiPath: Optional[str] = None userAction: Optional[str] = None -def parseBillingModelFromStoredValue(raw: Optional[str]) -> BillingModelEnum: - """Map DB string to enum. 
Legacy UNLIMITED / unknown values become PREPAY_MANDATE.""" - if raw is None or (isinstance(raw, str) and raw.strip() == ""): - return BillingModelEnum.PREPAY_MANDATE - s = str(raw).strip().upper() - if s == "UNLIMITED": - return BillingModelEnum.PREPAY_MANDATE - try: - return BillingModelEnum(raw) - except ValueError: - return BillingModelEnum.PREPAY_MANDATE diff --git a/modules/datamodels/datamodelSubscription.py b/modules/datamodels/datamodelSubscription.py index fa9f2c87..3b0e46b9 100644 --- a/modules/datamodels/datamodelSubscription.py +++ b/modules/datamodels/datamodelSubscription.py @@ -72,6 +72,7 @@ class SubscriptionPlan(BaseModel): maxFeatureInstances: Optional[int] = Field(None, description="Hard cap on active feature instances (None = unlimited)") trialDays: Optional[int] = Field(None, description="Trial duration in days (only for trial plans)") maxDataVolumeMB: Optional[int] = Field(None, description="Soft-limit for data volume in MB per mandate (None = unlimited)") + budgetAiCHF: float = Field(default=0.0, description="AI budget (CHF) included in subscription price per billing period") successorPlanKey: Optional[str] = Field(None, description="Plan to transition to when trial ends") @@ -87,6 +88,7 @@ registerModelLabels( "maxUsers": {"en": "Max Users", "de": "Max. Benutzer", "fr": "Max. utilisateurs"}, "maxFeatureInstances": {"en": "Max Instances", "de": "Max. Instanzen", "fr": "Max. instances"}, "maxDataVolumeMB": {"en": "Data Volume (MB)", "de": "Datenvolumen (MB)"}, + "budgetAiCHF": {"en": "AI Budget (CHF)", "de": "AI-Budget (CHF)"}, }, ) @@ -186,14 +188,15 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = { maxUsers=None, maxFeatureInstances=None, maxDataVolumeMB=None, + budgetAiCHF=0.0, ), "TRIAL_7D": SubscriptionPlan( planKey="TRIAL_7D", selectableByUser=False, title={"en": "Free Trial (7 days)", "de": "Gratis-Testphase (7 Tage)", "fr": "Essai gratuit (7 jours)"}, description={ - "en": "Try the platform for 7 days — 1 user, up to 3 feature instances.", - "de": "Plattform 7 Tage testen — 1 User, bis zu 3 Feature-Instanzen.", + "en": "Try the platform for 7 days — 1 user, up to 3 feature instances, 5 CHF AI budget included.", + "de": "Plattform 7 Tage testen — 1 User, bis zu 3 Feature-Instanzen, 5 CHF AI-Budget inklusive.", }, billingPeriod=BillingPeriodEnum.NONE, autoRenew=False, @@ -201,6 +204,7 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = { maxFeatureInstances=3, trialDays=7, maxDataVolumeMB=500, + budgetAiCHF=5.0, successorPlanKey="STANDARD_MONTHLY", ), "STANDARD_MONTHLY": SubscriptionPlan( @@ -208,26 +212,28 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = { selectableByUser=True, title={"en": "Standard (Monthly)", "de": "Standard (Monatlich)", "fr": "Standard (Mensuel)"}, description={ - "en": "Usage-based billing per active user and feature instance, billed monthly.", - "de": "Nutzungsbasierte Abrechnung pro aktivem User und Feature-Instanz, monatlich.", + "en": "Usage-based billing per active user and feature instance, billed monthly. Includes 10 CHF AI budget.", + "de": "Nutzungsbasierte Abrechnung pro aktivem User und Feature-Instanz, monatlich. Inkl. 
10 CHF AI-Budget.", }, billingPeriod=BillingPeriodEnum.MONTHLY, pricePerUserCHF=90.0, pricePerFeatureInstanceCHF=150.0, maxDataVolumeMB=10240, + budgetAiCHF=10.0, ), "STANDARD_YEARLY": SubscriptionPlan( planKey="STANDARD_YEARLY", selectableByUser=True, title={"en": "Standard (Yearly)", "de": "Standard (Jährlich)", "fr": "Standard (Annuel)"}, description={ - "en": "Usage-based billing per active user and feature instance, billed yearly.", - "de": "Nutzungsbasierte Abrechnung pro aktivem User und Feature-Instanz, jährlich.", + "en": "Usage-based billing per active user and feature instance, billed yearly. Includes 120 CHF AI budget.", + "de": "Nutzungsbasierte Abrechnung pro aktivem User und Feature-Instanz, jährlich. Inkl. 120 CHF AI-Budget.", }, billingPeriod=BillingPeriodEnum.YEARLY, pricePerUserCHF=1080.0, pricePerFeatureInstanceCHF=1800.0, maxDataVolumeMB=10240, + budgetAiCHF=120.0, ), } diff --git a/modules/datamodels/datamodelUam.py b/modules/datamodels/datamodelUam.py index 5a057639..741ce3d5 100644 --- a/modules/datamodels/datamodelUam.py +++ b/modules/datamodels/datamodelUam.py @@ -60,12 +60,6 @@ class UserPermissions(BaseModel): ) -class MandateType(str, Enum): - SYSTEM = "system" - PERSONAL = "personal" - COMPANY = "company" - - class Mandate(PowerOnModel): """ Mandate (Mandant/Tenant) model. @@ -95,15 +89,6 @@ class Mandate(PowerOnModel): description="Whether this is a system mandate (e.g. root mandate). Cannot be deleted.", json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": False} ) - mandateType: MandateType = Field( - default=MandateType.COMPANY, - description="Fachlicher Mandantentyp: system (Root), personal (Solo), company (Team). Mutabel, rein informativ — keine Feature-Gates.", - json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [ - {"value": "system", "label": {"en": "System", "de": "System"}}, - {"value": "personal", "label": {"en": "Personal", "de": "Persönlich"}}, - {"value": "company", "label": {"en": "Company", "de": "Unternehmen"}}, - ]} - ) deletedAt: Optional[float] = Field( default=None, description="Timestamp when the mandate was soft-deleted. 
After 30 days, hard-delete is triggered.", @@ -118,19 +103,6 @@ class Mandate(PowerOnModel): return False return v - @field_validator('mandateType', mode='before') - @classmethod - def _coerceMandateType(cls, v): - if v is None: - return MandateType.COMPANY - if isinstance(v, str): - try: - return MandateType(v) - except ValueError: - return MandateType.COMPANY - return v - - registerModelLabels( "Mandate", {"en": "Mandate", "de": "Mandant", "fr": "Mandat"}, @@ -140,7 +112,6 @@ registerModelLabels( "label": {"en": "Label", "de": "Label", "fr": "Libellé"}, "enabled": {"en": "Enabled", "de": "Aktiviert", "fr": "Activé"}, "isSystem": {"en": "System Mandate", "de": "System-Mandant", "fr": "Mandat système"}, - "mandateType": {"en": "Mandate Type", "de": "Mandantentyp", "fr": "Type de mandat"}, "deletedAt": {"en": "Deleted at", "de": "Gelöscht am", "fr": "Supprimé le"}, }, ) diff --git a/modules/features/chatbot/service.py b/modules/features/chatbot/service.py index 121ca29b..a98150b5 100644 --- a/modules/features/chatbot/service.py +++ b/modules/features/chatbot/service.py @@ -1222,23 +1222,21 @@ def _preflight_billing_check(services, mandateId: str, featureInstanceId: Option balanceCheck = billingService.checkBalance(0.01) if not balanceCheck.allowed: mid = str(getattr(services, "mandateId", None) or mandateId or "") - from modules.datamodels.datamodelBilling import BillingModelEnum from modules.serviceCenter.services.serviceBilling.billingExhaustedNotify import ( maybeEmailMandatePoolExhausted, ) - if balanceCheck.billingModel == BillingModelEnum.PREPAY_MANDATE: - u = getattr(services, "user", None) - ulabel = ( - (getattr(u, "email", None) or getattr(u, "username", None) or str(getattr(u, "id", ""))) - if u is not None else "" - ) - maybeEmailMandatePoolExhausted( - mid, - str(getattr(u, "id", "") if u is not None else ""), - ulabel, - float(balanceCheck.currentBalance or 0.0), - 0.01, - ) + u = getattr(services, "user", None) + ulabel = ( + (getattr(u, "email", None) or getattr(u, "username", None) or str(getattr(u, "id", ""))) + if u is not None else "" + ) + maybeEmailMandatePoolExhausted( + mid, + str(getattr(u, "id", "") if u is not None else ""), + ulabel, + float(balanceCheck.currentBalance or 0.0), + 0.01, + ) raise BillingService.InsufficientBalanceException.fromBalanceCheck( balanceCheck, mid, diff --git a/modules/features/workspace/routeFeatureWorkspace.py b/modules/features/workspace/routeFeatureWorkspace.py index 7698181a..79295f35 100644 --- a/modules/features/workspace/routeFeatureWorkspace.py +++ b/modules/features/workspace/routeFeatureWorkspace.py @@ -87,6 +87,7 @@ class WorkspaceInputRequest(BaseModel): workflowId: Optional[str] = Field(default=None, description="Continue existing workflow") userLanguage: str = Field(default="en", description="User language code") allowedProviders: List[str] = Field(default_factory=list, description="Restrict AI to these providers") + requireNeutralization: Optional[bool] = Field(default=None, description="Per-request neutralization override") async def _getAiObjects() -> AiObjects: @@ -588,6 +589,7 @@ async def streamWorkspaceStart( userLanguage=userInput.userLanguage, instanceConfig=instanceConfig, allowedProviders=userInput.allowedProviders, + requireNeutralization=userInput.requireNeutralization, ) ) eventManager.register_agent_task(queueId, agentTask) @@ -643,6 +645,7 @@ async def _runWorkspaceAgent( userLanguage: str = "en", instanceConfig: Dict[str, Any] = None, allowedProviders: List[str] = None, + requireNeutralization: 
Optional[bool] = None, ): """Run the serviceAgent loop and forward events to the SSE queue.""" try: @@ -660,6 +663,8 @@ async def _runWorkspaceAgent( if allowedProviders: aiService.services.allowedProviders = allowedProviders + if requireNeutralization is not None: + ctx.requireNeutralization = requireNeutralization wfRecord = chatInterface.getWorkflow(workflowId) if workflowId else None wfName = "" @@ -887,6 +892,7 @@ async def listWorkspaceWorkflows( request: Request, instanceId: str = Path(...), includeArchived: bool = Query(default=False, description="Include archived workflows"), + search: str = Query(default="", description="Fulltext search in workflow titles and message content"), context: RequestContext = Depends(getRequestContext), ): """List workspace workflows/conversations for this instance.""" @@ -930,10 +936,54 @@ async def listWorkspaceWorkflows( item.setdefault("featureLabel", labels["featureLabel"]) item.setdefault("featureCode", labels["featureCode"]) item.setdefault("featureInstanceId", fiId) + + lastMsg = chatInterface.getLastMessageTimestamp(item.get("id")) + if lastMsg: + item["lastMessageAt"] = lastMsg + items.append(item) + + if search and search.strip(): + searchLower = search.strip().lower() + matchedIds = set() + for item in items: + if searchLower in (item.get("name") or "").lower() or searchLower in (item.get("label") or "").lower(): + matchedIds.add(item["id"]) + contentHits = chatInterface.searchWorkflowsByContent(searchLower, limit=50) + matchedIds.update(contentHits) + items = [i for i in items if i["id"] in matchedIds] + return JSONResponse({"workflows": items}) +class ResolveRagRequest(BaseModel): + """Request body for resolving a chat via RAG.""" + chatId: str = Field(..., description="Workflow/chat ID to resolve") + + +@router.post("/{instanceId}/resolve-rag") +@limiter.limit("60/minute") +async def resolveRag( + request: Request, + instanceId: str = Path(...), + body: ResolveRagRequest = Body(...), + context: RequestContext = Depends(getRequestContext), +): + """Build a RAG summary for a chat (workflow) to inject into the input area.""" + _validateInstanceAccess(instanceId, context) + chatInterface = _getChatInterface(context, featureInstanceId=instanceId) + messages = chatInterface.getMessages(body.chatId) or [] + + texts = [] + for msg in messages[:30]: + content = msg.get("message") if isinstance(msg, dict) else getattr(msg, "message", "") + if content: + texts.append(content[:500]) + + summary = "\n---\n".join(texts[:10]) if texts else "" + return JSONResponse({"summary": summary, "chatId": body.chatId, "messageCount": len(texts)}) + + class UpdateWorkflowRequest(BaseModel): """Request body for updating a workflow (PATCH).""" name: Optional[str] = Field(default=None, description="New workflow name") diff --git a/modules/interfaces/interfaceBootstrap.py b/modules/interfaces/interfaceBootstrap.py index 98b70466..0c186475 100644 --- a/modules/interfaces/interfaceBootstrap.py +++ b/modules/interfaces/interfaceBootstrap.py @@ -418,8 +418,6 @@ def initRootMandate(db: DatabaseConnector) -> Optional[str]: if existingMandates: mandateId = existingMandates[0].get("id") logger.info(f"Root mandate already exists with ID {mandateId}") - # Ensure mandateType is set to system - db.recordModify(Mandate, mandateId, {"mandateType": "system"}) return mandateId # Check for legacy root mandates (name="Root" without isSystem flag) and migrate @@ -435,8 +433,6 @@ def initRootMandate(db: DatabaseConnector) -> Optional[str]: createdMandate = db.recordCreate(Mandate, 
rootMandate) mandateId = createdMandate.get("id") logger.info(f"Root mandate created with ID {mandateId}") - # mandateType already set via Mandate constructor, but ensure: - db.recordModify(Mandate, mandateId, {"mandateType": "system"}) return mandateId @@ -2116,71 +2112,43 @@ def _createStoreResourceRules(db: DatabaseConnector) -> None: def initRootMandateBilling(mandateId: str) -> None: """ - Initialize billing settings for root mandate. - Root mandate uses PREPAY_USER model with default initial credit per user in settings (DEFAULT_USER_CREDIT_CHF at bootstrap only). - Creates billing accounts for ALL users regardless of billing model (for audit trail). - - Args: - mandateId: Root mandate ID + Initialize billing settings for root mandate (PREPAY_MANDATE). + Creates mandate pool account and user audit accounts. """ try: from modules.interfaces.interfaceDbBilling import _getRootInterface from modules.interfaces.interfaceDbApp import getRootInterface as getAppRootInterface - from modules.datamodels.datamodelBilling import ( - BillingSettings, - BillingModelEnum, - DEFAULT_USER_CREDIT_CHF, - parseBillingModelFromStoredValue, - ) + from modules.datamodels.datamodelBilling import BillingSettings billingInterface = _getRootInterface() appInterface = getAppRootInterface() - # Check if settings already exist existingSettings = billingInterface.getSettings(mandateId) if existingSettings: logger.info("Billing settings for root mandate already exist") else: settings = BillingSettings( mandateId=mandateId, - billingModel=BillingModelEnum.PREPAY_USER, - defaultUserCredit=DEFAULT_USER_CREDIT_CHF, warningThresholdPercent=10.0, notifyOnWarning=True ) - billingInterface.createSettings(settings) - logger.info( - f"Created billing settings for root mandate: PREPAY_USER with {DEFAULT_USER_CREDIT_CHF} CHF default credit" - ) + logger.info("Created billing settings for root mandate: PREPAY_MANDATE") existingSettings = billingInterface.getSettings(mandateId) - # Always create user accounts for all users (audit trail) if existingSettings: - billingModel = parseBillingModelFromStoredValue( - existingSettings.get("billingModel") - ).value - - # Initial balance depends on billing model - if billingModel == BillingModelEnum.PREPAY_USER.value: - initialBalance = float(existingSettings.get("defaultUserCredit", 0.0)) - else: - initialBalance = 0.0 # PREPAY_MANDATE: budget on pool account - + billingInterface.getOrCreateMandateAccount(mandateId, initialBalance=0.0) userMandates = appInterface.getUserMandatesByMandate(mandateId) accountsCreated = 0 - for um in userMandates: userId = um.get("userId") if isinstance(um, dict) else getattr(um, "userId", None) if userId: existingAccount = billingInterface.getUserAccount(mandateId, userId) if not existingAccount: - billingInterface.getOrCreateUserAccount(mandateId, userId, initialBalance=initialBalance) + billingInterface.getOrCreateUserAccount(mandateId, userId, initialBalance=0.0) accountsCreated += 1 - logger.debug(f"Created billing account for user {userId}") - if accountsCreated > 0: - logger.info(f"Created {accountsCreated} billing accounts for root mandate users with {initialBalance} CHF each") + logger.info(f"Created {accountsCreated} billing audit accounts for root mandate users") except Exception as e: logger.warning(f"Failed to initialize root mandate billing (non-critical): {e}") diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py index ee1dc379..13179634 100644 --- a/modules/interfaces/interfaceDbApp.py +++ 
b/modules/interfaces/interfaceDbApp.py @@ -1407,12 +1407,11 @@ class AppObjects: return Mandate(**createdRecord) - def _provisionMandateForUser(self, userId: str, mandateType: str, mandateName: str, planKey: str) -> Dict[str, Any]: + def _provisionMandateForUser(self, userId: str, mandateName: str, planKey: str) -> Dict[str, Any]: """ Atomic provisioning: create Mandate + UserMandate + Subscription + auto-create FeatureInstances. Internal method — bypasses RBAC (used during registration when user has no permissions yet). """ - from modules.datamodels.datamodelUam import MandateType from modules.datamodels.datamodelSubscription import MandateSubscription, SubscriptionStatusEnum, BUILTIN_PLANS from modules.datamodels.datamodelFeatures import FeatureInstance from modules.interfaces.interfaceBootstrap import copySystemRolesToMandate @@ -1428,7 +1427,6 @@ class AppObjects: label=mandateName, enabled=True, isSystem=False, - mandateType=MandateType(mandateType), ) createdMandate = self.db.recordCreate(Mandate, mandateData) if not createdMandate or not createdMandate.get("id"): @@ -1497,11 +1495,10 @@ class AppObjects: except Exception as e: logger.error(f"Error auto-creating instance for '{featureName}': {e}") - logger.info(f"Provisioned mandate {mandateId} (type={mandateType}, plan={planKey}) for user {userId}, instances={createdInstances}") + logger.info(f"Provisioned mandate {mandateId} (plan={planKey}) for user {userId}, instances={createdInstances}") return { "mandateId": mandateId, "planKey": planKey, - "mandateType": mandateType, "featureInstances": createdInstances, } except Exception as e: @@ -1632,7 +1629,10 @@ class AppObjects: from modules.datamodels.datamodelChat import ChatWorkflow, ChatMessage, ChatLog from modules.datamodels.datamodelFiles import FileItem from modules.datamodels.datamodelDataSource import DataSource - from modules.datamodels.datamodelKnowledge import FileContentIndex + from modules.datamodels.datamodelKnowledge import FileContentIndex, ContentChunk + from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource + from modules.datamodels.datamodelBilling import BillingSettings, BillingAccount, BillingTransaction + from modules.datamodels.datamodelRbac import FeatureAccessRole, UserMandateRole from modules.features.neutralization.datamodelFeatureNeutralizer import DataNeutralizerAttributes instances = self.db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) @@ -1643,12 +1643,15 @@ class AppObjects: if not instId: continue - # 0a. FileContentIndex (knowledge/RAG) + # 0a. ContentChunk (embeddings) + FileContentIndex (knowledge/RAG) fciRecords = self.db.getRecordset(FileContentIndex, recordFilter={"featureInstanceId": instId}) for rec in fciRecords: + chunks = self.db.getRecordset(ContentChunk, recordFilter={"fileContentIndexId": rec.get("id")}) + for chunk in chunks: + self.db.recordDelete(ContentChunk, chunk.get("id")) self.db.recordDelete(FileContentIndex, rec.get("id")) if fciRecords: - logger.info(f"Cascade: deleted {len(fciRecords)} FileContentIndex records for instance {instId}") + logger.info(f"Cascade: deleted {len(fciRecords)} FileContentIndex records (with chunks) for instance {instId}") # 0b. DataNeutralizerAttributes dnaRecords = self.db.getRecordset(DataNeutralizerAttributes, recordFilter={"featureInstanceId": instId}) @@ -1664,6 +1667,13 @@ class AppObjects: if dsRecords: logger.info(f"Cascade: deleted {len(dsRecords)} DataSource records for instance {instId}") + # 0c2. 
FeatureDataSource + fdsRecords = self.db.getRecordset(FeatureDataSource, recordFilter={"featureInstanceId": instId}) + for rec in fdsRecords: + self.db.recordDelete(FeatureDataSource, rec.get("id")) + if fdsRecords: + logger.info(f"Cascade: deleted {len(fdsRecords)} FeatureDataSource records for instance {instId}") + # 0d. FileItem fileRecords = self.db.getRecordset(FileItem, recordFilter={"featureInstanceId": instId}) for rec in fileRecords: @@ -1687,11 +1697,14 @@ class AppObjects: if workflows: logger.info(f"Cascade: deleted {len(workflows)} ChatWorkflows (with messages/logs) for instance {instId}") - # 1. Delete FeatureAccess + FeatureAccessRole for all instances in this mandate + # 1. Delete FeatureAccess + FeatureAccessRole for all instances for inst in instances: instId = inst.get("id") accesses = self.db.getRecordset(FeatureAccess, recordFilter={"featureInstanceId": instId}) for access in accesses: + roles = self.db.getRecordset(FeatureAccessRole, recordFilter={"featureAccessId": access.get("id")}) + for role in roles: + self.db.recordDelete(FeatureAccessRole, role.get("id")) self.db.recordDelete(FeatureAccess, access.get("id")) self.db.recordDelete(FeatureInstance, instId) logger.info(f"Cascade: deleted {len(instances)} FeatureInstances for mandate {mandateId}") @@ -1699,6 +1712,9 @@ class AppObjects: # 2. Delete UserMandate + UserMandateRole memberships = self.db.getRecordset(UserMandate, recordFilter={"mandateId": mandateId}) for um in memberships: + umRoles = self.db.getRecordset(UserMandateRole, recordFilter={"userMandateId": um.get("id")}) + for umr in umRoles: + self.db.recordDelete(UserMandateRole, umr.get("id")) self.db.recordDelete(UserMandate, um.get("id")) logger.info(f"Cascade: deleted {len(memberships)} UserMandates for mandate {mandateId}") @@ -1718,6 +1734,20 @@ class AppObjects: self.db.recordDelete(MandateSubscription, subId) logger.info(f"Cascade: deleted {len(subs)} subscriptions for mandate {mandateId}") + # 3b. Delete Billing data + billingTxs = self.db.getRecordset(BillingTransaction, recordFilter={"mandateId": mandateId}) if hasattr(BillingTransaction, '__table_name__') else [] + billingAccounts = self.db.getRecordset(BillingAccount, recordFilter={"mandateId": mandateId}) + for acc in billingAccounts: + accTxs = self.db.getRecordset(BillingTransaction, recordFilter={"accountId": acc.get("id")}) + for tx in accTxs: + self.db.recordDelete(BillingTransaction, tx.get("id")) + self.db.recordDelete(BillingAccount, acc.get("id")) + billingSettings = self.db.getRecordset(BillingSettings, recordFilter={"mandateId": mandateId}) + for bs in billingSettings: + self.db.recordDelete(BillingSettings, bs.get("id")) + if billingAccounts or billingSettings: + logger.info(f"Cascade: deleted billing data for mandate {mandateId}") + # 4. Delete mandate-level Roles from modules.datamodels.datamodelRbac import Role, AccessRule roles = self.db.getRecordset(Role, recordFilter={"mandateId": mandateId}) @@ -1821,7 +1851,7 @@ class AppObjects: def createUserMandate(self, userId: str, mandateId: str, roleIds: List[str] = None) -> UserMandate: """ Create a UserMandate record (add user to mandate). - Also creates a billing account for the user if billing is configured for PREPAY_USER. + Also creates a billing audit account for the user if billing is configured. INVARIANT: A UserMandate MUST have at least one UserMandateRole. 
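A minimal standalone sketch of what the simplified provisioning in the next hunk amounts to (the function form and name are illustrative; the interface calls are the ones this patch introduces):

    # Condensed view of _ensureUserBillingAccount after this change: spendable
    # budget lives only on the mandate pool account (userId=None); per-user
    # accounts exist purely as an audit trail and always start at 0.0 CHF.
    def ensureUserAuditAccount(billingInterface, mandateId: str, userId: str) -> None:
        settings = billingInterface.getSettings(mandateId)
        if not settings:
            return  # mandate has no billing configured
        billingInterface.getOrCreateUserAccount(mandateId, userId, initialBalance=0.0)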
@@ -1871,43 +1901,20 @@ class AppObjects: def _ensureUserBillingAccount(self, userId: str, mandateId: str) -> None: """ - Ensure a user has a billing account for the mandate if billing is configured. - User accounts are always created for all billing models (for audit trail). - Initial balance depends on billing model: - - PREPAY_USER: defaultUserCredit from mandate BillingSettings when joining the root mandate (missing key => 0.0); - other mandates get 0.0. - - PREPAY_MANDATE: 0.0 on the user account (shared pool — no per-user start credit) - - Args: - userId: User ID - mandateId: Mandate ID + Ensure a user has a billing audit account for the mandate. + Balance is always on the mandate pool (PREPAY_MANDATE). User accounts are for audit trail only. """ try: from modules.interfaces.interfaceDbBilling import _getRootInterface as getBillingRootInterface - from modules.datamodels.datamodelBilling import BillingModelEnum, parseBillingModelFromStoredValue billingInterface = getBillingRootInterface() settings = billingInterface.getSettings(mandateId) if not settings: - return # No billing configured for this mandate + return - billingModel = parseBillingModelFromStoredValue(settings.get("billingModel")) - - # Initial balance depends on billing model (start credit only on root mandate for PREPAY_USER) - rootMandateId = self._getRootMandateId() - isRootMandate = rootMandateId is not None and str(mandateId) == str(rootMandateId) - if billingModel == BillingModelEnum.PREPAY_USER: - initialBalance = ( - float(settings.get("defaultUserCredit", 0.0)) - if isRootMandate - else 0.0 - ) - else: - initialBalance = 0.0 # PREPAY_MANDATE: budget is on pool - - billingInterface.getOrCreateUserAccount(mandateId, userId, initialBalance=initialBalance) - logger.info(f"Ensured billing account for user {userId} in mandate {mandateId} (model={billingModel.value}, initial={initialBalance} CHF)") + billingInterface.getOrCreateUserAccount(mandateId, userId, initialBalance=0.0) + logger.info(f"Ensured billing audit account for user {userId} in mandate {mandateId}") except Exception as e: logger.warning(f"Failed to create billing account for user {userId} (non-critical): {e}") diff --git a/modules/interfaces/interfaceDbBilling.py b/modules/interfaces/interfaceDbBilling.py index 343e2215..c8c13d13 100644 --- a/modules/interfaces/interfaceDbBilling.py +++ b/modules/interfaces/interfaceDbBilling.py @@ -24,14 +24,11 @@ from modules.datamodels.datamodelBilling import ( BillingSettings, StripeWebhookEvent, UsageStatistics, - BillingModelEnum, - AccountTypeEnum, TransactionTypeEnum, ReferenceTypeEnum, PeriodTypeEnum, BillingBalanceResponse, BillingCheckResult, - parseBillingModelFromStoredValue, ) logger = logging.getLogger(__name__) @@ -160,8 +157,6 @@ class BillingObjects: """ Get billing settings for a mandate. - Normalizes billingModel for API (legacy UNLIMITED → PREPAY_MANDATE) and persists once. 
- Args: mandateId: Mandate ID @@ -175,27 +170,7 @@ class BillingObjects: ) if not results: return None - row = dict(results[0]) - raw_bm = row.get("billingModel") - parsed = parseBillingModelFromStoredValue(raw_bm) - if str(raw_bm or "").strip().upper() == "UNLIMITED": - try: - self.updateSettings( - row["id"], - {"billingModel": BillingModelEnum.PREPAY_MANDATE.value}, - ) - logger.info( - "Migrated billing settings for mandate %s: UNLIMITED → PREPAY_MANDATE", - mandateId, - ) - except Exception as mig_err: - logger.warning( - "Could not persist billing model migration for mandate %s: %s", - mandateId, - mig_err, - ) - row["billingModel"] = parsed.value - return row + return dict(results[0]) except Exception as e: logger.error(f"Error getting billing settings: {e}") return None @@ -226,13 +201,12 @@ class BillingObjects: """ return self.db.recordModify(BillingSettings, settingsId, updates) - def getOrCreateSettings(self, mandateId: str, defaultModel: BillingModelEnum = BillingModelEnum.PREPAY_MANDATE) -> Dict[str, Any]: + def getOrCreateSettings(self, mandateId: str) -> Dict[str, Any]: """ Get or create billing settings for a mandate. Args: mandateId: Mandate ID - defaultModel: Default billing model if creating Returns: BillingSettings dict @@ -243,8 +217,6 @@ class BillingObjects: settings = BillingSettings( mandateId=mandateId, - billingModel=defaultModel, - defaultUserCredit=0.0, warningThresholdPercent=10.0, notifyOnWarning=True, ) @@ -281,7 +253,7 @@ class BillingObjects: BillingAccount, recordFilter={ "mandateId": mandateId, - "accountType": AccountTypeEnum.MANDATE.value + "userId": None } ) return results[0] if results else None @@ -305,8 +277,7 @@ class BillingObjects: BillingAccount, recordFilter={ "mandateId": mandateId, - "userId": userId, - "accountType": AccountTypeEnum.USER.value + "userId": userId } ) return results[0] if results else None @@ -376,7 +347,6 @@ class BillingObjects: account = BillingAccount( mandateId=mandateId, - accountType=AccountTypeEnum.MANDATE, balance=initialBalance, enabled=True ) @@ -401,7 +371,6 @@ class BillingObjects: account = BillingAccount( mandateId=mandateId, userId=userId, - accountType=AccountTypeEnum.USER, balance=initialBalance, enabled=True ) @@ -422,7 +391,7 @@ class BillingObjects: def ensureAllMandateSettingsExist(self) -> int: """ Efficiently ensure all mandates have billing settings. - Creates default settings (PREPAY_MANDATE, 0 CHF) for mandates without settings. + Creates default settings (0 CHF) for mandates without settings. Uses bulk queries to minimize database connections. Returns: @@ -451,16 +420,13 @@ class BillingObjects: if not mandateId or mandateId in existingMandateIds: continue - # Create default billing settings settings = BillingSettings( mandateId=mandateId, - billingModel=BillingModelEnum.PREPAY_MANDATE, - defaultUserCredit=0.0, warningThresholdPercent=10.0, notifyOnWarning=True, ) self.createSettings(settings) - existingMandateIds.add(mandateId) # Track newly created + existingMandateIds.add(mandateId) settingsCreated += 1 if settingsCreated > 0: @@ -475,11 +441,7 @@ class BillingObjects: def ensureAllUserAccountsExist(self) -> int: """ Ensure all users across all mandates have billing accounts. - User accounts are always created regardless of billing model (for audit trail). 
- Initial balance depends on billing model: - - PREPAY_USER: defaultUserCredit from settings only for the root mandate; other mandates get 0.0 - - PREPAY_MANDATE: 0.0 (budget is on pool) - + User accounts are always created for audit trail with initial balance 0.0. Uses bulk queries to minimize database connections. Returns: @@ -488,44 +450,29 @@ class BillingObjects: try: accountsCreated = 0 appDb = _getAppDatabaseConnector() - rootMandateId = _getCachedRootMandateId() - # Step 1: Get all billing settings (all mandates with settings get user accounts) allSettings = self.db.getRecordset(BillingSettings) - billingMandates = {} # mandateId -> (billingModel, defaultCredit) - for s in allSettings: - billingModel = parseBillingModelFromStoredValue(s.get("billingModel")).value - mid = s.get("mandateId") - isRoot = rootMandateId is not None and str(mid) == str(rootMandateId) - if billingModel == BillingModelEnum.PREPAY_USER.value: - defaultCredit = ( - float(s.get("defaultUserCredit", 0.0) or 0.0) if isRoot else 0.0 - ) - else: - defaultCredit = 0.0 - billingMandates[mid] = (billingModel, defaultCredit) + billingMandateIds = set( + s.get("mandateId") for s in allSettings if s.get("mandateId") + ) - if not billingMandates: + if not billingMandateIds: logger.debug("No billable mandates found, skipping account check") return 0 - # Step 2: Get all existing USER accounts in one query - allAccounts = self.db.getRecordset( - BillingAccount, - recordFilter={"accountType": AccountTypeEnum.USER.value} - ) + allAccounts = self.db.getRecordset(BillingAccount) existingAccountKeys = set() for acc in allAccounts: + if not acc.get("userId"): + continue key = (acc.get("mandateId"), acc.get("userId")) existingAccountKeys.add(key) - # Step 3: Get all user-mandate combinations from APP database allUserMandates = appDb.getRecordset( UserMandate, recordFilter={"enabled": True} ) - # Step 4: Create missing accounts for um in allUserMandates: mandateId = um.get("mandateId") userId = um.get("userId") @@ -533,32 +480,20 @@ class BillingObjects: if not mandateId or not userId: continue - if mandateId not in billingMandates: + if mandateId not in billingMandateIds: continue key = (mandateId, userId) if key in existingAccountKeys: continue - billingModel, defaultCredit = billingMandates[mandateId] - account = BillingAccount( mandateId=mandateId, userId=userId, - accountType=AccountTypeEnum.USER, - balance=defaultCredit, + balance=0.0, enabled=True ) - created = self.createAccount(account) - - if defaultCredit > 0: - self.createTransaction(BillingTransaction( - accountId=created["id"], - transactionType=TransactionTypeEnum.CREDIT, - amount=defaultCredit, - description="Initial credit for new user", - referenceType=ReferenceTypeEnum.SYSTEM - )) + self.createAccount(account) existingAccountKeys.add(key) accountsCreated += 1 @@ -810,35 +745,14 @@ class BillingObjects: """ Check if there's sufficient balance for an operation. - - PREPAY_USER: user.balance >= estimatedCost - - PREPAY_MANDATE: mandate pool balance >= estimatedCost - - User accounts are always ensured to exist (for audit trail). - Root mandate + PREPAY_USER: initial credit from settings.defaultUserCredit on first create. - Missing settings: treated as PREPAY_MANDATE with empty pool (strict). + Checks mandate pool balance against estimatedCost. + User accounts are ensured to exist for audit trail. + Missing settings: treated as PREPAY_MANDATE with empty pool. 
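        Example (illustrative; the argument order of checkBalance and the
        caller-side handler are assumptions, the result fields come from
        BillingCheckResult in this patch):

            result = billing.checkBalance(mandateId, userId, estimatedCost=0.01)
            if not result.allowed:
                # reason == "INSUFFICIENT_BALANCE"; currentBalance is the pool
                # balance, requiredAmount the estimated cost that was checked
                notifyPoolExhausted(mandateId, result.currentBalance)  # hypothetical handler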
""" - settings = self.getSettings(mandateId) - if not settings: - billingModel = BillingModelEnum.PREPAY_MANDATE - defaultCredit = 0.0 - else: - billingModel = parseBillingModelFromStoredValue(settings.get("billingModel")) - defaultCredit = float(settings.get("defaultUserCredit", 0.0) or 0.0) + self.getOrCreateUserAccount(mandateId, userId, initialBalance=0.0) - rootMandateId = _getCachedRootMandateId() - isRootMandate = rootMandateId is not None and str(mandateId) == str(rootMandateId) - if billingModel == BillingModelEnum.PREPAY_USER: - initialBalance = defaultCredit if isRootMandate else 0.0 - else: - initialBalance = 0.0 - self.getOrCreateUserAccount(mandateId, userId, initialBalance=initialBalance) - - if billingModel == BillingModelEnum.PREPAY_USER: - account = self.getUserAccount(mandateId, userId) - currentBalance = account.get("balance", 0.0) if account else 0.0 - else: - poolAccount = self.getOrCreateMandateAccount(mandateId) - currentBalance = poolAccount.get("balance", 0.0) + poolAccount = self.getOrCreateMandateAccount(mandateId) + currentBalance = poolAccount.get("balance", 0.0) if currentBalance < estimatedCost: return BillingCheckResult( @@ -846,10 +760,9 @@ class BillingObjects: reason="INSUFFICIENT_BALANCE", currentBalance=currentBalance, requiredAmount=estimatedCost, - billingModel=billingModel, ) - return BillingCheckResult(allowed=True, currentBalance=currentBalance, billingModel=billingModel) + return BillingCheckResult(allowed=True, currentBalance=currentBalance) def recordUsage( self, @@ -870,10 +783,8 @@ class BillingObjects: """ Record usage cost as a billing transaction. - Transaction is ALWAYS recorded on the user's account (clean audit trail). - Balance is deducted from the appropriate account based on billing model: - - PREPAY_USER: deduct from user's own balance - - PREPAY_MANDATE: deduct from mandate pool balance + Transaction is recorded on the user's account (audit trail). + Balance is always deducted from the mandate pool account (PREPAY_MANDATE). 
""" if priceCHF <= 0: return None @@ -883,9 +794,6 @@ class BillingObjects: logger.debug(f"No billing settings for mandate {mandateId}, skipping usage recording") return None - billingModel = parseBillingModelFromStoredValue(settings.get("billingModel")) - - # Transaction is ALWAYS on the user's account (audit trail) userAccount = self.getOrCreateUserAccount(mandateId, userId) transaction = BillingTransaction( @@ -906,13 +814,8 @@ class BillingObjects: errorCount=errorCount ) - # Determine where to deduct balance - if billingModel == BillingModelEnum.PREPAY_USER: - return self.createTransaction(transaction) - if billingModel == BillingModelEnum.PREPAY_MANDATE: - poolAccount = self.getOrCreateMandateAccount(mandateId) - return self.createTransaction(transaction, balanceAccountId=poolAccount["id"]) - return None + poolAccount = self.getOrCreateMandateAccount(mandateId) + return self.createTransaction(transaction, balanceAccountId=poolAccount["id"]) # ========================================================================= # Workflow Cost Query @@ -928,112 +831,6 @@ class BillingObjects: ) return sum(t.get("amount", 0.0) for t in transactions) - # ========================================================================= - # Billing Model Switch Operations - # ========================================================================= - - def switchBillingModel(self, mandateId: str, oldModel: BillingModelEnum, newModel: BillingModelEnum) -> Dict[str, Any]: - """ - Switch billing model with budget migration logged as BillingTransactions. - - PREPAY_MANDATE -> PREPAY_USER: pool debited, equal shares credited to user accounts. - PREPAY_USER -> PREPAY_MANDATE: user wallets debited, pool credited with sum. - """ - result = {"oldModel": oldModel.value, "newModel": newModel.value, "migratedAmount": 0.0, "userCount": 0} - - if oldModel == newModel: - return result - - if oldModel == BillingModelEnum.PREPAY_MANDATE and newModel == BillingModelEnum.PREPAY_USER: - poolAccount = self.getMandateAccount(mandateId) - userAccounts = self.db.getRecordset( - BillingAccount, - recordFilter={"mandateId": mandateId, "accountType": AccountTypeEnum.USER.value} - ) - poolBalance = poolAccount.get("balance", 0.0) if poolAccount else 0.0 - n = len(userAccounts) - if poolAccount and poolBalance > 0: - self.createTransaction( - BillingTransaction( - accountId=poolAccount["id"], - transactionType=TransactionTypeEnum.DEBIT, - amount=poolBalance, - description="Model switch: distributed from mandate pool to user wallets", - referenceType=ReferenceTypeEnum.SYSTEM, - ) - ) - result["migratedAmount"] = poolBalance - if n > 0: - remaining = poolBalance - for i, acc in enumerate(userAccounts): - if i == n - 1: - share = round(remaining, 4) - else: - share = round(poolBalance / n, 4) - remaining -= share - if share > 0: - self.createTransaction( - BillingTransaction( - accountId=acc["id"], - transactionType=TransactionTypeEnum.CREDIT, - amount=share, - description="Model switch: share from mandate pool", - referenceType=ReferenceTypeEnum.SYSTEM, - ) - ) - result["userCount"] = n - logger.info( - "Switched %s MANDATE->USER: migrated %.4f CHF to %d user account(s) (transactions logged)", - mandateId, - result["migratedAmount"], - result["userCount"], - ) - return result - - if oldModel == BillingModelEnum.PREPAY_USER and newModel == BillingModelEnum.PREPAY_MANDATE: - userAccounts = self.db.getRecordset( - BillingAccount, - recordFilter={"mandateId": mandateId, "accountType": AccountTypeEnum.USER.value} - ) - totalUserBalance = 
sum(acc.get("balance", 0.0) for acc in userAccounts) - for acc in userAccounts: - b = acc.get("balance", 0.0) - if b > 0: - self.createTransaction( - BillingTransaction( - accountId=acc["id"], - transactionType=TransactionTypeEnum.DEBIT, - amount=b, - description="Model switch: consolidated to mandate pool", - referenceType=ReferenceTypeEnum.SYSTEM, - ) - ) - poolAccount = self.getOrCreateMandateAccount(mandateId, initialBalance=0.0) - if totalUserBalance > 0: - self.createTransaction( - BillingTransaction( - accountId=poolAccount["id"], - transactionType=TransactionTypeEnum.CREDIT, - amount=totalUserBalance, - description="Model switch: consolidated from user accounts", - referenceType=ReferenceTypeEnum.SYSTEM, - ) - ) - result["migratedAmount"] = totalUserBalance - result["userCount"] = len(userAccounts) - logger.info( - "Switched %s USER->MANDATE: consolidated %.4f CHF from %d users into pool (transactions logged)", - mandateId, - totalUserBalance, - len(userAccounts), - ) - return result - - if newModel == BillingModelEnum.PREPAY_MANDATE: - self.getOrCreateMandateAccount(mandateId, initialBalance=0.0) - - return result - # ========================================================================= # Statistics Operations # ========================================================================= @@ -1128,10 +925,8 @@ class BillingObjects: def getBalancesForUser(self, userId: str) -> List[BillingBalanceResponse]: """ Get all billing balances for a user across mandates. + Shows the mandate pool balance (shared budget visible to user). - Shows the effective available budget: - - PREPAY_USER: user's own account balance - - PREPAY_MANDATE: mandate pool balance (shared budget visible to user) Args: userId: User ID @@ -1163,27 +958,15 @@ class BillingObjects: if not settings: continue - billingModel = parseBillingModelFromStoredValue(settings.get("billingModel")) - - if billingModel == BillingModelEnum.PREPAY_USER: - account = self.getOrCreateUserAccount(mandateId, userId) - if not account: - continue - balance = account.get("balance", 0.0) - warningThreshold = account.get("warningThreshold", 0.0) - elif billingModel == BillingModelEnum.PREPAY_MANDATE: - poolAccount = self.getOrCreateMandateAccount(mandateId) - if not poolAccount: - continue - balance = poolAccount.get("balance", 0.0) - warningThreshold = poolAccount.get("warningThreshold", 0.0) - else: + poolAccount = self.getOrCreateMandateAccount(mandateId) + if not poolAccount: continue + balance = poolAccount.get("balance", 0.0) + warningThreshold = poolAccount.get("warningThreshold", 0.0) balances.append(BillingBalanceResponse( mandateId=mandateId, mandateName=mandateName, - billingModel=billingModel, balance=balance, warningThreshold=warningThreshold, isWarning=balance <= warningThreshold, @@ -1280,36 +1063,25 @@ class BillingObjects: if not mandateId: continue - billingModel = parseBillingModelFromStoredValue(settings.get("billingModel")) - - # Get mandate info mandate = appInterface.getMandate(mandateId) mandateName = "" if mandate: mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", "") if isinstance(mandate, dict) else "") - # Get user accounts count (always exist now for audit trail) - userAccounts = self.db.getRecordset( + allMandateAccounts = self.db.getRecordset( BillingAccount, - recordFilter={"mandateId": mandateId, "accountType": AccountTypeEnum.USER.value} + recordFilter={"mandateId": mandateId} ) - userCount = len(userAccounts) + userCount = sum(1 
for acc in allMandateAccounts if acc.get("userId")) - if billingModel == BillingModelEnum.PREPAY_USER: - totalBalance = sum(acc.get("balance", 0.0) for acc in userAccounts) - elif billingModel == BillingModelEnum.PREPAY_MANDATE: - poolAccount = self.getMandateAccount(mandateId) - totalBalance = poolAccount.get("balance", 0.0) if poolAccount else 0.0 - else: - totalBalance = 0.0 + poolAccount = self.getMandateAccount(mandateId) + totalBalance = poolAccount.get("balance", 0.0) if poolAccount else 0.0 balances.append({ "mandateId": mandateId, "mandateName": mandateName, - "billingModel": billingModel.value, "totalBalance": totalBalance, "userCount": userCount, - "defaultUserCredit": float(settings.get("defaultUserCredit", 0.0) or 0.0), "warningThresholdPercent": settings.get("warningThresholdPercent", 10.0), }) @@ -1385,9 +1157,8 @@ class BillingObjects: try: appInterface = getAppInterface(self.currentUser) - # Get all user accounts - accountFilter = {"accountType": AccountTypeEnum.USER.value} - allAccounts = self.db.getRecordset(BillingAccount, recordFilter=accountFilter) + allAccounts = self.db.getRecordset(BillingAccount) + allAccounts = [acc for acc in allAccounts if acc.get("userId")] # Filter by mandate if specified if mandateIds: diff --git a/modules/interfaces/interfaceDbChat.py b/modules/interfaces/interfaceDbChat.py index 192cbad4..60f4db44 100644 --- a/modules/interfaces/interfaceDbChat.py +++ b/modules/interfaces/interfaceDbChat.py @@ -651,6 +651,32 @@ class ChatObjects: totalPages=totalPages ) + def getLastMessageTimestamp(self, workflowId: str) -> Optional[str]: + """Return the latest publishedAt/sysCreatedAt from ChatMessage for a workflow.""" + messages = self._getRecordset(ChatMessage, recordFilter={"workflowId": workflowId}) + if not messages: + return None + latest = None + for msg in messages: + ts = msg.get("publishedAt") or msg.get("sysCreatedAt") + if ts and (latest is None or str(ts) > str(latest)): + latest = ts + return str(latest) if latest else None + + def searchWorkflowsByContent(self, query: str, limit: int = 50) -> List[str]: + """Return workflow IDs whose messages contain the query string (case-insensitive).""" + allMessages = self._getRecordset(ChatMessage) + matchedIds: set = set() + for msg in allMessages: + content = msg.get("message") or "" + if query in content.lower(): + wfId = msg.get("workflowId") + if wfId: + matchedIds.add(wfId) + if len(matchedIds) >= limit: + break + return list(matchedIds) + def getWorkflow(self, workflowId: str) -> Optional[ChatWorkflow]: """Returns a workflow by ID if user has access.""" # Use RBAC filtering with featureInstanceId for instance-level isolation diff --git a/modules/interfaces/interfaceDbSubscription.py b/modules/interfaces/interfaceDbSubscription.py index f08025ea..2405ec73 100644 --- a/modules/interfaces/interfaceDbSubscription.py +++ b/modules/interfaces/interfaceDbSubscription.py @@ -293,9 +293,43 @@ class SubscriptionObjects: if current + delta > cap: from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import SubscriptionCapacityException raise SubscriptionCapacityException(resourceType=resourceType, currentCount=current, maxAllowed=cap) + elif resourceType == "dataVolumeMB": + cap = plan.maxDataVolumeMB + if cap is None: + return True + currentMB = self._getMandateDataVolumeMB(mandateId) + if currentMB + delta > cap: + from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import SubscriptionCapacityException + raise 
SubscriptionCapacityException(resourceType=resourceType, currentCount=int(currentMB), maxAllowed=cap) return True + def _getMandateDataVolumeMB(self, mandateId: str) -> float: + """Sum RAG index size (FileContentIndex.totalSize) across all feature instances of the mandate.""" + try: + from modules.datamodels.datamodelKnowledge import FileContentIndex + knowledgeDb = _getAppDatabaseConnector() + indexes = knowledgeDb.getRecordset(FileContentIndex, recordFilter={"mandateId": mandateId}) + totalBytes = sum(int(idx.get("totalSize") or 0) for idx in indexes) + return totalBytes / (1024 * 1024) + except Exception: + return 0.0 + + def getDataVolumeWarning(self, mandateId: str) -> Optional[Dict[str, Any]]: + """Return a warning dict if mandate uses >=80% of maxDataVolumeMB, else None.""" + sub = self.getOperativeForMandate(mandateId) + if not sub: + return None + plan = self.getPlan(sub.get("planKey", "")) + if not plan or not plan.maxDataVolumeMB: + return None + usedMB = self._getMandateDataVolumeMB(mandateId) + limitMB = plan.maxDataVolumeMB + percent = (usedMB / limitMB * 100) if limitMB > 0 else 0 + if percent >= 80: + return {"usedMB": round(usedMB, 2), "limitMB": limitMB, "percent": round(percent, 1), "warning": True} + return {"usedMB": round(usedMB, 2), "limitMB": limitMB, "percent": round(percent, 1), "warning": False} + # ========================================================================= # Counting (cross-DB queries against poweron_app) # ========================================================================= diff --git a/modules/migration/migrateRootUsers.py b/modules/migration/migrateRootUsers.py index 69d1b7af..11424987 100644 --- a/modules/migration/migrateRootUsers.py +++ b/modules/migration/migrateRootUsers.py @@ -241,8 +241,7 @@ def migrateRootUsers(db, dryRun: bool = False) -> dict: try: result = rootInterface._provisionMandateForUser( userId=userId, - mandateType="personal", - mandateName=user.get("fullName") or username, + mandateName=f"Home {username}", planKey="TRIAL_7D", ) targetMandateId = result["mandateId"] diff --git a/modules/routes/routeBilling.py b/modules/routes/routeBilling.py index 88ec0cc6..4062163e 100644 --- a/modules/routes/routeBilling.py +++ b/modules/routes/routeBilling.py @@ -30,7 +30,6 @@ from modules.datamodels.datamodelBilling import ( BillingAccount, BillingTransaction, BillingSettings, - BillingModelEnum, TransactionTypeEnum, ReferenceTypeEnum, PeriodTypeEnum, @@ -38,7 +37,6 @@ from modules.datamodels.datamodelBilling import ( BillingStatisticsResponse, BillingStatisticsChartData, BillingCheckResult, - parseBillingModelFromStoredValue, ) # Configure logger @@ -229,14 +227,14 @@ def _filterTransactionsByScope(transactions: list, scope: BillingDataScope) -> l class CreditAddRequest(BaseModel): """Request model for adding or deducting credit from an account.""" - userId: Optional[str] = Field(None, description="Target user ID (for PREPAY_USER model)") + userId: Optional[str] = Field(None, description="Target user ID for audit trail only (optional)") amount: float = Field(..., description="Amount in CHF. Positive = credit, negative = deduction. 
Must not be zero.") description: str = Field(default="Manual credit", description="Transaction description") class CheckoutCreateRequest(BaseModel): """Request model for creating Stripe Checkout Session.""" - userId: Optional[str] = Field(None, description="Target user ID (for PREPAY_USER model)") + userId: Optional[str] = Field(None, description="Target user ID for audit trail only (optional)") amount: float = Field(..., gt=0, description="Amount to pay in CHF (must be in allowed presets)") returnUrl: str = Field(..., min_length=1, description="Absolute frontend URL used for Stripe success/cancel redirects") @@ -262,8 +260,6 @@ class CheckoutConfirmResponse(BaseModel): class BillingSettingsUpdate(BaseModel): """Request model for updating billing settings.""" - billingModel: Optional[BillingModelEnum] = None - defaultUserCredit: Optional[float] = Field(None, ge=0) warningThresholdPercent: Optional[float] = Field(None, ge=0, le=100) notifyOnWarning: Optional[bool] = None notifyEmails: Optional[List[str]] = None @@ -293,7 +289,6 @@ class AccountSummary(BaseModel): id: str mandateId: str userId: Optional[str] - accountType: str balance: float warningThreshold: float enabled: bool @@ -317,10 +312,8 @@ class MandateBalanceResponse(BaseModel): """Mandate-level balance summary.""" mandateId: str mandateName: str - billingModel: str totalBalance: float userCount: int - defaultUserCredit: float warningThresholdPercent: float @@ -414,15 +407,7 @@ def _creditStripeSessionIfNeeded( if not settings: raise HTTPException(status_code=404, detail="Billing settings not found") - billing_model = parseBillingModelFromStoredValue(settings.get("billingModel")) - if billing_model == BillingModelEnum.PREPAY_USER: - if not user_id: - raise HTTPException(status_code=400, detail="userId required for PREPAY_USER") - account = billingInterface.getOrCreateUserAccount(mandate_id, user_id, initialBalance=0.0) - elif billing_model == BillingModelEnum.PREPAY_MANDATE: - account = billingInterface.getOrCreateMandateAccount(mandate_id, initialBalance=0.0) - else: - raise HTTPException(status_code=400, detail=f"Cannot add credit to {billing_model.value}") + account = billingInterface.getOrCreateMandateAccount(mandate_id, initialBalance=0.0) transaction = BillingTransaction( accountId=account["id"], @@ -516,7 +501,6 @@ def getBalanceForMandate( return BillingBalanceResponse( mandateId=targetMandateId, mandateName=mandateName, - billingModel=checkResult.billingModel or BillingModelEnum.PREPAY_MANDATE, balance=checkResult.currentBalance or 0.0, warningThreshold=0.0, # TODO: Get from account isWarning=False, @@ -608,8 +592,6 @@ def getStatistics( costByFeature={} ) - billingModel = parseBillingModelFromStoredValue(settings.get("billingModel")) - # Transactions are always on user accounts (audit trail) account = billingInterface.getUserAccount(ctx.mandateId, ctx.user.id) @@ -734,18 +716,6 @@ def createOrUpdateSettings( if existingSettings: updates = settingsUpdate.model_dump(exclude_none=True) if updates: - # Check if billing model is changing - trigger budget migration - if "billingModel" in updates: - oldModel = parseBillingModelFromStoredValue(existingSettings.get("billingModel")) - newModel = ( - BillingModelEnum(updates["billingModel"]) - if isinstance(updates["billingModel"], str) - else updates["billingModel"] - ) - if oldModel != newModel: - migrationResult = billingInterface.switchBillingModel(targetMandateId, oldModel, newModel) - logger.info(f"Billing model migration for {targetMandateId}: {migrationResult}") - result = 
billingInterface.updateSettings(existingSettings["id"], updates) return result or existingSettings return existingSettings @@ -754,16 +724,6 @@ def createOrUpdateSettings( newSettings = BillingSettings( mandateId=targetMandateId, - billingModel=( - settingsUpdate.billingModel - if settingsUpdate.billingModel is not None - else BillingModelEnum.PREPAY_MANDATE - ), - defaultUserCredit=( - settingsUpdate.defaultUserCredit - if settingsUpdate.defaultUserCredit is not None - else 0.0 - ), warningThresholdPercent=( settingsUpdate.warningThresholdPercent if settingsUpdate.warningThresholdPercent is not None @@ -797,34 +757,15 @@ def addCredit( ): """ Add credit to a billing account (SysAdmin only). - For PREPAY_USER model, specify userId. For PREPAY_MANDATE, leave userId empty. """ try: - # Get settings to determine billing model billingInterface = getBillingInterface(ctx.user, targetMandateId) settings = billingInterface.getSettings(targetMandateId) if not settings: raise HTTPException(status_code=404, detail="Billing settings not found for this mandate") - billingModel = parseBillingModelFromStoredValue(settings.get("billingModel")) - - # Validate request based on billing model - if billingModel == BillingModelEnum.PREPAY_USER: - if not creditRequest.userId: - raise HTTPException(status_code=400, detail="userId is required for PREPAY_USER model") - - # Create user-level account if needed and add credit - account = billingInterface.getOrCreateUserAccount( - targetMandateId, - creditRequest.userId, - initialBalance=0.0 - ) - elif billingModel == BillingModelEnum.PREPAY_MANDATE: - # Create mandate-level account if needed and add credit - account = billingInterface.getOrCreateMandateAccount(targetMandateId, initialBalance=0.0) - else: - raise HTTPException(status_code=400, detail=f"Cannot add credit to {billingModel.value} billing model") + account = billingInterface.getOrCreateMandateAccount(targetMandateId, initialBalance=0.0) if creditRequest.amount == 0: raise HTTPException(status_code=400, detail="Amount must not be zero") @@ -867,8 +808,7 @@ def createCheckoutSession( ): """ Create Stripe Checkout Session for credit top-up. Returns redirect URL. - RBAC: PREPAY_USER requires mandate membership (user loads own account), - PREPAY_MANDATE requires mandate admin role. + Requires mandate admin role. 
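A client-side sketch of the simplified flow (endpoint path, host, and the response key are assumptions; the payload fields come from CheckoutCreateRequest above):

    import requests

    admin_cookies = {"access_token": "<jwt>"}  # assumption: cookie auth as set by setAccessTokenCookie
    resp = requests.post(
        "https://api.example.com/billing/mandates/m-123/checkout",  # hypothetical URL
        json={"amount": 50.0, "returnUrl": "https://app.example.com/billing/return"},
        cookies=admin_cookies,  # caller must hold the mandate admin role
    )
    resp.raise_for_status()
    stripe_redirect_url = resp.json().get("redirectUrl")  # assumed response key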
""" try: billingInterface = getBillingInterface(ctx.user, targetMandateId) @@ -877,20 +817,8 @@ def createCheckoutSession( if not settings: raise HTTPException(status_code=404, detail="Billing settings not found for this mandate") - billingModel = parseBillingModelFromStoredValue(settings.get("billingModel")) - - if billingModel == BillingModelEnum.PREPAY_USER: - if not checkoutRequest.userId: - raise HTTPException(status_code=400, detail="userId is required for PREPAY_USER model") - if str(checkoutRequest.userId) != str(ctx.user.id): - raise HTTPException(status_code=403, detail="Users can only load credit to their own account") - if not _isMemberOfMandate(ctx, targetMandateId): - raise HTTPException(status_code=403, detail="User is not a member of this mandate") - elif billingModel == BillingModelEnum.PREPAY_MANDATE: - if not _isAdminOfMandate(ctx, targetMandateId): - raise HTTPException(status_code=403, detail="Mandate admin role required to load mandate credit") - else: - raise HTTPException(status_code=400, detail=f"Cannot add credit to {billingModel.value} billing model") + if not _isAdminOfMandate(ctx, targetMandateId): + raise HTTPException(status_code=403, detail="Mandate admin role required to load mandate credit") from modules.serviceCenter.services.serviceBilling.stripeCheckout import create_checkout_session redirect_url = create_checkout_session( @@ -944,19 +872,8 @@ def confirmCheckoutSession( if not settings: raise HTTPException(status_code=404, detail="Billing settings not found") - billing_model = parseBillingModelFromStoredValue(settings.get("billingModel")) - if billing_model == BillingModelEnum.PREPAY_USER: - if not user_id: - raise HTTPException(status_code=400, detail="userId required for PREPAY_USER") - if str(user_id) != str(ctx.user.id): - raise HTTPException(status_code=403, detail="Users can only confirm their own payment sessions") - if not _isMemberOfMandate(ctx, mandate_id): - raise HTTPException(status_code=403, detail="User is not a member of this mandate") - elif billing_model == BillingModelEnum.PREPAY_MANDATE: - if not _isAdminOfMandate(ctx, mandate_id): - raise HTTPException(status_code=403, detail="Mandate admin role required") - else: - raise HTTPException(status_code=400, detail=f"Cannot add credit to {billing_model.value}") + if not _isAdminOfMandate(ctx, mandate_id): + raise HTTPException(status_code=403, detail="Mandate admin role required") root_billing_interface = _getRootInterface() return _creditStripeSessionIfNeeded(root_billing_interface, session_dict, eventId=None) @@ -1321,7 +1238,6 @@ def getAccounts( id=acc.get("id"), mandateId=acc.get("mandateId"), userId=acc.get("userId"), - accountType=acc.get("accountType"), balance=acc.get("balance", 0.0), warningThreshold=acc.get("warningThreshold", 0.0), enabled=acc.get("enabled", True) diff --git a/modules/routes/routeSecurityLocal.py b/modules/routes/routeSecurityLocal.py index f066fda2..fb71444b 100644 --- a/modules/routes/routeSecurityLocal.py +++ b/modules/routes/routeSecurityLocal.py @@ -17,7 +17,7 @@ from jose import jwt from modules.auth import getCurrentUser, limiter, SECRET_KEY, ALGORITHM, getRequestContext, RequestContext from modules.auth import createAccessToken, createRefreshToken, setAccessTokenCookie, setRefreshTokenCookie, clearAccessTokenCookie, clearRefreshTokenCookie from modules.interfaces.interfaceDbApp import getInterface, getRootInterface -from modules.datamodels.datamodelUam import User, UserInDB, AuthAuthority, Mandate, MandateType +from modules.datamodels.datamodelUam 
import User, UserInDB, AuthAuthority, Mandate from modules.datamodels.datamodelSecurity import Token, TokenPurpose from modules.shared.configuration import APP_CONFIG from modules.shared.timeUtils import getUtcTimestamp @@ -87,6 +87,22 @@ router = APIRouter( } ) +def _ensureHomeMandate(rootInterface, user) -> None: + """Ensure user has a Home mandate. Creates 'Home {username}' if none exists.""" + userMandates = rootInterface.getUserMandates(str(user.id)) + homeMandateName = f"Home {user.username}" + for um in userMandates: + mandate = rootInterface.getMandate(um.mandateId) + if mandate and (mandate.name or "").startswith("Home ") and not mandate.isSystem: + return + rootInterface._provisionMandateForUser( + userId=str(user.id), + mandateName=homeMandateName, + planKey="TRIAL_7D", + ) + logger.info(f"Created Home mandate '{homeMandateName}' for user {user.username}") + + @router.post("/login") @limiter.limit("30/minute") def login( @@ -183,6 +199,12 @@ def login( except Exception as subErr: logger.error(f"Error activating subscriptions on login: {subErr}") + # Ensure user has a Home mandate (created on first login if missing) + try: + _ensureHomeMandate(rootInterface, user) + except Exception as homeErr: + logger.error(f"Error ensuring Home mandate for user {user.username}: {homeErr}") + # Log successful login (app log file + audit DB for traceability) logger.info("Login successful for username=%s (userId=%s)", formData.username, str(user.id)) try: @@ -298,32 +320,35 @@ def register_user( detail="Failed to register user" ) - # Provision mandate for new user + # Provision Home mandate for every new user ("Home {username}") provisionResult = None try: - if registrationType == "company": - if not companyName: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail="companyName is required for company registration" - ) - provisionResult = appInterface._provisionMandateForUser( + homeMandateName = f"Home {user.username}" + provisionResult = appInterface._provisionMandateForUser( + userId=str(user.id), + mandateName=homeMandateName, + planKey="TRIAL_7D", + ) + logger.info(f"Provisioned Home mandate for user {user.id}: {provisionResult}") + except Exception as provErr: + logger.error(f"Error provisioning Home mandate for user {user.id}: {provErr}") + + # If company registration, also create a company mandate with the paid plan + if registrationType == "company": + if not companyName: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="companyName is required for company registration" + ) + try: + companyResult = appInterface._provisionMandateForUser( userId=str(user.id), - mandateType="company", mandateName=companyName, planKey="STANDARD_MONTHLY", ) - else: - provisionResult = appInterface._provisionMandateForUser( - userId=str(user.id), - mandateType="personal", - mandateName=user.fullName or user.username, - planKey="TRIAL_7D", - ) - logger.info(f"Provisioned mandate for user {user.id}: {provisionResult}") - except Exception as provErr: - logger.error(f"Error provisioning mandate for user {user.id}: {provErr}") - # Don't fail registration if provisioning fails — user can still use store + logger.info(f"Provisioned company mandate for user {user.id}: {companyResult}") + except Exception as compErr: + logger.error(f"Error provisioning company mandate for user {user.id}: {compErr}") # Generate reset token for password setup token, expires = appInterface.generateResetTokenAndExpiry() @@ -406,7 +431,6 @@ Falls Sie sich nicht registriert haben, können Sie 
diese E-Mail ignorieren.""" } if provisionResult: responseData["mandateId"] = provisionResult.get("mandateId") - responseData["mandateType"] = provisionResult.get("mandateType") return responseData except ValueError as e: @@ -698,37 +722,24 @@ Falls Sie diese Anforderung nicht gestellt haben, können Sie diese E-Mail ignor def onboarding_provision( request: Request, currentUser: User = Depends(getCurrentUser), - mandateType: str = Body("personal", embed=True), companyName: str = Body(None, embed=True), + planKey: str = Body("TRIAL_7D", embed=True), ) -> Dict[str, Any]: - """Post-login onboarding: provision mandate for OAuth users who registered without one.""" + """Post-login onboarding: ensure Home mandate exists and optionally create a company mandate.""" try: appInterface = getRootInterface() - userMandates = appInterface.getUserMandates(str(currentUser.id)) - hasOwnMandate = False - for um in userMandates: - mandate = appInterface.getMandate(um.mandateId) - if mandate and not mandate.isSystem: - hasOwnMandate = True - break + _ensureHomeMandate(appInterface, currentUser) - if hasOwnMandate: - return {"message": "User already has a mandate", "alreadyProvisioned": True} - - if mandateType == "company": - mandateName = companyName or currentUser.fullName or currentUser.username - planKey = "STANDARD_MONTHLY" - else: - mandateName = currentUser.fullName or currentUser.username - planKey = "TRIAL_7D" - - result = appInterface._provisionMandateForUser( - userId=str(currentUser.id), - mandateType=mandateType, - mandateName=mandateName, - planKey=planKey, - ) + result = None + if companyName and companyName.strip(): + if planKey not in ("STANDARD_MONTHLY", "STANDARD_YEARLY"): + planKey = "STANDARD_MONTHLY" + result = appInterface._provisionMandateForUser( + userId=str(currentUser.id), + mandateName=companyName.strip(), + planKey=planKey, + ) try: activatedCount = appInterface._activatePendingSubscriptions(str(currentUser.id)) @@ -740,8 +751,7 @@ def onboarding_provision( logger.info(f"Onboarding provision for {currentUser.username}: {result}") return { "message": "Mandate provisioned successfully", - "mandateId": result.get("mandateId"), - "mandateType": result.get("mandateType"), + "mandateId": result.get("mandateId") if result else None, "alreadyProvisioned": False, } diff --git a/modules/routes/routeStore.py b/modules/routes/routeStore.py index cbd4ef6e..19b81ca7 100644 --- a/modules/routes/routeStore.py +++ b/modules/routes/routeStore.py @@ -146,10 +146,10 @@ def listUserMandates( adminMandateIds = _getUserAdminMandateIds(db, userId) if not adminMandateIds: + homeMandateName = f"Home {context.user.username}" provisionResult = rootInterface._provisionMandateForUser( userId=userId, - mandateType="personal", - mandateName=context.user.fullName or context.user.username, + mandateName=homeMandateName, planKey="TRIAL_7D", ) adminMandateIds = [provisionResult["mandateId"]] @@ -164,7 +164,6 @@ def listUserMandates( "id": mid, "name": m.get("name", ""), "label": m.get("label") or m.get("name", ""), - "mandateType": m.get("mandateType", "company"), }) return result except Exception as e: diff --git a/modules/routes/routeSubscription.py b/modules/routes/routeSubscription.py index 0c5eed4e..7aad386f 100644 --- a/modules/routes/routeSubscription.py +++ b/modules/routes/routeSubscription.py @@ -468,7 +468,12 @@ def _getDataVolumeUsage( size = f.get("fileSize") if isinstance(f, dict) else getattr(f, "fileSize", 0) totalBytes += (size or 0) - usedMB = round(totalBytes / (1024 * 1024), 2) + filesMB = 
round(totalBytes / (1024 * 1024), 2) + + from modules.datamodels.datamodelKnowledge import FileContentIndex + ragIndexes = rootIf.db.getRecordset(FileContentIndex, recordFilter={"mandateId": mandateId}) + ragBytes = sum(int(idx.get("totalSize") or 0) if isinstance(idx, dict) else int(getattr(idx, "totalSize", 0) or 0) for idx in ragIndexes) + ragMB = round(ragBytes / (1024 * 1024), 2) maxMB = None subs = rootIf.db.getRecordset(MandateSubscription, recordFilter={"mandateId": mandateId}) @@ -484,10 +489,14 @@ def _getDataVolumeUsage( if maxMB: break + usedMB = ragMB + percentUsed = round((usedMB / maxMB) * 100, 1) if maxMB else None return { "mandateId": mandateId, "usedMB": usedMB, + "filesMB": filesMB, + "ragIndexMB": ragMB, "maxDataVolumeMB": maxMB, - "percentUsed": round((usedMB / maxMB) * 100, 1) if maxMB else None, - "warning": usedMB >= (maxMB * 0.8) if maxMB else False, + "percentUsed": percentUsed, + "warning": (percentUsed or 0) >= 80, } diff --git a/modules/serviceCenter/context.py b/modules/serviceCenter/context.py index f9ab0a44..acad6d61 100644 --- a/modules/serviceCenter/context.py +++ b/modules/serviceCenter/context.py @@ -20,6 +20,7 @@ class ServiceCenterContext: feature_instance_id: Optional[str] = None workflow_id: Optional[str] = None workflow: Any = None + requireNeutralization: Optional[bool] = None @property def mandateId(self) -> Optional[str]: diff --git a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py index 539d3672..c4e1f877 100644 --- a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py +++ b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py @@ -322,14 +322,20 @@ class AgentService: def _createAiCallFn(self) -> Callable[[AiCallRequest], AiCallResponse]: """Create the AI call function that wraps serviceAi with billing.""" + ctxNeutralization = getattr(self.ctx, 'requireNeutralization', None) async def _aiCallFn(request: AiCallRequest) -> AiCallResponse: + if ctxNeutralization is not None and request.requireNeutralization is None: + request.requireNeutralization = ctxNeutralization aiService = self.services.ai return await aiService.callAi(request) return _aiCallFn def _createAiCallStreamFn(self): """Create the streaming AI call function. 
Yields str deltas, then AiCallResponse.""" + ctxNeutralization = getattr(self.ctx, 'requireNeutralization', None) async def _aiCallStreamFn(request: AiCallRequest): + if ctxNeutralization is not None and request.requireNeutralization is None: + request.requireNeutralization = ctxNeutralization aiService = self.services.ai async for chunk in aiService.callAiStream(request): yield chunk diff --git a/modules/serviceCenter/services/serviceAi/mainServiceAi.py b/modules/serviceCenter/services/serviceAi/mainServiceAi.py index 541835a3..b25374d5 100644 --- a/modules/serviceCenter/services/serviceAi/mainServiceAi.py +++ b/modules/serviceCenter/services/serviceAi/mainServiceAi.py @@ -17,7 +17,6 @@ from modules.shared.jsonUtils import ( ) from .subJsonResponseHandling import JsonResponseHandler from modules.datamodels.datamodelAi import JsonAccumulationState -from modules.datamodels.datamodelBilling import BillingModelEnum from modules.serviceCenter.services.serviceBilling.billingExhaustedNotify import ( maybeEmailMandatePoolExhausted, ) @@ -747,15 +746,14 @@ detectedIntent-Werte: f"Balance {balance_str} CHF, " f"Reason: {reason}" ) - if balanceCheck.billingModel == BillingModelEnum.PREPAY_MANDATE: - ulabel = (getattr(user, "email", None) or getattr(user, "username", None) or str(user.id)) - maybeEmailMandatePoolExhausted( - str(mandateId), - str(user.id), - str(ulabel), - float(balanceCheck.currentBalance or 0.0), - float(estimatedCost), - ) + ulabel = (getattr(user, "email", None) or getattr(user, "username", None) or str(user.id)) + maybeEmailMandatePoolExhausted( + str(mandateId), + str(user.id), + str(ulabel), + float(balanceCheck.currentBalance or 0.0), + float(estimatedCost), + ) raise InsufficientBalanceException.fromBalanceCheck( balanceCheck, str(mandateId), diff --git a/modules/serviceCenter/services/serviceBilling/mainServiceBilling.py b/modules/serviceCenter/services/serviceBilling/mainServiceBilling.py index 790612ed..3a33f1f6 100644 --- a/modules/serviceCenter/services/serviceBilling/mainServiceBilling.py +++ b/modules/serviceCenter/services/serviceBilling/mainServiceBilling.py @@ -16,13 +16,11 @@ from datetime import datetime from modules.datamodels.datamodelUam import User from modules.datamodels.datamodelBilling import ( - BillingModelEnum, BillingCheckResult, TransactionTypeEnum, ReferenceTypeEnum, BillingTransaction, BillingBalanceResponse, - parseBillingModelFromStoredValue, ) from modules.interfaces.interfaceDbBilling import getInterface as getBillingInterface @@ -369,20 +367,10 @@ class BillingService: logger.warning(f"No billing settings for mandate {self.mandateId}") return None - billingModel = parseBillingModelFromStoredValue(settings.get("billingModel")) - - # Get or create account - if billingModel == BillingModelEnum.PREPAY_USER: - account = self._billingInterface.getOrCreateUserAccount( - self.mandateId, - self.currentUser.id, - initialBalance=0.0 - ) - else: - account = self._billingInterface.getOrCreateMandateAccount( - self.mandateId, - initialBalance=0.0 - ) + account = self._billingInterface.getOrCreateMandateAccount( + self.mandateId, + initialBalance=0.0 + ) # Create credit transaction transaction = BillingTransaction( @@ -429,45 +417,32 @@ BILLING_USER_ACTION_TOP_UP_SELF = "TOP_UP_SELF" BILLING_USER_ACTION_CONTACT_MANDATE_ADMIN = "CONTACT_MANDATE_ADMIN" -def _userActionForBillingModel(bm: BillingModelEnum) -> str: - if bm == BillingModelEnum.PREPAY_USER: - return BILLING_USER_ACTION_TOP_UP_SELF +def _defaultInsufficientBalanceUserAction() -> str: return 
BILLING_USER_ACTION_CONTACT_MANDATE_ADMIN def _buildInsufficientBalanceMessages( - bm: BillingModelEnum, currentBalance: float, requiredAmount: float, ) -> tuple: bal_s = f"{currentBalance:.2f}" req_s = f"{requiredAmount:.2f}" - if bm == BillingModelEnum.PREPAY_USER: - msg_de = ( - f"Ihr persönliches Guthaben ist aufgebraucht (aktuell CHF {bal_s}, benötigt mindestens CHF {req_s}). " - "Bitte laden Sie unter „Billing“ Guthaben nach." - ) - msg_en = ( - f"Your personal balance is exhausted (current CHF {bal_s}, at least CHF {req_s} required). " - "Please top up under Billing." - ) - else: - msg_de = ( - f"Das Mandanten-Budget ist aufgebraucht (aktuell CHF {bal_s}, benötigt mindestens CHF {req_s}). " - "Bitte informieren Sie die Administratorin bzw. den Administrator Ihres Mandanten. " - "Die in den Billing-Einstellungen hinterlegten Kontakte wurden per E-Mail informiert (falls konfiguriert)." - ) - msg_en = ( - f"The organization budget is exhausted (current CHF {bal_s}, at least CHF {req_s} required). " - "Please contact your mandate administrator. Billing notification contacts were emailed if configured." - ) + msg_de = ( + f"Das Mandanten-Budget ist aufgebraucht (aktuell CHF {bal_s}, benötigt mindestens CHF {req_s}). " + "Bitte informieren Sie die Administratorin bzw. den Administrator Ihres Mandanten. " + "Die in den Billing-Einstellungen hinterlegten Kontakte wurden per E-Mail informiert (falls konfiguriert)." + ) + msg_en = ( + f"The organization budget is exhausted (current CHF {bal_s}, at least CHF {req_s} required). " + "Please contact your mandate administrator. Billing notification contacts were emailed if configured." + ) return msg_de, msg_en class InsufficientBalanceException(Exception): """Raised when there's insufficient balance for an operation. - Carries structured fields for API/SSE clients (userAction, billingModel, localized hints). + Carries structured fields for API/SSE clients (userAction, localized hints). 
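A usage sketch (constructor and serialization exactly as defined in the hunk below; the numbers are illustrative):

    exc = InsufficientBalanceException(
        currentBalance=0.35,
        requiredAmount=1.20,
        mandate_id="m-123",  # hypothetical mandate ID
    )
    # With no explicit messages, the mandate-pool wording built above is applied
    assert exc.user_action == BILLING_USER_ACTION_CONTACT_MANDATE_ADMIN
    print(exc.message_de)  # "Das Mandanten-Budget ist aufgebraucht (aktuell CHF 0.35, ...)"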
""" def __init__( @@ -476,7 +451,6 @@ class InsufficientBalanceException(Exception): requiredAmount: float, message: Optional[str] = None, *, - billing_model: Optional[BillingModelEnum] = None, mandate_id: str = "", user_action: Optional[str] = None, message_de: Optional[str] = None, @@ -484,12 +458,8 @@ class InsufficientBalanceException(Exception): ): self.currentBalance = float(currentBalance) self.requiredAmount = float(requiredAmount) - self.billing_model = billing_model self.mandate_id = mandate_id or "" - if billing_model is not None: - self.user_action = user_action or _userActionForBillingModel(billing_model) - else: - self.user_action = user_action or BILLING_USER_ACTION_TOP_UP_SELF + self.user_action = user_action or _defaultInsufficientBalanceUserAction() if message_de is not None and message_en is not None: self.message_de = message_de @@ -500,8 +470,7 @@ class InsufficientBalanceException(Exception): self.message_de = message self.message_en = message else: - bm = billing_model or BillingModelEnum.PREPAY_USER - md, me = _buildInsufficientBalanceMessages(bm, self.currentBalance, self.requiredAmount) + md, me = _buildInsufficientBalanceMessages(self.currentBalance, self.requiredAmount) self.message_de = md self.message_en = me self.message = md @@ -514,14 +483,12 @@ class InsufficientBalanceException(Exception): mandate_id: str, required_amount: float, ) -> "InsufficientBalanceException": - bm = check.billingModel or BillingModelEnum.PREPAY_MANDATE bal = float(check.currentBalance or 0.0) - msg_de, msg_en = _buildInsufficientBalanceMessages(bm, bal, required_amount) + msg_de, msg_en = _buildInsufficientBalanceMessages(bal, required_amount) return cls( bal, required_amount, message=msg_de, - billing_model=bm, mandate_id=mandate_id or "", message_de=msg_de, message_en=msg_en, @@ -538,8 +505,6 @@ class InsufficientBalanceException(Exception): "messageEn": self.message_en, "userAction": self.user_action, } - if self.billing_model is not None: - out["billingModel"] = self.billing_model.value if self.mandate_id: out["mandateId"] = self.mandate_id if self.user_action == BILLING_USER_ACTION_TOP_UP_SELF: diff --git a/modules/serviceCenter/services/serviceBilling/stripeCheckout.py b/modules/serviceCenter/services/serviceBilling/stripeCheckout.py index 8d6b4a57..bc98cc65 100644 --- a/modules/serviceCenter/services/serviceBilling/stripeCheckout.py +++ b/modules/serviceCenter/services/serviceBilling/stripeCheckout.py @@ -65,7 +65,7 @@ def create_checkout_session( Args: mandate_id: Target mandate ID - user_id: Target user ID (for PREPAY_USER) or None (for mandate pool) + user_id: Target user ID for audit trail (optional) amount_chf: Amount in CHF (must be in ALLOWED_AMOUNTS_CHF) Returns: diff --git a/tests/test_phase123_basic.py b/tests/test_phase123_basic.py index 18c4188f..222c6043 100644 --- a/tests/test_phase123_basic.py +++ b/tests/test_phase123_basic.py @@ -26,12 +26,11 @@ def _check(label, condition, detail=""): print("\n--- Phase 1: Data Models ---") try: - from modules.datamodels.datamodelUam import Mandate, MandateType - _check("MandateType Enum exists", hasattr(MandateType, "SYSTEM")) - _check("MandateType values", set(MandateType) == {MandateType.SYSTEM, MandateType.PERSONAL, MandateType.COMPANY}) - m = Mandate(name="test", label="test", mandateType="personal") - _check("Mandate has mandateType field", hasattr(m, "mandateType")) - _check("Mandate mandateType coercion", m.mandateType == MandateType.PERSONAL) + from modules.datamodels.datamodelUam import Mandate + m = 
Mandate(name="test", label="test") + _check("Mandate has isSystem field", hasattr(m, "isSystem")) + _check("Mandate isSystem default False", m.isSystem is False) + _check("Mandate no mandateType field", not hasattr(m, "mandateType")) except Exception as e: errors.append(f"Phase 1 DataModel: {e}") print(f" [FAIL] Phase 1 DataModel import: {e}") From c12a75f87f9327b066fa0217a4a3e88db211d73d Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Sat, 28 Mar 2026 23:56:28 +0100 Subject: [PATCH 11/33] fix user name resolution --- modules/routes/routeBilling.py | 62 +++++++++++++++------------------- 1 file changed, 27 insertions(+), 35 deletions(-) diff --git a/modules/routes/routeBilling.py b/modules/routes/routeBilling.py index 4062163e..37674e53 100644 --- a/modules/routes/routeBilling.py +++ b/modules/routes/routeBilling.py @@ -1320,6 +1320,31 @@ def getUsersForMandate( raise HTTPException(status_code=500, detail=str(e)) +def _attachCreatedByUserNamesToTransactionRows(rows: List[Dict[str, Any]]) -> None: + """Resolve createdByUserId to userName using root app interface (sysadmin transaction views).""" + try: + from modules.interfaces.interfaceDbApp import getRootInterface + + appRoot = getRootInterface() + userNames: Dict[str, str] = {} + for row in rows: + uid = row.get("createdByUserId") + if not uid: + row["userName"] = "" + continue + if uid not in userNames: + try: + u = appRoot.getUser(uid) + userNames[uid] = u.username if u else uid[:8] + except Exception: + userNames[uid] = uid[:8] + row["userName"] = userNames.get(uid, "") + except Exception: + for row in rows: + uid = row.get("createdByUserId") + row["userName"] = uid[:8] if uid else "" + + def _enrichTransactionRows(transactions) -> List[Dict[str, Any]]: """Convert raw transaction dicts to enriched TransactionResponse rows with resolved usernames.""" result = [] @@ -1341,23 +1366,7 @@ def _enrichTransactionRows(transactions) -> List[Dict[str, Any]]: ) result.append(row.model_dump()) - try: - from modules.interfaces.interfaceDbUam import _getRootInterface as getUamRoot - uamInterface = getUamRoot() - userNames: Dict[str, str] = {} - for row in result: - uid = row.get("createdByUserId") - if uid and uid not in userNames: - try: - user = uamInterface.getUser(uid) - userNames[uid] = user.get("username", uid[:8]) if user else uid[:8] - except Exception: - userNames[uid] = uid[:8] - row["userName"] = userNames.get(uid, "") if uid else "" - except Exception: - for row in result: - row["userName"] = row.get("createdByUserId", "")[:8] if row.get("createdByUserId") else "" - + _attachCreatedByUserNamesToTransactionRows(result) return result @@ -1385,24 +1394,7 @@ def _buildTransactionsList(ctx: RequestContext, targetMandateId: str) -> List[Di ) result.append(row.model_dump()) - # Resolve user names - try: - from modules.interfaces.interfaceDbUam import _getRootInterface as getUamRoot - uamInterface = getUamRoot() - userNames: Dict[str, str] = {} - for row in result: - uid = row.get("createdByUserId") - if uid and uid not in userNames: - try: - user = uamInterface.getUser(uid) - userNames[uid] = user.get("username", uid[:8]) if user else uid[:8] - except Exception: - userNames[uid] = uid[:8] - row["userName"] = userNames.get(uid, "") if uid else "" - except Exception: - for row in result: - row["userName"] = row.get("createdByUserId", "")[:8] if row.get("createdByUserId") else "" - + _attachCreatedByUserNamesToTransactionRows(result) return result From 3ac25a269a0e1a30cdcaf5b55bf2f48589b960f0 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Sun, 29 
Mar 2026 12:18:58 +0200 Subject: [PATCH 12/33] streamlined billing incl ai and storage budget --- app.py | 3 + modules/connectors/connectorDbPostgre.py | 37 +- modules/connectors/connectorVoiceGoogle.py | 41 ++- modules/datamodels/datamodelBilling.py | 22 ++ modules/datamodels/datamodelSubscription.py | 4 +- .../workspace/routeFeatureWorkspace.py | 26 ++ modules/interfaces/interfaceBootstrap.py | 13 +- modules/interfaces/interfaceDbBilling.py | 169 ++++++++- modules/interfaces/interfaceDbKnowledge.py | 12 +- modules/interfaces/interfaceDbSubscription.py | 8 +- modules/routes/routeBilling.py | 38 +- modules/routes/routeSecurityLocal.py | 121 +------ modules/routes/routeStore.py | 22 +- modules/routes/routeVoiceUser.py | 327 ++++++++++++++++++ modules/serviceCenter/context.py | 2 + .../services/serviceAgent/mainServiceAgent.py | 4 +- .../services/serviceAi/mainServiceAi.py | 8 +- .../serviceBilling/mainServiceBilling.py | 3 + .../serviceKnowledge/mainServiceKnowledge.py | 7 + scripts/script_db_export_migration.py | 39 ++- tests/test_phase123_basic.py | 9 +- 21 files changed, 740 insertions(+), 175 deletions(-) create mode 100644 modules/routes/routeVoiceUser.py diff --git a/app.py b/app.py index 80a9505c..0f3d29a6 100644 --- a/app.py +++ b/app.py @@ -566,6 +566,9 @@ app.include_router(googleRouter) from modules.routes.routeVoiceGoogle import router as voiceGoogleRouter app.include_router(voiceGoogleRouter) +from modules.routes.routeVoiceUser import router as voiceUserRouter +app.include_router(voiceUserRouter) + from modules.routes.routeSecurityAdmin import router as adminSecurityRouter app.include_router(adminSecurityRouter) diff --git a/modules/connectors/connectorDbPostgre.py b/modules/connectors/connectorDbPostgre.py index bf8fce44..6bd661b4 100644 --- a/modules/connectors/connectorDbPostgre.py +++ b/modules/connectors/connectorDbPostgre.py @@ -158,12 +158,17 @@ def _parseRecordFields(record: Dict[str, Any], fields: Dict[str, str], context: logger.warning(f"Could not parse JSONB field {fieldName}, keeping as string ({context})") -# Legacy system columns (underscore-prefixed internal names) -> PowerOn sys* columns. -_LEGACY_UNDERSCORE_TO_SYS: Tuple[Tuple[str, str], ...] = ( +# Legacy column names (historical _* internal names and old camelCase audit fields) -> PowerOn sys* columns. +# Order matters: more specific / underscore names first; first successful copy wins per cell via IS NULL on target. +_LEGACY_FIELD_TO_SYS: Tuple[Tuple[str, str], ...] = ( ("_createdAt", "sysCreatedAt"), ("_createdBy", "sysCreatedBy"), ("_modifiedAt", "sysModifiedAt"), ("_modifiedBy", "sysModifiedBy"), + ("createdAt", "sysCreatedAt"), + ("creationDate", "sysCreatedAt"), + ("updatedAt", "sysModifiedAt"), + ("lastModified", "sysModifiedAt"), ) @@ -454,9 +459,9 @@ class DatabaseConnector: def migrateLegacyUnderscoreSysColumns(self) -> int: """ Scan all public base tables on this connection's database. Where both a legacy - _createdAt / _createdBy / _modifiedAt / _modifiedBy column (any case) and the - matching sys* column exist, copy into sys* rows where sys* IS NULL and legacy IS NOT NULL. - Idempotent; safe to run on every bootstrap. + source column (any case: _createdAt, createdAt, creationDate, …) and the matching + sys* column exist, UPDATE sys* from legacy where sys* IS NULL AND legacy IS NOT NULL. + Idempotent; run after schema adds sys* columns (see _ensureTableExists). 
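Sketch of the per-column-pair statement this migration issues (table and column names here are hypothetical; the real code resolves the actual case-sensitive names via _resolveColumnCaseInsensitive before building the SQL):

    cursor.execute(
        'UPDATE "billingTransaction" '
        'SET "sysCreatedAt" = "createdAt" '
        'WHERE "sysCreatedAt" IS NULL AND "createdAt" IS NOT NULL'
    )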
""" self._ensure_connection() total = 0 @@ -466,7 +471,7 @@ class DatabaseConnector: for table in tableNames: with self.connection.cursor() as cursor: cols = _listTableColumnNames(cursor, table) - for legacyLogical, sysLogical in _LEGACY_UNDERSCORE_TO_SYS: + for legacyLogical, sysLogical in _LEGACY_FIELD_TO_SYS: src = _resolveColumnCaseInsensitive(cols, legacyLogical) tgt = _resolveColumnCaseInsensitive(cols, sysLogical) if not src or not tgt or src == tgt: @@ -629,6 +634,7 @@ class DatabaseConnector: try: self._ensure_connection() + schemaTouched = False with self.connection.cursor() as cursor: # Check if table exists by querying information_schema with case-insensitive search @@ -647,6 +653,7 @@ class DatabaseConnector: logger.info( f"Created table '{table}' with columns from Pydantic model" ) + schemaTouched = True else: # Table exists: ensure all columns from model are present (simple additive migration) try: @@ -680,6 +687,7 @@ class DatabaseConnector: logger.info( f"Added missing column '{col}' ({sql_type}) to '{table}'" ) + schemaTouched = True except Exception as add_err: logger.warning( f"Could not add column '{col}' to '{table}': {add_err}" @@ -690,6 +698,23 @@ class DatabaseConnector: ) self.connection.commit() + if schemaTouched: + try: + n = self.migrateLegacyUnderscoreSysColumns() + if n: + logger.info( + "After schema change on %s.%s: legacy -> sys* migration wrote %s cell(s)", + self.dbDatabase, + table, + n, + ) + except Exception as mig_err: + logger.error( + "migrateLegacyUnderscoreSysColumns failed after schema change %s.%s: %s", + self.dbDatabase, + table, + mig_err, + ) return True except Exception as e: logger.error(f"Error ensuring table {table} exists: {e}") diff --git a/modules/connectors/connectorVoiceGoogle.py b/modules/connectors/connectorVoiceGoogle.py index ddb0d864..0dbb46a5 100644 --- a/modules/connectors/connectorVoiceGoogle.py +++ b/modules/connectors/connectorVoiceGoogle.py @@ -18,6 +18,11 @@ from modules.shared.configuration import APP_CONFIG logger = logging.getLogger(__name__) +# Gemini-TTS speaker IDs from voices.list use short names (e.g. "Kore") and require model_name + prompt. +_GEMINI_TTS_DEFAULT_MODEL = "gemini-2.5-flash-tts" +_GEMINI_TTS_NEUTRAL_PROMPT = "Say the following" + + class ConnectorGoogleSpeech: """ Google Cloud Speech-to-Text and Translation connector. @@ -902,6 +907,13 @@ class ConnectorGoogleSpeech: "error": f"Validation error: {e}" } + def _isGeminiTtsSpeakerVoiceName(self, voiceName: str) -> bool: + """True when voice name is a Gemini-TTS speaker id (no BCP-47 prefix like en-US-...).""" + if not voiceName or not isinstance(voiceName, str): + return False + stripped = voiceName.strip() + return bool(stripped) and "-" not in stripped + async def textToSpeech(self, text: str, languageCode: str = "de-DE", voiceName: str = None) -> Dict[str, Any]: """ Convert text to speech using Google Cloud Text-to-Speech. @@ -917,9 +929,6 @@ class ConnectorGoogleSpeech: try: logger.info(f"Converting text to speech: '{text[:50]}...' 
in {languageCode}") - # Set up the synthesis input - synthesisInput = texttospeech.SynthesisInput(text=text) - # Build the voice request selectedVoice = voiceName or self._getDefaultVoice(languageCode) @@ -931,11 +940,24 @@ class ConnectorGoogleSpeech: logger.info(f"Using TTS voice: {selectedVoice} for language: {languageCode}") - voice = texttospeech.VoiceSelectionParams( - language_code=languageCode, - name=selectedVoice, - ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL - ) + if self._isGeminiTtsSpeakerVoiceName(selectedVoice): + synthesisInput = texttospeech.SynthesisInput( + text=text, + prompt=_GEMINI_TTS_NEUTRAL_PROMPT, + ) + voice = texttospeech.VoiceSelectionParams( + language_code=languageCode, + name=selectedVoice, + model_name=_GEMINI_TTS_DEFAULT_MODEL, + ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL, + ) + else: + synthesisInput = texttospeech.SynthesisInput(text=text) + voice = texttospeech.VoiceSelectionParams( + language_code=languageCode, + name=selectedVoice, + ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL, + ) # Select the type of audio file to return audioConfig = texttospeech.AudioConfig( @@ -1059,7 +1081,8 @@ class ConnectorGoogleSpeech: "language_codes": list(voice.language_codes) if voice.language_codes else [], "gender": gender, "ssml_gender": voice.ssml_gender.name if voice.ssml_gender else "NEUTRAL", - "natural_sample_rate_hertz": voice.natural_sample_rate_hertz + "natural_sample_rate_hertz": voice.natural_sample_rate_hertz, + "geminiTts": self._isGeminiTtsSpeakerVoiceName(voice.name or ""), } # Include any additional fields if available from Google API diff --git a/modules/datamodels/datamodelBilling.py b/modules/datamodels/datamodelBilling.py index a0bb4f88..2d3bfdb1 100644 --- a/modules/datamodels/datamodelBilling.py +++ b/modules/datamodels/datamodelBilling.py @@ -10,6 +10,9 @@ from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels import uuid +# End-customer price for storage above plan-included volume (CHF per GB per month). 
+STORAGE_PRICE_PER_GB_CHF = 0.50 + class TransactionTypeEnum(str, Enum): """Transaction types for billing.""" @@ -24,6 +27,7 @@ class ReferenceTypeEnum(str, Enum): PAYMENT = "PAYMENT" # Payment/top-up ADMIN = "ADMIN" # Admin adjustment SYSTEM = "SYSTEM" # System credit (e.g., initial credit) + STORAGE = "STORAGE" # Metered storage overage (prepay pool) class PeriodTypeEnum(str, Enum): @@ -137,6 +141,18 @@ class BillingSettings(BaseModel): ) notifyOnWarning: bool = Field(default=True, description="Send email when warning threshold is reached") + # Storage overage (high-watermark within subscription period; resets on new period) + storageHighWatermarkMB: float = Field( + default=0.0, description="Peak indexed data volume MB this billing period" + ) + storagePeriodStartAt: Optional[datetime] = Field( + None, description="Subscription billing period start used for storage reset" + ) + storageBilledUpToMB: float = Field( + default=0.0, + description="Overage MB already debited this period (above plan-included volume)", + ) + registerModelLabels( "BillingSettings", @@ -154,6 +170,12 @@ registerModelLabels( "de": "E-Mails fuer Billing-Alerts (Inhaber/Admin)", }, "notifyOnWarning": {"en": "Notify on Warning", "de": "Bei Warnung benachrichtigen"}, + "storageHighWatermarkMB": {"en": "Storage peak (MB)", "de": "Speicher-Peak (MB)"}, + "storagePeriodStartAt": {"en": "Storage period start", "de": "Speicher-Periodenbeginn"}, + "storageBilledUpToMB": { + "en": "Storage billed overage (MB)", + "de": "Speicher abgerechneter Überhang (MB)", + }, }, ) diff --git a/modules/datamodels/datamodelSubscription.py b/modules/datamodels/datamodelSubscription.py index 3b0e46b9..c5547c0a 100644 --- a/modules/datamodels/datamodelSubscription.py +++ b/modules/datamodels/datamodelSubscription.py @@ -218,7 +218,7 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = { billingPeriod=BillingPeriodEnum.MONTHLY, pricePerUserCHF=90.0, pricePerFeatureInstanceCHF=150.0, - maxDataVolumeMB=10240, + maxDataVolumeMB=1024, budgetAiCHF=10.0, ), "STANDARD_YEARLY": SubscriptionPlan( @@ -232,7 +232,7 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = { billingPeriod=BillingPeriodEnum.YEARLY, pricePerUserCHF=1080.0, pricePerFeatureInstanceCHF=1800.0, - maxDataVolumeMB=10240, + maxDataVolumeMB=1024, budgetAiCHF=120.0, ), } diff --git a/modules/features/workspace/routeFeatureWorkspace.py b/modules/features/workspace/routeFeatureWorkspace.py index 79295f35..6271a8cd 100644 --- a/modules/features/workspace/routeFeatureWorkspace.py +++ b/modules/features/workspace/routeFeatureWorkspace.py @@ -76,6 +76,27 @@ class _PendingEditsStore: _pendingEditsStore = _PendingEditsStore() +def _workspaceBillingFeatureCode(user, mandateId: Optional[str], instanceId: str) -> Optional[str]: + """Resolve FeatureInstance.featureCode for billing/UI when workflow is not on ServiceCenterContext.""" + if not instanceId or not str(instanceId).strip(): + return None + try: + from modules.interfaces.interfaceDbApp import getInterface as getAppInterface + + appIf = getAppInterface(user, mandateId=mandateId or None) + inst = appIf.getFeatureInstance(str(instanceId).strip()) + if not inst: + return None + if isinstance(inst, dict): + code = inst.get("featureCode") + else: + code = getattr(inst, "featureCode", None) + return str(code).strip() if code else None + except Exception as e: + logger.debug("Workspace: feature code lookup failed for instance %s: %s", instanceId, e) + return None + + class WorkspaceInputRequest(BaseModel): """Prompt input for the unified workspace.""" prompt: 
str = Field(description="User prompt text") @@ -546,11 +567,13 @@ async def streamWorkspaceStart( from modules.serviceCenter import getService from modules.serviceCenter.context import ServiceCenterContext + wsBillingFeatureCode = _workspaceBillingFeatureCode(context.user, mandateId or "", instanceId) svcCtx = ServiceCenterContext( user=context.user, mandate_id=mandateId or "", feature_instance_id=instanceId, workflow_id=workflowId, + feature_code=wsBillingFeatureCode, ) chatSvc = getService("chat", svcCtx) attachmentLabel = _buildWorkspaceAttachmentLabel( @@ -590,6 +613,7 @@ async def streamWorkspaceStart( instanceConfig=instanceConfig, allowedProviders=userInput.allowedProviders, requireNeutralization=userInput.requireNeutralization, + billingFeatureCode=wsBillingFeatureCode, ) ) eventManager.register_agent_task(queueId, agentTask) @@ -646,6 +670,7 @@ async def _runWorkspaceAgent( instanceConfig: Dict[str, Any] = None, allowedProviders: List[str] = None, requireNeutralization: Optional[bool] = None, + billingFeatureCode: Optional[str] = None, ): """Run the serviceAgent loop and forward events to the SSE queue.""" try: @@ -656,6 +681,7 @@ async def _runWorkspaceAgent( mandate_id=mandateId, feature_instance_id=instanceId, workflow_id=workflowId, + feature_code=billingFeatureCode, ) agentService = getService("agent", ctx) chatService = getService("chat", ctx) diff --git a/modules/interfaces/interfaceBootstrap.py b/modules/interfaces/interfaceBootstrap.py index 0c186475..7eccb3ee 100644 --- a/modules/interfaces/interfaceBootstrap.py +++ b/modules/interfaces/interfaceBootstrap.py @@ -38,15 +38,23 @@ pwdContext = CryptContext(schemes=["argon2"], deprecated="auto") # Cache für Role-IDs (roleLabel -> roleId) _roleIdCache: Dict[str, str] = {} -# PowerOn logical databases to scan (same set as gateway/scripts/script_db_export_migration.py). +# PowerOn logical databases to scan (same set as gateway/scripts/script_db_export_migration.py ALL_DATABASES). _POWERON_DATABASE_NAMES: Tuple[str, ...] = ( "poweron_app", + "poweron_automation", + "poweron_automation2", + "poweron_billing", "poweron_chat", "poweron_chatbot", + "poweron_commcoach", + "poweron_knowledge", "poweron_management", + "poweron_neutralization", "poweron_realestate", + "poweron_teamsbot", + "poweron_test", "poweron_trustee", - "poweron_automation", + "poweron_workspace", ) @@ -60,6 +68,7 @@ def _configPrefixForPoweronDatabase(dbName: str) -> str: "poweron_trustee": "DB_TRUSTEE", # Same as initAutomationTemplates: default DB_* (not a separate DB_AUTOMATION_* prefix). "poweron_automation": "DB", + "poweron_billing": "DB", }.get(dbName, "DB") diff --git a/modules/interfaces/interfaceDbBilling.py b/modules/interfaces/interfaceDbBilling.py index c8c13d13..1069314f 100644 --- a/modules/interfaces/interfaceDbBilling.py +++ b/modules/interfaces/interfaceDbBilling.py @@ -9,7 +9,7 @@ All billing data is stored in the poweron_billing database. 
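Worked example of the high-watermark overage math implemented in reconcileMandateStorageBilling below (plan-included volume of 1024 MB as in the STANDARD plans; the usage figures are invented; the watermark resets when invoice.paid opens a new billing period):

    includedMB = 1024.0    # plan.maxDataVolumeMB
    prevHigh = 1100.0      # storageHighWatermarkMB so far this period
    billed = 76.0          # storageBilledUpToMB already debited (1100 - 1024)
    usedMB = 1300.0        # current indexed volume
    high = max(prevHigh, usedMB)                          # 1300.0 -- never decreases
    deltaOverage = max(0.0, high - includedMB) - billed   # 276.0 - 76.0 = 200.0 MB new
    costCHF = round((deltaOverage / 1024.0) * 0.50, 4)    # 0.0977 CHF (STORAGE_PRICE_PER_GB_CHF)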
import logging from typing import Dict, Any, List, Optional, Union -from datetime import date, datetime, timedelta +from datetime import date, datetime, timedelta, timezone import uuid from modules.connectors.connectorDbPostgre import DatabaseConnector @@ -29,11 +29,44 @@ from modules.datamodels.datamodelBilling import ( PeriodTypeEnum, BillingBalanceResponse, BillingCheckResult, + STORAGE_PRICE_PER_GB_CHF, ) logger = logging.getLogger(__name__) +def _logBillingTransactionsMissingSysCreatedAt(rows: List[Dict[str, Any]], context: str) -> None: + """Log ERROR when sysCreatedAt is missing; does not raise.""" + missingIds = [r.get("id") for r in rows if r.get("sysCreatedAt") is None] + if not missingIds: + return + cap = 40 + sample = missingIds[:cap] + suffix = f"; ... (+{len(missingIds) - cap} more)" if len(missingIds) > cap else "" + logger.error( + "BillingTransaction missing sysCreatedAt (%s): count=%s; transactionIds=%s%s", + context, + len(missingIds), + sample, + suffix, + ) + + +def _numericSysCreatedAtForSort(row: Dict[str, Any]) -> float: + v = row["sysCreatedAt"] + if isinstance(v, datetime): + return v.timestamp() + return float(v) + + +def _sortBillingTransactionsBySysCreatedAtDesc(rows: List[Dict[str, Any]], context: str) -> None: + _logBillingTransactionsMissingSysCreatedAt(rows, context) + valid = [r for r in rows if r.get("sysCreatedAt") is not None] + invalid = [r for r in rows if r.get("sysCreatedAt") is None] + valid.sort(key=_numericSysCreatedAtForSort, reverse=True) + rows[:] = valid + invalid + + def _getAppDatabaseConnector() -> DatabaseConnector: """App DB connector (same config as UserMandate reads in this module).""" return DatabaseConnector( @@ -553,6 +586,17 @@ class BillingObjects: # Create transaction record (always on transaction.accountId for audit) transactionDict = transaction.model_dump(exclude_none=True) + ts = getUtcTimestamp() + uid = str(self.userId) if self.userId else None + if transactionDict.get("sysCreatedAt") is None: + transactionDict["sysCreatedAt"] = ts + if transactionDict.get("sysModifiedAt") is None: + transactionDict["sysModifiedAt"] = ts + if uid: + if transactionDict.get("sysCreatedBy") is None: + transactionDict["sysCreatedBy"] = uid + if transactionDict.get("sysModifiedBy") is None: + transactionDict["sysModifiedBy"] = uid created = self.db.recordCreate(BillingTransaction, transactionDict) # Update balance on the target account @@ -597,6 +641,10 @@ class BillingObjects: pagination=pagination, recordFilter=recordFilter ) + _logBillingTransactionsMissingSysCreatedAt( + result["items"], + "getTransactions(accountId) paginated", + ) return PaginatedResult( items=result["items"], totalItems=result["totalItems"], @@ -619,7 +667,7 @@ class BillingObjects: filtered.append(t) results = filtered - results.sort(key=lambda x: x.get("sysCreatedAt", ""), reverse=True) + _sortBillingTransactionsBySysCreatedAtDesc(results, "getTransactions(accountId)") return results[offset:offset + limit] except Exception as e: @@ -674,7 +722,10 @@ class BillingObjects: transactions = self.getTransactions(account["id"], limit=limit) allTransactions.extend(transactions) - allTransactions.sort(key=lambda x: x.get("sysCreatedAt", ""), reverse=True) + _sortBillingTransactionsBySysCreatedAtDesc( + allTransactions, + "getTransactionsByMandate", + ) return allTransactions[:limit] # ========================================================================= @@ -816,11 +867,113 @@ class BillingObjects: poolAccount = self.getOrCreateMandateAccount(mandateId) return 
self.createTransaction(transaction, balanceAccountId=poolAccount["id"]) - + + def _parseSettingsDateTime(self, value: Any) -> Optional[datetime]: + """Parse datetime from billing settings row (ISO string or datetime).""" + if value is None: + return None + if isinstance(value, datetime): + if value.tzinfo: + return value.astimezone(timezone.utc) + return value.replace(tzinfo=timezone.utc) + if isinstance(value, str): + s = value.replace("Z", "+00:00") + try: + dt = datetime.fromisoformat(s) + except ValueError: + return None + if dt.tzinfo: + return dt.astimezone(timezone.utc) + return dt.replace(tzinfo=timezone.utc) + return None + + def resetStorageBillingPeriod(self, mandateId: str, periodStartAt: datetime) -> None: + """Reset storage watermark state for a new subscription billing period (e.g. Stripe invoice.paid).""" + if periodStartAt.tzinfo is None: + periodStartAt = periodStartAt.replace(tzinfo=timezone.utc) + else: + periodStartAt = periodStartAt.astimezone(timezone.utc) + settings = self.getOrCreateSettings(mandateId) + prev = self._parseSettingsDateTime(settings.get("storagePeriodStartAt")) + if prev is not None and abs((prev - periodStartAt).total_seconds()) < 2: + return + from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot + + usedMB = float(_getSubRoot().getMandateDataVolumeMB(mandateId)) + self.updateSettings( + settings["id"], + { + "storageHighWatermarkMB": usedMB, + "storageBilledUpToMB": 0.0, + "storagePeriodStartAt": periodStartAt, + }, + ) + logger.info( + "Storage billing period reset for mandate %s at %s (usedMB=%.2f)", + mandateId, + periodStartAt.isoformat(), + usedMB, + ) + + def reconcileMandateStorageBilling(self, mandateId: str) -> Optional[Dict[str, Any]]: + """Debit prepay pool for new storage overage using period high-watermark (no credit on delete).""" + settings = self.getSettings(mandateId) + if not settings: + return None + from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot + from modules.datamodels.datamodelSubscription import _getPlan + + subIface = _getSubRoot() + usedMB = float(subIface.getMandateDataVolumeMB(mandateId)) + sub = subIface.getOperativeForMandate(mandateId) + plan = _getPlan(sub.get("planKey", "")) if sub else None + includedMB = plan.maxDataVolumeMB if plan and plan.maxDataVolumeMB is not None else None + if includedMB is None: + return None + + prevHigh = float(settings.get("storageHighWatermarkMB") or 0.0) + high = max(prevHigh, usedMB) + overageMB = max(0.0, high - float(includedMB)) + billed = float(settings.get("storageBilledUpToMB") or 0.0) + deltaOverage = overageMB - billed + settingsUpdates: Dict[str, Any] = {} + if high != prevHigh: + settingsUpdates["storageHighWatermarkMB"] = high + if deltaOverage <= 1e-9: + if settingsUpdates: + self.updateSettings(settings["id"], settingsUpdates) + return None + + costCHF = round((deltaOverage / 1024.0) * float(STORAGE_PRICE_PER_GB_CHF), 4) + if costCHF <= 0: + if settingsUpdates: + self.updateSettings(settings["id"], settingsUpdates) + return None + + poolAccount = self.getOrCreateMandateAccount(mandateId) + transaction = BillingTransaction( + accountId=poolAccount["id"], + transactionType=TransactionTypeEnum.DEBIT, + amount=costCHF, + description=f"Speicher-Überhang ({deltaOverage:.2f} MB über Plan)", + referenceType=ReferenceTypeEnum.STORAGE, + referenceId=mandateId, + ) + created = self.createTransaction(transaction) + settingsUpdates["storageBilledUpToMB"] = overageMB + self.updateSettings(settings["id"], 
settingsUpdates) + logger.info( + "Storage overage billed mandate=%s deltaOverageMB=%.4f costCHF=%s", + mandateId, + deltaOverage, + costCHF, + ) + return created + # ========================================================================= # Workflow Cost Query # ========================================================================= - + def getWorkflowCost(self, workflowId: str) -> float: """Sum of all transaction amounts for a workflow.""" if not workflowId: @@ -1027,7 +1180,7 @@ class BillingObjects: except Exception as e: logger.error(f"Error getting transactions for user: {e}") - allTransactions.sort(key=lambda x: x.get("sysCreatedAt", ""), reverse=True) + _sortBillingTransactionsBySysCreatedAtDesc(allTransactions, "getTransactionsForUser") return allTransactions[:limit] # ========================================================================= @@ -1133,7 +1286,7 @@ class BillingObjects: logger.error(f"Error getting mandate transactions: {e}") # Sort by creation date descending and limit - allTransactions.sort(key=lambda x: x.get("sysCreatedAt", ""), reverse=True) + _sortBillingTransactionsBySysCreatedAtDesc(allTransactions, "getMandateTransactions") return allTransactions[:limit] # ========================================================================= @@ -1320,5 +1473,5 @@ class BillingObjects: logger.error(f"Error getting user transactions for mandates: {e}") # Sort by creation date descending and limit - allTransactions.sort(key=lambda x: x.get("sysCreatedAt", ""), reverse=True) + _sortBillingTransactionsBySysCreatedAtDesc(allTransactions, "getUserTransactionsForMandates") return allTransactions[:limit] diff --git a/modules/interfaces/interfaceDbKnowledge.py b/modules/interfaces/interfaceDbKnowledge.py index c7f50543..c7b9e29a 100644 --- a/modules/interfaces/interfaceDbKnowledge.py +++ b/modules/interfaces/interfaceDbKnowledge.py @@ -91,10 +91,20 @@ class KnowledgeObjects: def deleteFileContentIndex(self, fileId: str) -> bool: """Delete a FileContentIndex and all associated ContentChunks.""" + existing = self.getFileContentIndex(fileId) + mandateId = (existing or {}).get("mandateId") or "" chunks = self.db.getRecordset(ContentChunk, recordFilter={"fileId": fileId}) for chunk in chunks: self.db.recordDelete(ContentChunk, chunk["id"]) - return self.db.recordDelete(FileContentIndex, fileId) + ok = self.db.recordDelete(FileContentIndex, fileId) + if ok and mandateId: + try: + from modules.interfaces.interfaceDbBilling import _getRootInterface + + _getRootInterface().reconcileMandateStorageBilling(str(mandateId)) + except Exception as ex: + logger.warning("reconcileMandateStorageBilling after delete failed: %s", ex) + return ok # ========================================================================= # ContentChunk CRUD diff --git a/modules/interfaces/interfaceDbSubscription.py b/modules/interfaces/interfaceDbSubscription.py index 2405ec73..d6832f14 100644 --- a/modules/interfaces/interfaceDbSubscription.py +++ b/modules/interfaces/interfaceDbSubscription.py @@ -297,13 +297,17 @@ class SubscriptionObjects: cap = plan.maxDataVolumeMB if cap is None: return True - currentMB = self._getMandateDataVolumeMB(mandateId) + currentMB = self.getMandateDataVolumeMB(mandateId) if currentMB + delta > cap: from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import SubscriptionCapacityException raise SubscriptionCapacityException(resourceType=resourceType, currentCount=int(currentMB), maxAllowed=cap) return True + def getMandateDataVolumeMB(self, mandateId: str) -> 
float: + """Total indexed data volume for the mandate (MB), for billing and capacity checks.""" + return self._getMandateDataVolumeMB(mandateId) + def _getMandateDataVolumeMB(self, mandateId: str) -> float: """Sum RAG index size (FileContentIndex.totalSize) across all feature instances of the mandate.""" try: @@ -323,7 +327,7 @@ class SubscriptionObjects: plan = self.getPlan(sub.get("planKey", "")) if not plan or not plan.maxDataVolumeMB: return None - usedMB = self._getMandateDataVolumeMB(mandateId) + usedMB = self.getMandateDataVolumeMB(mandateId) limitMB = plan.maxDataVolumeMB percent = (usedMB / limitMB * 100) if limitMB > 0 else 0 if percent >= 80: diff --git a/modules/routes/routeBilling.py b/modules/routes/routeBilling.py index 37674e53..13e94559 100644 --- a/modules/routes/routeBilling.py +++ b/modules/routes/routeBilling.py @@ -13,7 +13,7 @@ from fastapi import APIRouter, HTTPException, Depends, Body, Path, Request, Resp from typing import List, Dict, Any, Optional from fastapi import status import logging -from datetime import date, datetime +from datetime import date, datetime, timezone from pydantic import BaseModel, Field # Import auth module @@ -263,6 +263,9 @@ class BillingSettingsUpdate(BaseModel): warningThresholdPercent: Optional[float] = Field(None, ge=0, le=100) notifyOnWarning: Optional[bool] = None notifyEmails: Optional[List[str]] = None + autoRechargeEnabled: Optional[bool] = None + rechargeAmountCHF: Optional[float] = Field(None, gt=0) + rechargeMaxPerMonth: Optional[int] = Field(None, ge=0) class TransactionResponse(BaseModel): @@ -704,11 +707,13 @@ def createOrUpdateSettings( targetMandateId: str = Path(..., description="Mandate ID"), settingsUpdate: BillingSettingsUpdate = Body(...), ctx: RequestContext = Depends(getRequestContext), - _admin = Depends(requireSysAdminRole) ): """ - Create or update billing settings for a mandate (SysAdmin only). + Create or update billing settings for a mandate. + Access: SysAdmin (any mandate) or MandateAdmin (own mandate). 
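Example request body (fields from BillingSettingsUpdate above; the values are illustrative):

    payload = {
        "warningThresholdPercent": 20,
        "notifyOnWarning": True,
        "notifyEmails": ["admin@example.com"],
        "autoRechargeEnabled": True,
        "rechargeAmountCHF": 10.0,   # must be > 0
        "rechargeMaxPerMonth": 3,    # must be >= 0
    }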
""" + if not _isAdminOfMandate(ctx, targetMandateId): + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Admin role required for this mandate") try: billingInterface = getBillingInterface(ctx.user, targetMandateId) existingSettings = billingInterface.getSettings(targetMandateId) @@ -735,6 +740,21 @@ def createOrUpdateSettings( else True ), notifyEmails=settingsUpdate.notifyEmails or [], + autoRechargeEnabled=( + settingsUpdate.autoRechargeEnabled + if settingsUpdate.autoRechargeEnabled is not None + else False + ), + rechargeAmountCHF=( + settingsUpdate.rechargeAmountCHF + if settingsUpdate.rechargeAmountCHF is not None + else 10.0 + ), + rechargeMaxPerMonth=( + settingsUpdate.rechargeMaxPerMonth + if settingsUpdate.rechargeMaxPerMonth is not None + else 3 + ), ) return billingInterface.createSettings(newSettings) @@ -1103,7 +1123,8 @@ def _handleSubscriptionWebhook(event) -> None: from datetime import datetime, timezone obj = event.data.object - stripeSubId = obj.get("id") if event.type.startswith("customer.subscription") else obj.get("subscription") + rawSub = obj.get("id") if event.type.startswith("customer.subscription") else obj.get("subscription") + stripeSubId = rawSub.get("id") if isinstance(rawSub, dict) else rawSub if not stripeSubId: logger.warning("Subscription webhook %s has no subscription ID", event.type) return @@ -1209,6 +1230,15 @@ def _handleSubscriptionWebhook(event) -> None: logger.error("Failed to notify about trial ending: %s", e) elif event.type == "invoice.paid": + period_ts = obj.get("period_start") + if period_ts: + period_start_at = datetime.fromtimestamp(int(period_ts), tz=timezone.utc) + try: + billing_if = _getRootInterface() + billing_if.resetStorageBillingPeriod(mandateId, period_start_at) + billing_if.reconcileMandateStorageBilling(mandateId) + except Exception as ex: + logger.error("Storage billing on invoice.paid failed: %s", ex) logger.info("Invoice paid for sub %s (mandate %s)", subId, mandateId) return None diff --git a/modules/routes/routeSecurityLocal.py b/modules/routes/routeSecurityLocal.py index fb71444b..11b6cb0f 100644 --- a/modules/routes/routeSecurityLocal.py +++ b/modules/routes/routeSecurityLocal.py @@ -4,7 +4,7 @@ Routes for local security and authentication. 
""" -from fastapi import APIRouter, HTTPException, status, Depends, Request, Response, Body, Query, Path +from fastapi import APIRouter, HTTPException, status, Depends, Request, Response, Body, Path from fastapi.security import OAuth2PasswordRequestForm import logging from typing import Dict, Any @@ -822,125 +822,6 @@ def password_reset( ) -# ============================================================ -# Voice Preferences (user-level, shared across features) -# ============================================================ - -@router.get("/voice-preferences") -@limiter.limit("60/minute") -def getVoicePreferences( - request: Request, - currentUser: User = Depends(getCurrentUser), -) -> Dict[str, Any]: - """Get user's voice/language preferences (optionally scoped to mandate via header).""" - rootInterface = getRootInterface() - from modules.datamodels.datamodelUam import UserVoicePreferences - - mandateId = request.headers.get("X-Mandate-Id") or None - userId = str(currentUser.id) - - prefs = rootInterface.db.getRecordset( - UserVoicePreferences, - recordFilter={"userId": userId, "mandateId": mandateId} - ) - if prefs: - return prefs[0] if isinstance(prefs[0], dict) else prefs[0].model_dump() - return UserVoicePreferences(userId=userId, mandateId=mandateId).model_dump() - - -@router.put("/voice-preferences") -@limiter.limit("30/minute") -def updateVoicePreferences( - request: Request, - preferences: Dict[str, Any] = Body(...), - currentUser: User = Depends(getCurrentUser), -) -> Dict[str, Any]: - """Update user's voice/language preferences (upsert).""" - rootInterface = getRootInterface() - from modules.datamodels.datamodelUam import UserVoicePreferences - - mandateId = request.headers.get("X-Mandate-Id") or None - userId = str(currentUser.id) - - existing = rootInterface.db.getRecordset( - UserVoicePreferences, - recordFilter={"userId": userId, "mandateId": mandateId} - ) - - allowedFields = { - "sttLanguage", "ttsLanguage", "ttsVoice", "ttsVoiceMap", - "translationSourceLanguage", "translationTargetLanguage", - } - updateData = {k: v for k, v in preferences.items() if k in allowedFields} - - if existing: - existingRecord = existing[0] - existingId = existingRecord.get("id") if isinstance(existingRecord, dict) else existingRecord.id - rootInterface.db.recordModify(UserVoicePreferences, existingId, updateData) - updated = rootInterface.db.getRecordset(UserVoicePreferences, recordFilter={"id": existingId}) - return updated[0] if updated else {"message": "Updated", **updateData} - else: - newPrefs = UserVoicePreferences(userId=userId, mandateId=mandateId, **updateData) - created = rootInterface.db.recordCreate(UserVoicePreferences, newPrefs.model_dump()) - return created if isinstance(created, dict) else created.model_dump() - - -@router.get("/voice/languages") -@limiter.limit("120/minute") -async def getVoiceLanguages( - request: Request, - currentUser: User = Depends(getCurrentUser), -) -> Dict[str, Any]: - """Return available TTS languages (user-level, no instance context needed).""" - from modules.interfaces.interfaceVoiceObjects import getVoiceInterface - voiceInterface = getVoiceInterface(currentUser) - languagesResult = await voiceInterface.getAvailableLanguages() - languageList = languagesResult.get("languages", []) if isinstance(languagesResult, dict) else languagesResult - return {"languages": languageList} - - -@router.get("/voice/voices") -@limiter.limit("120/minute") -async def getVoiceVoices( - request: Request, - language: str = Query("de-DE"), - currentUser: User = 
Depends(getCurrentUser), -) -> Dict[str, Any]: - """Return available TTS voices for a given language.""" - from modules.interfaces.interfaceVoiceObjects import getVoiceInterface - voiceInterface = getVoiceInterface(currentUser) - voicesResult = await voiceInterface.getAvailableVoices(language) - voiceList = voicesResult.get("voices", []) if isinstance(voicesResult, dict) else voicesResult - return {"voices": voiceList} - - -@router.post("/voice/test") -@limiter.limit("30/minute") -async def testVoice( - request: Request, - body: Dict[str, Any] = Body(...), - currentUser: User = Depends(getCurrentUser), -) -> Dict[str, Any]: - """Test a specific voice with a sample text.""" - import base64 - from modules.interfaces.interfaceVoiceObjects import getVoiceInterface - - text = body.get("text", "Hallo, das ist ein Stimmtest.") - language = body.get("language", "de-DE") - voiceId = body.get("voiceId") - - voiceInterface = getVoiceInterface(currentUser) - result = await voiceInterface.textToSpeech(text=text, languageCode=language, voiceName=voiceId) - if result and isinstance(result, dict): - audioContent = result.get("audioContent") - if audioContent: - audioB64 = base64.b64encode( - audioContent if isinstance(audioContent, bytes) else audioContent.encode() - ).decode() - return {"success": True, "audio": audioB64, "format": "mp3", "text": text} - return {"success": False, "error": "TTS returned no audio"} - - # ============================================================ # Neutralization Mappings (user-level, view/delete) # ============================================================ diff --git a/modules/routes/routeStore.py b/modules/routes/routeStore.py index 19b81ca7..c9512d3f 100644 --- a/modules/routes/routeStore.py +++ b/modules/routes/routeStore.py @@ -190,12 +190,22 @@ def getSubscriptionInfo( mandateId = adminMandateIds[0] if not mandateId: - return {"plan": None, "maxDataVolumeMB": None, "maxFeatureInstances": None} + return { + "plan": None, + "maxDataVolumeMB": None, + "maxFeatureInstances": None, + "budgetAiCHF": None, + } from modules.datamodels.datamodelSubscription import MandateSubscription, BUILTIN_PLANS subs = db.getRecordset(MandateSubscription, recordFilter={"mandateId": mandateId}) if not subs: - return {"plan": None, "maxDataVolumeMB": None, "maxFeatureInstances": None} + return { + "plan": None, + "maxDataVolumeMB": None, + "maxFeatureInstances": None, + "budgetAiCHF": None, + } sub = subs[0] plan = BUILTIN_PLANS.get(sub.get("planKey")) @@ -206,12 +216,18 @@ def getSubscriptionInfo( "status": sub.get("status"), "maxDataVolumeMB": plan.maxDataVolumeMB if plan else None, "maxFeatureInstances": plan.maxFeatureInstances if plan else None, + "budgetAiCHF": plan.budgetAiCHF if plan else None, "currentFeatureInstances": len(currentInstances), "trialEndsAt": sub.get("trialEndsAt"), } except Exception as e: logger.error(f"Error getting subscription info: {e}") - return {"plan": None, "maxDataVolumeMB": None, "maxFeatureInstances": None} + return { + "plan": None, + "maxDataVolumeMB": None, + "maxFeatureInstances": None, + "budgetAiCHF": None, + } @router.get("/features", response_model=List[StoreFeatureResponse]) diff --git a/modules/routes/routeVoiceUser.py b/modules/routes/routeVoiceUser.py new file mode 100644 index 00000000..9b628eeb --- /dev/null +++ b/modules/routes/routeVoiceUser.py @@ -0,0 +1,327 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +User-scoped voice settings and TTS/STT catalog endpoints. 
+ +Uses modules.interfaces.interfaceVoiceObjects (voice core) and persists preferences +via UserVoicePreferences — same domain as routeVoiceGoogle (Google connector ops). +""" + +import base64 +import logging +from typing import Any, Dict + +from fastapi import APIRouter, Body, Depends, HTTPException, Query, Request, status + +from modules.auth import getCurrentUser, limiter +from modules.datamodels.datamodelUam import User, UserVoicePreferences +from modules.interfaces.interfaceDbApp import getRootInterface +from modules.interfaces.interfaceVoiceObjects import getVoiceInterface + +logger = logging.getLogger(__name__) + +router = APIRouter( + prefix="/api/voice", + tags=["Voice User"], + responses={ + 404: {"description": "Not found"}, + 400: {"description": "Bad request"}, + 401: {"description": "Unauthorized"}, + 403: {"description": "Forbidden"}, + 500: {"description": "Internal server error"}, + }, +) + + +@router.get("/preferences") +@limiter.limit("60/minute") +def getVoicePreferences( + request: Request, + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + """Get user's voice/language preferences (optionally scoped to mandate via header).""" + rootInterface = getRootInterface() + mandateId = request.headers.get("X-Mandate-Id") or None + userId = str(currentUser.id) + + prefs = rootInterface.db.getRecordset( + UserVoicePreferences, + recordFilter={"userId": userId, "mandateId": mandateId}, + ) + if prefs: + return prefs[0] if isinstance(prefs[0], dict) else prefs[0].model_dump() + return UserVoicePreferences(userId=userId, mandateId=mandateId).model_dump() + + +@router.put("/preferences") +@limiter.limit("30/minute") +def updateVoicePreferences( + request: Request, + preferences: Dict[str, Any] = Body(...), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + """Update user's voice/language preferences (upsert).""" + rootInterface = getRootInterface() + mandateId = request.headers.get("X-Mandate-Id") or None + userId = str(currentUser.id) + + existing = rootInterface.db.getRecordset( + UserVoicePreferences, + recordFilter={"userId": userId, "mandateId": mandateId}, + ) + + allowedFields = { + "sttLanguage", + "ttsLanguage", + "ttsVoice", + "ttsVoiceMap", + "translationSourceLanguage", + "translationTargetLanguage", + } + updateData = {k: v for k, v in preferences.items() if k in allowedFields} + + if existing: + existingRecord = existing[0] + existingId = existingRecord.get("id") if isinstance(existingRecord, dict) else existingRecord.id + rootInterface.db.recordModify(UserVoicePreferences, existingId, updateData) + updated = rootInterface.db.getRecordset(UserVoicePreferences, recordFilter={"id": existingId}) + return updated[0] if updated else {"message": "Updated", **updateData} + newPrefs = UserVoicePreferences(userId=userId, mandateId=mandateId, **updateData) + created = rootInterface.db.recordCreate(UserVoicePreferences, newPrefs.model_dump()) + return created if isinstance(created, dict) else created.model_dump() + + +@router.get("/languages") +@limiter.limit("120/minute") +async def getVoiceLanguages( + request: Request, + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + """Return available TTS languages (user-level, no instance context needed).""" + voiceInterface = getVoiceInterface(currentUser) + languagesResult = await voiceInterface.getAvailableLanguages() + languageList = languagesResult.get("languages", []) if isinstance(languagesResult, dict) else languagesResult + return {"languages": languageList} + + 
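
For orientation, a minimal client-side sketch of the preferences upsert defined above (illustrative only, not part of the patch; the base URL, token, and mandate ID are placeholder assumptions):

# Hypothetical usage of GET/PUT /api/voice/preferences; all concrete values are assumptions.
import httpx

BASE_URL = "https://poweron.example"      # assumption: deployment base URL
HEADERS = {
    "Authorization": "Bearer <token>",    # assumption: bearer auth backing getCurrentUser
    "X-Mandate-Id": "mandate-123",        # optional header: scopes preferences per mandate
}

# First read returns stored preferences, or model defaults if none exist yet.
prefs = httpx.get(f"{BASE_URL}/api/voice/preferences", headers=HEADERS).json()

# PUT upserts: only keys in the endpoint's allow-list are persisted; anything else is dropped.
updated = httpx.put(
    f"{BASE_URL}/api/voice/preferences",
    headers=HEADERS,
    json={"ttsLanguage": "de-DE", "sttLanguage": "de-CH", "unknownKey": 1},
).json()

Because the same (userId, mandateId) pair maps to at most one UserVoicePreferences row, repeated PUTs modify in place rather than accumulating records.
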
+@router.get("/voices") +@limiter.limit("120/minute") +async def getVoiceVoices( + request: Request, + language: str = Query("de-DE"), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + """Return available TTS voices for a given language.""" + voiceInterface = getVoiceInterface(currentUser) + voicesResult = await voiceInterface.getAvailableVoices(language) + voiceList = voicesResult.get("voices", []) if isinstance(voicesResult, dict) else voicesResult + return {"voices": voiceList} + + +# Same minimum as modules.serviceCenter.services.serviceAi.mainServiceAi._checkBillingBeforeAiCall +_MIN_AI_BILLING_ESTIMATE_CHF = 0.01 + + +def _userMandateIds(rootInterface, currentUser: User): + memberships = rootInterface.getUserMandates(str(currentUser.id)) + out = [] + for um in memberships: + mid = getattr(um, "mandateId", None) or (um.get("mandateId") if isinstance(um, dict) else None) + if mid: + out.append(str(mid)) + return list(dict.fromkeys(out)) + + +def _mandatePassesAiPoolBilling(currentUser: User, mandateId: str, userId: str) -> bool: + """True if mandate pool passes the same billing gate as AI calls (subscription + pool >= estimate).""" + from modules.interfaces.interfaceDbBilling import getInterface as getBillingInterface + + bi = getBillingInterface(currentUser, mandateId) + res = bi.checkBalance(mandateId, userId, _MIN_AI_BILLING_ESTIMATE_CHF) + return bool(res.allowed) + + +def _mandatePoolBalanceChf(currentUser: User, mandateId: str) -> float: + from modules.interfaces.interfaceDbBilling import getInterface as getBillingInterface + + bi = getBillingInterface(currentUser, mandateId) + acc = bi.getMandateAccount(mandateId) + if not acc: + return 0.0 + return float(acc.get("balance", 0.0) or 0.0) + + +def _resolveMandateIdForVoiceTestAi(request: Request, currentUser: User) -> str: + """ + AI sample billing uses mandate pool (PREPAY), not per-user wallet. + Prefer X-Mandate-Id when the user is a member and that mandate's pool can pay; + otherwise pick the member mandate with the highest pool balance that passes the AI billing check. + """ + rootInterface = getRootInterface() + userId = str(currentUser.id) + memberIds = _userMandateIds(rootInterface, currentUser) + if not memberIds: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=( + "Voice test needs at least one mandate membership for AI billing. " + "Join a mandate or open the app from a mandate context." 
+ ), + ) + + headerRaw = (request.headers.get("X-Mandate-Id") or request.headers.get("x-mandate-id") or "").strip() + if headerRaw: + if headerRaw not in memberIds: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="X-Mandate-Id is not a mandate you belong to.", + ) + if _mandatePassesAiPoolBilling(currentUser, headerRaw, userId): + logger.info( + "Voice test AI billing: using header mandate %s (pool ok for estimate %.4f CHF)", + headerRaw, + _MIN_AI_BILLING_ESTIMATE_CHF, + ) + return headerRaw + logger.warning( + "Voice test AI billing: header mandate %s has insufficient mandate pool or subscription; " + "trying other memberships", + headerRaw, + ) + + bestMid = None + bestBal = -1.0 + for mid in memberIds: + if not _mandatePassesAiPoolBilling(currentUser, mid, userId): + continue + bal = _mandatePoolBalanceChf(currentUser, mid) + if bal > bestBal: + bestBal = bal + bestMid = mid + + if bestMid: + logger.info( + "Voice test AI billing: selected mandate %s (mandate pool %.2f CHF, estimate %.4f CHF)", + bestMid, + bestBal, + _MIN_AI_BILLING_ESTIMATE_CHF, + ) + return bestMid + + raise HTTPException( + status_code=status.HTTP_402_PAYMENT_REQUIRED, + detail=( + "No mandate you belong to has sufficient shared pool balance for AI (or subscription inactive). " + "Top up the mandate pool or use a mandate with budget." + ), + ) + + +def _sanitizeAiTtsSample(raw: str) -> str: + s = (raw or "").strip() + if s.startswith("```"): + nl = s.find("\n") + if nl != -1: + s = s[nl + 1 :] + if s.rstrip().endswith("```"): + s = s.rstrip()[:-3].strip() + if len(s) >= 2 and ((s[0] == s[-1] == '"') or (s[0] == s[-1] == "'")): + s = s[1:-1].strip() + return s + + +async def _generateTtsSampleTextForLocale( + request: Request, + currentUser: User, + localeTag: str, +) -> str: + from modules.serviceCenter import getService + from modules.serviceCenter.context import ServiceCenterContext + from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum + from modules.serviceCenter.services.serviceBilling.mainServiceBilling import ( + BillingContextError, + InsufficientBalanceException, + ProviderNotAllowedException, + ) + from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import SubscriptionInactiveException + + mandateId = _resolveMandateIdForVoiceTestAi(request, currentUser) + ctx = ServiceCenterContext(user=currentUser, mandate_id=mandateId, feature_instance_id=None) + aiService = getService("ai", ctx) + + systemPrompt = ( + "You write short text-to-speech demo lines for end users.\n" + "Task: Output exactly one or two natural sentences a user would enjoy hearing when testing a voice.\n" + "The entire output MUST be written ONLY in the natural spoken language that matches the given " + "BCP-47 locale tag. Do not use any other language.\n" + "Do not mention locales, tags, tests, artificial intelligence, or these instructions.\n" + "No quotation marks around the text. No markdown. Plain text only." + ) + userPrompt = f"BCP-47 locale tag: `{localeTag}`.\nWrite the sample now." 
+ + aiRequest = AiCallRequest( + prompt=userPrompt, + context=systemPrompt, + requireNeutralization=False, + options=AiCallOptions( + operationType=OperationTypeEnum.DATA_GENERATE, + priority=PriorityEnum.SPEED, + processingMode=ProcessingModeEnum.BASIC, + compressPrompt=False, + compressContext=False, + temperature=0.75, + maxParts=1, + ), + ) + try: + response = await aiService.callAi(aiRequest) + except SubscriptionInactiveException as e: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=e.message) from e + except InsufficientBalanceException as e: + raise HTTPException(status_code=status.HTTP_402_PAYMENT_REQUIRED, detail=str(e)) from e + except ProviderNotAllowedException as e: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail=getattr(e, "message", None) or str(e), + ) from e + except BillingContextError as e: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) from e + + content = _sanitizeAiTtsSample(getattr(response, "content", None) or "") + if getattr(response, "errorCount", 0) or not content: + logger.warning("Voice test AI sample empty or errorCount=%s", getattr(response, "errorCount", None)) + raise HTTPException( + status_code=status.HTTP_502_BAD_GATEWAY, + detail="Could not generate voice test sample text.", + ) + if len(content) > 500: + content = content[:500].rstrip() + return content + + +@router.post("/test") +@limiter.limit("30/minute") +async def testVoice( + request: Request, + body: Dict[str, Any] = Body(...), + currentUser: User = Depends(getCurrentUser), +) -> Dict[str, Any]: + """Test a specific voice. Sample text is AI-generated in the voice locale unless `text` is supplied.""" + textRaw = body.get("text") + language = body.get("language", "de-DE") + voiceId = body.get("voiceId") + + text = (textRaw or "").strip() if isinstance(textRaw, str) else "" + if not text: + text = await _generateTtsSampleTextForLocale(request, currentUser, language) + + voiceInterface = getVoiceInterface(currentUser) + result = await voiceInterface.textToSpeech(text=text, languageCode=language, voiceName=voiceId) + if result and isinstance(result, dict): + audioContent = result.get("audioContent") + if audioContent: + audioB64 = base64.b64encode( + audioContent if isinstance(audioContent, bytes) else audioContent.encode() + ).decode() + return {"success": True, "audio": audioB64, "format": "mp3", "text": text} + return {"success": False, "error": "TTS returned no audio"} diff --git a/modules/serviceCenter/context.py b/modules/serviceCenter/context.py index acad6d61..24868fca 100644 --- a/modules/serviceCenter/context.py +++ b/modules/serviceCenter/context.py @@ -21,6 +21,8 @@ class ServiceCenterContext: workflow_id: Optional[str] = None workflow: Any = None requireNeutralization: Optional[bool] = None + # When workflow is absent (e.g. workspace agent), billing/UI still need feature code for transactions. 
+ feature_code: Optional[str] = None @property def mandateId(self) -> Optional[str]: diff --git a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py index c4e1f877..23a749ab 100644 --- a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py +++ b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py @@ -322,7 +322,7 @@ class AgentService: def _createAiCallFn(self) -> Callable[[AiCallRequest], AiCallResponse]: """Create the AI call function that wraps serviceAi with billing.""" - ctxNeutralization = getattr(self.ctx, 'requireNeutralization', None) + ctxNeutralization = getattr(self._context, "requireNeutralization", None) async def _aiCallFn(request: AiCallRequest) -> AiCallResponse: if ctxNeutralization is not None and request.requireNeutralization is None: request.requireNeutralization = ctxNeutralization @@ -332,7 +332,7 @@ class AgentService: def _createAiCallStreamFn(self): """Create the streaming AI call function. Yields str deltas, then AiCallResponse.""" - ctxNeutralization = getattr(self.ctx, 'requireNeutralization', None) + ctxNeutralization = getattr(self._context, "requireNeutralization", None) async def _aiCallStreamFn(request: AiCallRequest): if ctxNeutralization is not None and request.requireNeutralization is None: request.requireNeutralization = ctxNeutralization diff --git a/modules/serviceCenter/services/serviceAi/mainServiceAi.py b/modules/serviceCenter/services/serviceAi/mainServiceAi.py index b25374d5..e2de43e6 100644 --- a/modules/serviceCenter/services/serviceAi/mainServiceAi.py +++ b/modules/serviceCenter/services/serviceAi/mainServiceAi.py @@ -77,6 +77,9 @@ class _ServicesAdapter: @property def featureCode(self) -> Optional[str]: + fc = getattr(self._context, "feature_code", None) + if fc and str(fc).strip(): + return str(fc).strip() w = self.workflow if w and hasattr(w, "feature") and w.feature: return getattr(w.feature, "code", None) @@ -742,9 +745,8 @@ detectedIntent-Werte: balance_str = f"{(balanceCheck.currentBalance or 0):.2f}" logger.warning( - f"Billing check failed for user {user.id}: " - f"Balance {balance_str} CHF, " - f"Reason: {reason}" + f"AI billing check failed (mandate pool): mandate={mandateId} user={user.id} " + f"poolBalance={balance_str} CHF required~={estimatedCost:.4f} CHF reason={reason}" ) ulabel = (getattr(user, "email", None) or getattr(user, "username", None) or str(user.id)) maybeEmailMandatePoolExhausted( diff --git a/modules/serviceCenter/services/serviceBilling/mainServiceBilling.py b/modules/serviceCenter/services/serviceBilling/mainServiceBilling.py index 3a33f1f6..90c9a347 100644 --- a/modules/serviceCenter/services/serviceBilling/mainServiceBilling.py +++ b/modules/serviceCenter/services/serviceBilling/mainServiceBilling.py @@ -58,6 +58,9 @@ def getService(currentUser: User, mandateId: str, featureInstanceId: str = None, def _get_feature_code_from_context(context) -> Optional[str]: """Extract featureCode from ServiceCenterContext.""" + explicit = getattr(context, "feature_code", None) + if explicit and str(explicit).strip(): + return str(explicit).strip() if context.workflow and hasattr(context.workflow, "feature") and context.workflow.feature: return getattr(context.workflow.feature, "code", None) return getattr(context.workflow, "featureCode", None) if context.workflow else None diff --git a/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py 
b/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py
index 7d85edcc..0f20bc7f 100644
--- a/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py
+++ b/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py
@@ -210,6 +210,13 @@ class KnowledgeService:
         except Exception as e:
             logger.debug(f"Could not set neutralizationStatus for file {fileId}: {e}")
         logger.info(f"Indexed file {fileId} ({fileName}): {len(contentObjects)} objects, {len(textObjects)} text chunks")
+        if mandateId:
+            try:
+                from modules.interfaces.interfaceDbBilling import _getRootInterface
+
+                _getRootInterface().reconcileMandateStorageBilling(str(mandateId))
+            except Exception as ex:
+                logger.warning("reconcileMandateStorageBilling after index failed: %s", ex)
         return index
 
     # =========================================================================
diff --git a/scripts/script_db_export_migration.py b/scripts/script_db_export_migration.py
index e5961e23..b85dcf54 100644
--- a/scripts/script_db_export_migration.py
+++ b/scripts/script_db_export_migration.py
@@ -99,25 +99,44 @@ try:
 except Exception as e:
     logger.warning(f"Could not refresh APP_CONFIG: {e}")
 
-# All PowerOn databases
+# All PowerOn databases (keep in sync with interfaceBootstrap._POWERON_DATABASE_NAMES)
 ALL_DATABASES = [
-    "poweron_app",         # Main app: users, mandates, RBAC, features
-    "poweron_chat",        # Chat conversations
-    "poweron_chatbot",     # Chatbot feature: conversations, messages, logs
-    "poweron_management",  # Workflows, prompts, connections
-    "poweron_realestate",  # Real estate
-    "poweron_trustee",     # Trustee
+    "poweron_app",
+    "poweron_automation",
+    "poweron_automation2",
+    "poweron_billing",
+    "poweron_chat",
+    "poweron_chatbot",
+    "poweron_commcoach",
+    "poweron_knowledge",
+    "poweron_management",
+    "poweron_neutralization",
+    "poweron_realestate",
+    "poweron_teamsbot",
+    "poweron_test",
+    "poweron_trustee",
+    "poweron_workspace",
 ]
 
 # Database configuration: mapping from DB name to config prefix.
 # Each database has its own variables: DB_APP_HOST, DB_CHAT_HOST, etc.
+# Unlisted names use prefix "DB" (DB_HOST, DB_USER, …) via _getDbConfig fallback.
 DATABASE_CONFIG = {
-    "poweron_app": "DB_APP",    # DB_APP_HOST, DB_APP_USER, DB_APP_PASSWORD_SECRET, etc.
-    "poweron_chat": "DB_CHAT",  # DB_CHAT_HOST, DB_CHAT_USER, etc.
- "poweron_chatbot": "DB_CHATBOT", # DB_CHATBOT_* (fallsback to DB_*) + "poweron_app": "DB_APP", + "poweron_chat": "DB_CHAT", + "poweron_chatbot": "DB_CHATBOT", "poweron_management": "DB_MANAGEMENT", "poweron_realestate": "DB_REALESTATE", "poweron_trustee": "DB_TRUSTEE", + "poweron_automation": "DB", + "poweron_automation2": "DB", + "poweron_billing": "DB", + "poweron_commcoach": "DB", + "poweron_knowledge": "DB", + "poweron_neutralization": "DB", + "poweron_teamsbot": "DB", + "poweron_test": "DB", + "poweron_workspace": "DB", } diff --git a/tests/test_phase123_basic.py b/tests/test_phase123_basic.py index 222c6043..d13c4271 100644 --- a/tests/test_phase123_basic.py +++ b/tests/test_phase123_basic.py @@ -284,10 +284,13 @@ except Exception as e: print(f" [FAIL] Fix 5: {e}") try: - with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), - "modules", "routes", "routeSecurityLocal.py"), "r") as f: + voiceUserPath = os.path.join( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))), + "modules", "routes", "routeVoiceUser.py", + ) + with open(voiceUserPath, "r") as f: source = f.read() - _check("Voice preferences GET endpoint", "voice-preferences" in source and "getVoicePreferences" in source) + _check("Voice preferences GET endpoint", '"/preferences"' in source and "getVoicePreferences" in source) _check("Voice preferences PUT endpoint", "updateVoicePreferences" in source) except Exception as e: errors.append(f"Fix 5 Routes: {e}") From 7e8800572197da35727e28ae6bf927563d3247ea Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Sun, 29 Mar 2026 21:55:09 +0200 Subject: [PATCH 13/33] unified failsafe neutralization architecture --- modules/aicore/aicorePluginPrivateLlm.py | 9 +- modules/datamodels/datamodelAi.py | 4 + modules/datamodels/datamodelBilling.py | 1 + modules/datamodels/datamodelKnowledge.py | 10 +- modules/datamodels/datamodelUam.py | 34 ++- .../mainServiceNeutralization.py | 97 +++++++- modules/interfaces/interfaceBootstrap.py | 7 + modules/interfaces/interfaceDbApp.py | 18 +- modules/interfaces/interfaceDbBilling.py | 35 +++ modules/interfaces/interfaceDbKnowledge.py | 95 ++++++-- modules/interfaces/interfaceDbManagement.py | 13 +- modules/interfaces/interfaceDbSubscription.py | 23 +- modules/migration/migrateRagScopeFields.py | 114 ++++++++++ modules/routes/routeAdminFeatures.py | 9 +- modules/routes/routeBilling.py | 23 +- modules/routes/routeDataFiles.py | 112 +++++---- modules/routes/routeStore.py | 135 ++++++++--- modules/routes/routeSubscription.py | 59 ++--- modules/routes/routeVoiceGoogle.py | 4 +- modules/routes/routeVoiceUser.py | 4 +- .../services/serviceAgent/mainServiceAgent.py | 112 ++++++++- .../services/serviceAi/mainServiceAi.py | 215 +++++++++++++++--- .../serviceKnowledge/mainServiceKnowledge.py | 145 +++++++----- .../services/serviceKnowledge/subPreScan.py | 2 + modules/shared/notifyMandateAdmins.py | 10 +- .../methodContext/actions/extractContent.py | 71 +----- .../methodContext/actions/neutralizeData.py | 30 ++- modules/workflows/workflowManager.py | 96 +------- 28 files changed, 1064 insertions(+), 423 deletions(-) create mode 100644 modules/migration/migrateRagScopeFields.py diff --git a/modules/aicore/aicorePluginPrivateLlm.py b/modules/aicore/aicorePluginPrivateLlm.py index 718c5905..38baa35e 100644 --- a/modules/aicore/aicorePluginPrivateLlm.py +++ b/modules/aicore/aicorePluginPrivateLlm.py @@ -7,9 +7,9 @@ Connects to the private-llm service running on-premise with Ollama backend. 
Provides OCR and Vision capabilities via local AI models. Models: -- poweron-ocr-general: Text extraction and OCR (deepseek backend) -- poweron-vision-general: General vision tasks (qwen2.5vl backend) -- poweron-vision-deep: Deep vision analysis (granite3.2 backend) +- poweron-text-general: Text (qwen2.5); NEUTRALIZATION_TEXT + data/plan ops +- poweron-vision-general: Vision (qwen2.5vl); IMAGE_ANALYSE + NEUTRALIZATION_IMAGE +- poweron-vision-deep: Vision (granite3.2); IMAGE_ANALYSE + NEUTRALIZATION_IMAGE Pricing (CHF per call): - Text models: CHF 0.010 @@ -245,6 +245,7 @@ class AiPrivateLlm(BaseConnectorAi): (OperationTypeEnum.DATA_ANALYSE, 8), (OperationTypeEnum.DATA_GENERATE, 8), (OperationTypeEnum.DATA_EXTRACT, 8), + (OperationTypeEnum.NEUTRALIZATION_TEXT, 9), ), version="qwen2.5:7b", calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: PRICE_TEXT_PER_CALL @@ -270,6 +271,7 @@ class AiPrivateLlm(BaseConnectorAi): processingMode=ProcessingModeEnum.ADVANCED, operationTypes=createOperationTypeRatings( (OperationTypeEnum.IMAGE_ANALYSE, 9), + (OperationTypeEnum.NEUTRALIZATION_IMAGE, 9), ), version="qwen2.5vl:7b", calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: PRICE_VISION_PER_CALL @@ -295,6 +297,7 @@ class AiPrivateLlm(BaseConnectorAi): processingMode=ProcessingModeEnum.DETAILED, operationTypes=createOperationTypeRatings( (OperationTypeEnum.IMAGE_ANALYSE, 9), + (OperationTypeEnum.NEUTRALIZATION_IMAGE, 9), ), version="granite3.2-vision", calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: PRICE_VISION_PER_CALL diff --git a/modules/datamodels/datamodelAi.py b/modules/datamodels/datamodelAi.py index c31d5696..96e05185 100644 --- a/modules/datamodels/datamodelAi.py +++ b/modules/datamodels/datamodelAi.py @@ -22,6 +22,10 @@ class OperationTypeEnum(str, Enum): IMAGE_ANALYSE = "imageAnalyse" IMAGE_GENERATE = "imageGenerate" + # Neutralization (dedicated model selection; text vs vision backends) + NEUTRALIZATION_TEXT = "neutralizationText" + NEUTRALIZATION_IMAGE = "neutralizationImage" + # Web Operations WEB_SEARCH_DATA = "webSearch" # Returns list of URLs only WEB_CRAWL = "webCrawl" # Web crawl for a given URL diff --git a/modules/datamodels/datamodelBilling.py b/modules/datamodels/datamodelBilling.py index 2d3bfdb1..ccf1f4a1 100644 --- a/modules/datamodels/datamodelBilling.py +++ b/modules/datamodels/datamodelBilling.py @@ -28,6 +28,7 @@ class ReferenceTypeEnum(str, Enum): ADMIN = "ADMIN" # Admin adjustment SYSTEM = "SYSTEM" # System credit (e.g., initial credit) STORAGE = "STORAGE" # Metered storage overage (prepay pool) + SUBSCRIPTION = "SUBSCRIPTION" # AI budget credit from subscription plan class PeriodTypeEnum(str, Enum): diff --git a/modules/datamodels/datamodelKnowledge.py b/modules/datamodels/datamodelKnowledge.py index 3742a84b..7ac12c15 100644 --- a/modules/datamodels/datamodelKnowledge.py +++ b/modules/datamodels/datamodelKnowledge.py @@ -3,8 +3,10 @@ """Knowledge Store data models: FileContentIndex, ContentChunk, WorkflowMemory. 
These models support the 3-tier RAG architecture: -- Shared Layer: mandateId-scoped, isShared=True -- Instance Layer: userId + featureInstanceId-scoped +- Personal Layer: scope=personal, userId-scoped +- Instance Layer: scope=featureInstance, featureInstanceId-scoped +- Mandate Layer: scope=mandate, mandateId-scoped (visible to all mandate users) +- Global Layer: scope=global (sysAdmin only) - Workflow Layer: workflowId-scoped (WorkflowMemory) Vector fields use json_schema_extra={"db_type": "vector(1536)"} for pgvector. @@ -20,12 +22,11 @@ import uuid class FileContentIndex(PowerOnModel): """Structural index of a file's content objects. Created without AI. - Lives in the Instance Layer; optionally promoted to Shared Layer via isShared.""" + Scope is mirrored from FileItem (poweron_management) at indexing time.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key (typically = fileId)") userId: str = Field(description="Owner user ID") featureInstanceId: str = Field(default="", description="Feature instance scope") mandateId: str = Field(default="", description="Mandate scope") - isShared: bool = Field(default=False, description="Visible in Shared Layer for all mandate users") fileName: str = Field(description="Original file name") mimeType: str = Field(description="MIME type of the file") containerPath: Optional[str] = Field(default=None, description="Path within a container (e.g. 'archive.zip/folder/report.pdf')") @@ -57,7 +58,6 @@ registerModelLabels( "userId": {"en": "User ID", "fr": "ID utilisateur"}, "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance"}, "mandateId": {"en": "Mandate ID", "fr": "ID du mandat"}, - "isShared": {"en": "Shared", "fr": "Partagé"}, "fileName": {"en": "File Name", "fr": "Nom de fichier"}, "mimeType": {"en": "MIME Type", "fr": "Type MIME"}, "containerPath": {"en": "Container Path", "fr": "Chemin du conteneur"}, diff --git a/modules/datamodels/datamodelUam.py b/modules/datamodels/datamodelUam.py index 741ce3d5..e0c4f13c 100644 --- a/modules/datamodels/datamodelUam.py +++ b/modules/datamodels/datamodelUam.py @@ -10,7 +10,7 @@ Multi-Tenant Design: """ import uuid -from typing import Optional, List, Dict +from typing import Optional, List, Dict, Any from enum import Enum from pydantic import BaseModel, Field, EmailStr, field_validator, computed_field from modules.datamodels.datamodelBase import PowerOnModel @@ -303,6 +303,33 @@ registerModelLabels( ) +def _normalizeTtsVoiceMap(value: Any) -> Optional[Dict[str, str]]: + """ + Coerce ttsVoiceMap payloads to Dict[str, str]. + + UI/clients may send per-locale objects like {"voiceName": "de-DE-Chirp3-HD-Achird"}; + storage and model field type are locale -> voice id string. 
+ """ + if value is None: + return None + if not isinstance(value, dict): + return None + out: Dict[str, str] = {} + for rawKey, rawVal in value.items(): + key = str(rawKey) + if rawVal is None: + continue + if isinstance(rawVal, str): + out[key] = rawVal + elif isinstance(rawVal, dict): + vn = rawVal.get("voiceName") + if vn is not None and str(vn).strip() != "": + out[key] = str(vn).strip() + else: + out[key] = str(rawVal) + return out if out else None + + class UserVoicePreferences(PowerOnModel): """User-level voice/language preferences, shared across all features.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") @@ -315,6 +342,11 @@ class UserVoicePreferences(PowerOnModel): translationSourceLanguage: Optional[str] = Field(default=None, description="Source language for translations") translationTargetLanguage: Optional[str] = Field(default=None, description="Target language for translations") + @field_validator("ttsVoiceMap", mode="before") + @classmethod + def _validateTtsVoiceMap(cls, value: Any) -> Optional[Dict[str, str]]: + return _normalizeTtsVoiceMap(value) + registerModelLabels( "UserVoicePreferences", diff --git a/modules/features/neutralization/serviceNeutralization/mainServiceNeutralization.py b/modules/features/neutralization/serviceNeutralization/mainServiceNeutralization.py index c803b375..e583c60b 100644 --- a/modules/features/neutralization/serviceNeutralization/mainServiceNeutralization.py +++ b/modules/features/neutralization/serviceNeutralization/mainServiceNeutralization.py @@ -203,6 +203,89 @@ class NeutralizationService: 'processed_info': {'type': 'binary', 'status': 'error', 'error': str(e)} } + async def processImageAsync(self, imageBytes: bytes, fileName: str, mimeType: str = "image/png") -> Dict[str, Any]: + """Analyze image via internal vision model to check for sensitive content. + + Returns dict with: + - 'status': 'ok' | 'blocked' | 'error' + - 'hasSensitiveContent': bool + - 'analysis': str (model's analysis text, if available) + - 'processed_info': dict with details + + Uses NEUTRALIZATION_IMAGE operation type → only internal Private-LLM models. + If no internal model available → returns 'blocked'. + """ + import base64 + try: + aiService = None + if self._getService: + try: + aiService = self._getService("ai") + except Exception: + pass + if not aiService or not hasattr(aiService, 'callAi'): + logger.warning(f"processImage: AI service not available — blocking image '{fileName}'") + return { + 'status': 'blocked', + 'hasSensitiveContent': True, + 'analysis': '', + 'processed_info': {'type': 'image', 'status': 'blocked', 'reason': 'AI service unavailable'} + } + + from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum + + _b64Data = base64.b64encode(imageBytes).decode('utf-8') + _dataUrl = f"data:{mimeType};base64,{_b64Data}" + + _prompt = ( + "Analyze this image for personally identifiable information (PII). " + "Check for: names, addresses, phone numbers, email addresses, ID numbers, " + "faces, signatures, handwritten text, license plates, financial data. 
" + "Respond with JSON: {\"hasPII\": true/false, \"findings\": [\"...\"]}" + ) + + _request = AiCallRequest( + prompt=_prompt, + options=AiCallOptions(operationType=OperationTypeEnum.NEUTRALIZATION_IMAGE), + messages=[{"role": "user", "content": [ + {"type": "text", "text": _prompt}, + {"type": "image_url", "image_url": {"url": _dataUrl}}, + ]}], + ) + + _response = await aiService.callAi(_request) + + _hasPII = False + _analysis = _response.content if _response and hasattr(_response, 'content') else '' + if _analysis: + _lowerAnalysis = _analysis.lower() + if '"haspii": true' in _lowerAnalysis or '"haspii":true' in _lowerAnalysis: + _hasPII = True + + return { + 'status': 'blocked' if _hasPII else 'ok', + 'hasSensitiveContent': _hasPII, + 'analysis': _analysis, + 'processed_info': {'type': 'image', 'status': 'blocked' if _hasPII else 'ok', 'fileName': fileName} + } + except Exception as e: + logger.error(f"processImage failed for '{fileName}': {e}") + return { + 'status': 'blocked', + 'hasSensitiveContent': True, + 'analysis': '', + 'processed_info': {'type': 'image', 'status': 'error', 'error': str(e)} + } + + def processImage(self, imageBytes: bytes, fileName: str, mimeType: str = "image/png") -> Dict[str, Any]: + """Sync wrapper for processImageAsync. Uses asyncio.run when no event loop is running.""" + import asyncio + try: + return asyncio.run(self.processImageAsync(imageBytes, fileName, mimeType)) + except RuntimeError: + loop = asyncio.get_event_loop() + return loop.run_until_complete(self.processImageAsync(imageBytes, fileName, mimeType)) + def resolveText(self, text: str) -> str: if not self.interfaceNeutralizer: return text @@ -295,9 +378,21 @@ class NeutralizationService: p = part if isinstance(part, dict) else part.model_dump() if hasattr(part, 'model_dump') else part type_group = p.get('typeGroup', '') data = p.get('data', '') - if type_group in ('binary', 'image') or not (data and str(data).strip()): + if type_group == 'binary' or not (data and str(data).strip()): neutralized_parts.append(part) continue + if type_group == 'image': + import base64 as _b64img + try: + _imgBytes = _b64img.b64decode(str(data)) + _imgResult = await self.processImageAsync(_imgBytes, fileName) + if _imgResult.get("status") == "ok": + neutralized_parts.append(part) + else: + logger.warning(f"Image part blocked in binary file '{fileName}' (PII detected), removing") + except Exception as _imgErr: + logger.warning(f"Image check failed in binary file '{fileName}': {_imgErr}, removing (fail-safe)") + continue nr = self._neutralizeText(str(data), 'text' if type_group != 'table' else 'csv') proc = nr.get('processed_info', {}) or {} if isinstance(proc, dict) and proc.get('type') == 'error': diff --git a/modules/interfaces/interfaceBootstrap.py b/modules/interfaces/interfaceBootstrap.py index 7eccb3ee..93b17d6a 100644 --- a/modules/interfaces/interfaceBootstrap.py +++ b/modules/interfaces/interfaceBootstrap.py @@ -201,6 +201,13 @@ def initBootstrap(db: DatabaseConnector) -> None: except Exception as e: logger.error(f"Voice & documents migration failed: {e}") + # Backfill FileContentIndex scope fields from FileItem (one-time) + try: + from modules.migration.migrateRagScopeFields import runMigration as migrateRagScope + migrateRagScope(appDb=db) + except Exception as e: + logger.error(f"RAG scope fields migration failed: {e}") + # After migration: root mandate is purely technical — no feature instances if not migrationDone and mandateId: initRootMandateFeatures(db, mandateId) diff --git 
a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py index 13179634..ffde890f 100644 --- a/modules/interfaces/interfaceDbApp.py +++ b/modules/interfaces/interfaceDbApp.py @@ -1931,14 +1931,26 @@ class AppObjects: raise logger.debug(f"Subscription capacity check skipped: {e}") - def _syncSubscriptionQuantity(self, mandateId: str) -> None: - """Sync Stripe subscription quantities after a resource mutation.""" + def _syncSubscriptionQuantity(self, mandateId: str, *, raiseOnError: bool = False) -> None: + """Sync Stripe subscription quantities after a resource mutation. + + Args: + raiseOnError: If True, propagate errors (billing-critical paths). + """ try: from modules.interfaces.interfaceDbSubscription import getInterface as getSubInterface from modules.security.rootAccess import getRootUser subIf = getSubInterface(getRootUser(), mandateId) - subIf.syncQuantityToStripe(mandateId) + operative = subIf.getOperativeForMandate(mandateId) + if not operative: + if raiseOnError: + raise ValueError(f"Kein operatives Abonnement für Mandant {mandateId}") + logger.debug("No operative subscription for mandate %s — quantity sync skipped", mandateId) + return + subIf.syncQuantityToStripe(operative["id"], raiseOnError=raiseOnError) except Exception as e: + if raiseOnError: + raise logger.debug(f"Subscription quantity sync skipped: {e}") def deleteUserMandate(self, userId: str, mandateId: str) -> bool: diff --git a/modules/interfaces/interfaceDbBilling.py b/modules/interfaces/interfaceDbBilling.py index 1069314f..bb2dc5c9 100644 --- a/modules/interfaces/interfaceDbBilling.py +++ b/modules/interfaces/interfaceDbBilling.py @@ -970,6 +970,41 @@ class BillingObjects: ) return created + # ========================================================================= + # Subscription AI-Budget Credit + # ========================================================================= + + def creditSubscriptionBudget(self, mandateId: str, planKey: str, periodLabel: str = "") -> Optional[Dict[str, Any]]: + """Credit the plan's budgetAiCHF to the mandate pool account. + + Should be called once per billing period (initial activation + each invoice.paid). 
+ Returns the created CREDIT transaction or None if budget is 0.""" + from modules.datamodels.datamodelSubscription import _getPlan + + plan = _getPlan(planKey) + if not plan or not plan.budgetAiCHF or plan.budgetAiCHF <= 0: + return None + + poolAccount = self.getOrCreateMandateAccount(mandateId) + description = f"AI-Budget ({planKey})" + if periodLabel: + description += f" – {periodLabel}" + + transaction = BillingTransaction( + accountId=poolAccount["id"], + transactionType=TransactionTypeEnum.CREDIT, + amount=plan.budgetAiCHF, + description=description, + referenceType=ReferenceTypeEnum.SUBSCRIPTION, + referenceId=mandateId, + ) + created = self.createTransaction(transaction) + logger.info( + "AI-Budget credited mandate=%s plan=%s amount=%.2f CHF", + mandateId, planKey, plan.budgetAiCHF, + ) + return created + # ========================================================================= # Workflow Cost Query # ========================================================================= diff --git a/modules/interfaces/interfaceDbKnowledge.py b/modules/interfaces/interfaceDbKnowledge.py index c7b9e29a..ede37c87 100644 --- a/modules/interfaces/interfaceDbKnowledge.py +++ b/modules/interfaces/interfaceDbKnowledge.py @@ -294,7 +294,6 @@ class KnowledgeObjects: userId: str = None, featureInstanceId: str = None, mandateId: str = None, - isShared: bool = None, scope: str = None, limit: int = 10, minScore: float = None, @@ -305,10 +304,9 @@ class KnowledgeObjects: Args: queryVector: Query embedding vector. - userId: Filter by user (Instance Layer). + userId: Filter by user (personal scope). featureInstanceId: Filter by feature instance. - mandateId: Filter by mandate (for Shared Layer lookups). - isShared: If True, search Shared Layer via FileContentIndex join. + mandateId: Filter by mandate (scope=mandate means visible to all mandate users). scope: If provided, filter by this specific scope value. If not provided, use scope-union approach (personal + featureInstance + mandate + global). limit: Max results. 
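
The scope-union behaviour described in this docstring can be condensed into a short sketch (simplified; the shipped code builds recordFilter dicts over FileContentIndex and joins chunk rows via fileId, as the hunk below shows):

# Illustrative sketch of the scope-union search, not the production implementation.
from typing import Any, Dict, List

def buildScopeUnionFilters(userId: str, featureInstanceId: str, mandateId: str) -> List[Dict[str, Any]]:
    """One filter per visibility layer; hits are merged and deduplicated by file ID."""
    filters: List[Dict[str, Any]] = []
    if userId:
        filters.append({"scope": "personal", "userId": userId})          # own uploads
    if featureInstanceId:
        filters.append({"scope": "featureInstance", "featureInstanceId": featureInstanceId})
    if mandateId:
        filters.append({"scope": "mandate", "mandateId": mandateId})     # visible to all mandate users
    filters.append({"scope": "global"})                                  # sysAdmin-curated content
    return filters

When an explicit scope argument is passed instead, only that single layer is searched, additionally narrowed by mandateId and featureInstanceId.
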
@@ -323,8 +321,13 @@ class KnowledgeObjects: recordFilter["contentType"] = contentType if scope: + scopeFilter: Dict[str, Any] = {"scope": scope} + if mandateId: + scopeFilter["mandateId"] = mandateId + if featureInstanceId: + scopeFilter["featureInstanceId"] = featureInstanceId scopedFileIds = self.db.getRecordset( - FileContentIndex, recordFilter={"scope": scope} + FileContentIndex, recordFilter=scopeFilter ) fileIds = [ idx.get("id") if isinstance(idx, dict) else getattr(idx, "id", None) @@ -334,16 +337,6 @@ class KnowledgeObjects: if not fileIds: return [] recordFilter["fileId"] = fileIds - elif isShared and mandateId: - sharedIndexes = self.db.getRecordset( - FileContentIndex, - recordFilter={"mandateId": mandateId, "isShared": True}, - ) - sharedFileIds = [idx.get("id") if isinstance(idx, dict) else getattr(idx, "id", None) for idx in sharedIndexes] - sharedFileIds = [fid for fid in sharedFileIds if fid] - if not sharedFileIds: - return [] - recordFilter["fileId"] = sharedFileIds elif userId or featureInstanceId or mandateId: scopedFileIds = self._getScopedFileIds( userId=userId, @@ -410,7 +403,7 @@ class KnowledgeObjects: if mandateId: files_shared = self.db.getRecordset( FileContentIndex, - recordFilter={"mandateId": mandateId, "isShared": True}, + recordFilter={"mandateId": mandateId, "scope": "mandate"}, ) by_id: Dict[str, Dict[str, Any]] = {} @@ -559,6 +552,76 @@ class KnowledgeObjects: } +def aggregateMandateRagTotalBytes(mandateId: str) -> int: + """Sum FileContentIndex.totalSize for a mandate. + + Primary strategy (relies on correct scope fields on FileContentIndex): + 1. FileContentIndex rows with mandateId on the index + 2. FileContentIndex rows with featureInstanceId of any mandate FeatureInstance + Deduplicates by id. + """ + if not mandateId: + return 0 + from modules.datamodels.datamodelFeatures import FeatureInstance + from modules.interfaces.interfaceDbApp import getRootInterface + + knowDb = getInterface(None).db + appDb = getRootInterface().db + byId: Dict[str, Dict[str, Any]] = {} + + for row in knowDb.getRecordset(FileContentIndex, recordFilter={"mandateId": mandateId}): + rid = row.get("id") + if rid: + byId[str(rid)] = row + + instances = appDb.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) + instIds = [str(inst.get("id", "")) for inst in instances if inst.get("id")] + + for instId in instIds: + for row in knowDb.getRecordset(FileContentIndex, recordFilter={"featureInstanceId": instId}): + rid = row.get("id") + if rid and str(rid) not in byId: + byId[str(rid)] = row + + # DEPRECATED: file-ID-correlation fallback from poweron_management. + # Only needed for pre-migration data where mandateId/featureInstanceId on the + # FileContentIndex are empty. Remove once migrateRagScopeFields has been run. 
+ _fallbackCount = 0 + try: + from modules.datamodels.datamodelFiles import FileItem + from modules.interfaces.interfaceDbManagement import ComponentObjects + mgmtDb = ComponentObjects().db + knowledgeIf = getInterface(None) + + fileIds: set = set() + for f in mgmtDb.getRecordset(FileItem, recordFilter={"mandateId": mandateId}): + fid = f.get("id") if isinstance(f, dict) else getattr(f, "id", None) + if fid: + fileIds.add(str(fid)) + for instId in instIds: + for f in mgmtDb.getRecordset(FileItem, recordFilter={"featureInstanceId": instId}): + fid = f.get("id") if isinstance(f, dict) else getattr(f, "id", None) + if fid: + fileIds.add(str(fid)) + + for fid in fileIds: + if fid in byId: + continue + row = knowledgeIf.getFileContentIndex(fid) + if row: + byId[fid] = row + _fallbackCount += 1 + except Exception as e: + logger.warning("aggregateMandateRagTotalBytes fallback failed: %s", e) + + total = sum(int(r.get("totalSize") or 0) for r in byId.values()) + logger.info( + "aggregateMandateRagTotalBytes(%s): %d indexes, %d bytes (fallback: %d)", + mandateId, len(byId), total, _fallbackCount, + ) + return total + + def getInterface(currentUser: Optional[User] = None) -> KnowledgeObjects: """Get or create a KnowledgeObjects singleton.""" if "default" not in _instances: diff --git a/modules/interfaces/interfaceDbManagement.py b/modules/interfaces/interfaceDbManagement.py index 28842958..0a16b734 100644 --- a/modules/interfaces/interfaceDbManagement.py +++ b/modules/interfaces/interfaceDbManagement.py @@ -1053,15 +1053,20 @@ class ComponentObjects: # Ensure fileName is unique uniqueName = self._generateUniquefileName(name) - # Use mandateId and featureInstanceId from context for proper data isolation - # Convert None to empty string to satisfy Pydantic validation mandateId = self.mandateId or "" featureInstanceId = self.featureInstanceId or "" - - # Create FileItem instance + + if featureInstanceId: + scope = "featureInstance" + elif mandateId: + scope = "mandate" + else: + scope = "personal" + fileItem = FileItem( mandateId=mandateId, featureInstanceId=featureInstanceId, + scope=scope, fileName=uniqueName, mimeType=mimeType, fileSize=fileSize, diff --git a/modules/interfaces/interfaceDbSubscription.py b/modules/interfaces/interfaceDbSubscription.py index d6832f14..f1d7ccf7 100644 --- a/modules/interfaces/interfaceDbSubscription.py +++ b/modules/interfaces/interfaceDbSubscription.py @@ -309,13 +309,11 @@ class SubscriptionObjects: return self._getMandateDataVolumeMB(mandateId) def _getMandateDataVolumeMB(self, mandateId: str) -> float: - """Sum RAG index size (FileContentIndex.totalSize) across all feature instances of the mandate.""" + """Sum RAG index size (FileContentIndex.totalSize) for the mandate; reads poweron_knowledge.""" try: - from modules.datamodels.datamodelKnowledge import FileContentIndex - knowledgeDb = _getAppDatabaseConnector() - indexes = knowledgeDb.getRecordset(FileContentIndex, recordFilter={"mandateId": mandateId}) - totalBytes = sum(int(idx.get("totalSize") or 0) for idx in indexes) - return totalBytes / (1024 * 1024) + from modules.interfaces.interfaceDbKnowledge import aggregateMandateRagTotalBytes + + return aggregateMandateRagTotalBytes(mandateId) / (1024 * 1024) except Exception: return 0.0 @@ -359,11 +357,18 @@ class SubscriptionObjects: # Stripe quantity sync # ========================================================================= - def syncQuantityToStripe(self, subscriptionId: str) -> None: + def syncQuantityToStripe(self, subscriptionId: str, *, raiseOnError: 
bool = False) -> None: """Update Stripe subscription item quantities to match actual active counts. - Takes subscriptionId, not mandateId.""" + Takes subscriptionId, not mandateId. + + Args: + raiseOnError: If True, propagate Stripe API errors instead of logging them. + Use True for billing-critical paths (store activation). + """ sub = self.getById(subscriptionId) if not sub or not sub.get("stripeSubscriptionId"): + if raiseOnError: + raise ValueError(f"Subscription {subscriptionId} hat keine Stripe-Anbindung — Abrechnung nicht möglich.") return mandateId = sub["mandateId"] @@ -389,3 +394,5 @@ class SubscriptionObjects: logger.info("Stripe quantity synced for sub %s: users=%d, instances=%d", subscriptionId, activeUsers, activeInstances) except Exception as e: logger.error("syncQuantityToStripe(%s) failed: %s", subscriptionId, e) + if raiseOnError: + raise diff --git a/modules/migration/migrateRagScopeFields.py b/modules/migration/migrateRagScopeFields.py new file mode 100644 index 00000000..82e0e3fb --- /dev/null +++ b/modules/migration/migrateRagScopeFields.py @@ -0,0 +1,114 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +Migration: Backfill FileContentIndex scope fields from FileItem (Single Source of Truth). + +Fixes legacy rows in poweron_knowledge where scope/mandateId/featureInstanceId +are empty or default ("personal") despite the corresponding FileItem having correct values. + +Idempotent — safe to run multiple times. Uses a DB flag to skip if already completed. +""" + +import logging +from modules.shared.configuration import APP_CONFIG +from modules.connectors.connectorDbPostgre import _get_cached_connector + +logger = logging.getLogger(__name__) + +_MIGRATION_FLAG_KEY = "migration_rag_scope_fields_completed" + + +def _isMigrationCompleted(appDb) -> bool: + try: + from modules.datamodels.datamodelUam import Mandate + records = appDb.getRecordset(Mandate, recordFilter={"name": _MIGRATION_FLAG_KEY}) + return len(records) > 0 + except Exception: + return False + + +def _setMigrationCompleted(appDb) -> None: + try: + from modules.datamodels.datamodelUam import Mandate + flag = Mandate(name=_MIGRATION_FLAG_KEY, description="RAG scope fields migration completed") + appDb.recordCreate(Mandate, flag) + except Exception as e: + logger.error("Could not set migration flag: %s", e) + + +def runMigration(appDb=None) -> dict: + """Backfill FileContentIndex rows from FileItem metadata. + + Returns dict with counts: {total, updated, skipped, orphaned}. 
+ """ + from modules.datamodels.datamodelKnowledge import FileContentIndex + from modules.datamodels.datamodelFiles import FileItem + from modules.interfaces.interfaceDbKnowledge import getInterface as getKnowledgeInterface + from modules.interfaces.interfaceDbManagement import ComponentObjects + + if appDb is None: + from modules.interfaces.interfaceDbApp import getRootInterface + appDb = getRootInterface().db + + if _isMigrationCompleted(appDb): + logger.info("migrateRagScopeFields: already completed, skipping") + return {"total": 0, "updated": 0, "skipped": 0, "orphaned": 0} + + knowDb = getKnowledgeInterface(None).db + mgmtDb = ComponentObjects().db + + allIndexes = knowDb.getRecordset(FileContentIndex, recordFilter={}) + total = len(allIndexes) + updated = 0 + skipped = 0 + orphaned = 0 + + logger.info("migrateRagScopeFields: processing %d FileContentIndex rows", total) + + for idx in allIndexes: + idxId = idx.get("id") if isinstance(idx, dict) else getattr(idx, "id", None) + if not idxId: + skipped += 1 + continue + + fileItem = mgmtDb._loadRecord(FileItem, str(idxId)) + if not fileItem: + orphaned += 1 + continue + + _get = (lambda k, d="": fileItem.get(k, d)) if isinstance(fileItem, dict) else (lambda k, d="": getattr(fileItem, k, d)) + + fiScope = _get("scope") or "personal" + fiMandateId = str(_get("mandateId") or "") + fiFeatureInstanceId = str(_get("featureInstanceId") or "") + + idxGet = (lambda k, d="": idx.get(k, d)) if isinstance(idx, dict) else (lambda k, d="": getattr(idx, k, d)) + currentScope = idxGet("scope") or "personal" + currentMandateId = str(idxGet("mandateId") or "") + currentFeatureInstanceId = str(idxGet("featureInstanceId") or "") + + updates = {} + if fiScope != currentScope: + updates["scope"] = fiScope + if fiMandateId and fiMandateId != currentMandateId: + updates["mandateId"] = fiMandateId + if fiFeatureInstanceId and fiFeatureInstanceId != currentFeatureInstanceId: + updates["featureInstanceId"] = fiFeatureInstanceId + + if updates: + try: + knowDb.recordModify(FileContentIndex, str(idxId), updates) + updated += 1 + logger.debug("migrateRagScopeFields: updated %s -> %s", idxId, updates) + except Exception as e: + logger.error("migrateRagScopeFields: failed to update %s: %s", idxId, e) + skipped += 1 + else: + skipped += 1 + + _setMigrationCompleted(appDb) + logger.info( + "migrateRagScopeFields complete: total=%d, updated=%d, skipped=%d, orphaned=%d", + total, updated, skipped, orphaned, + ) + return {"total": total, "updated": updated, "skipped": skipped, "orphaned": orphaned} diff --git a/modules/routes/routeAdminFeatures.py b/modules/routes/routeAdminFeatures.py index 12206b06..e69df7b9 100644 --- a/modules/routes/routeAdminFeatures.py +++ b/modules/routes/routeAdminFeatures.py @@ -576,14 +576,15 @@ def create_feature_instance( config=data.config ) - # Sync Stripe quantity after successful creation try: from modules.interfaces.interfaceDbSubscription import getInterface as _getSubIf2 from modules.security.rootAccess import getRootUser as _getRU _subIf2 = _getSubIf2(_getRU(), mandateIdStr) - _subIf2.syncQuantityToStripe(mandateIdStr) - except Exception: - pass + _operative = _subIf2.getOperativeForMandate(mandateIdStr) + if _operative: + _subIf2.syncQuantityToStripe(_operative["id"], raiseOnError=True) + except Exception as e: + logger.error("Stripe quantity sync failed for admin feature creation in mandate %s: %s", mandateIdStr, e) logger.info( f"User {context.user.id} created feature instance '{data.label}' " diff --git 
a/modules/routes/routeBilling.py b/modules/routes/routeBilling.py index 13e94559..0f612d45 100644 --- a/modules/routes/routeBilling.py +++ b/modules/routes/routeBilling.py @@ -1104,6 +1104,12 @@ def _handleSubscriptionCheckoutCompleted(session, eventId: str) -> None: updatedSub = subInterface.getById(subscriptionRecordId) _notifySubscriptionChange(mandateId, "activated", plan, subscriptionRecord=updatedSub, platformUrl=platformUrl) + try: + billingIf = _getRootInterface() + billingIf.creditSubscriptionBudget(mandateId, planKey, periodLabel="Erstaktivierung") + except Exception as ex: + logger.error("creditSubscriptionBudget on activation failed: %s", ex) + logger.info( "Checkout completed: sub=%s -> %s, mandate=%s, plan=%s", subscriptionRecordId, toStatus.value, mandateId, planKey, @@ -1162,9 +1168,14 @@ def _handleSubscriptionWebhook(event) -> None: if stripeStatus == "active" and currentStatus == SubscriptionStatusEnum.SCHEDULED: subInterface.transitionStatus(subId, SubscriptionStatusEnum.SCHEDULED, SubscriptionStatusEnum.ACTIVE) subService.invalidateCache(mandateId) - plan = _getPlan(sub.get("planKey", "")) + planKey = sub.get("planKey", "") + plan = _getPlan(planKey) refreshedSub = subInterface.getById(subId) _notifySubscriptionChange(mandateId, "activated", plan, subscriptionRecord=refreshedSub, platformUrl=webhookPlatformUrl) + try: + _getRootInterface().creditSubscriptionBudget(mandateId, planKey, periodLabel="Erstaktivierung") + except Exception as ex: + logger.error("creditSubscriptionBudget SCHEDULED->ACTIVE failed: %s", ex) logger.info("SCHEDULED -> ACTIVE for sub %s (mandate %s)", subId, mandateId) elif stripeStatus == "active" and currentStatus == SubscriptionStatusEnum.PAST_DUE: @@ -1231,14 +1242,24 @@ def _handleSubscriptionWebhook(event) -> None: elif event.type == "invoice.paid": period_ts = obj.get("period_start") + periodLabel = "" if period_ts: period_start_at = datetime.fromtimestamp(int(period_ts), tz=timezone.utc) + periodLabel = period_start_at.strftime("%Y-%m-%d") try: billing_if = _getRootInterface() billing_if.resetStorageBillingPeriod(mandateId, period_start_at) billing_if.reconcileMandateStorageBilling(mandateId) except Exception as ex: logger.error("Storage billing on invoice.paid failed: %s", ex) + + planKey = sub.get("planKey", "") + try: + billing_if = _getRootInterface() + billing_if.creditSubscriptionBudget(mandateId, planKey, periodLabel=periodLabel or "Periodenverlängerung") + except Exception as ex: + logger.error("creditSubscriptionBudget on invoice.paid failed: %s", ex) + logger.info("Invoice paid for sub %s (mandate %s)", subId, mandateId) return None diff --git a/modules/routes/routeDataFiles.py b/modules/routes/routeDataFiles.py index e95da174..17e0ef56 100644 --- a/modules/routes/routeDataFiles.py +++ b/modules/routes/routeDataFiles.py @@ -1,6 +1,6 @@ # Copyright (c) 2025 Patrick Motsch # All rights reserved. 
-from fastapi import APIRouter, HTTPException, Depends, File, UploadFile, Form, Path, Request, status, Query, Response, Body +from fastapi import APIRouter, HTTPException, Depends, File, UploadFile, Form, Path, Request, status, Query, Response, Body, BackgroundTasks from fastapi.responses import JSONResponse from typing import List, Dict, Any, Optional import logging @@ -41,13 +41,16 @@ async def _autoIndexFile(fileId: str, fileName: str, mimeType: str, user): file_meta = mgmtInterface.getFile(fileId) feature_instance_id = "" mandate_id = "" + file_scope = "personal" if file_meta: if isinstance(file_meta, dict): feature_instance_id = file_meta.get("featureInstanceId") or "" mandate_id = file_meta.get("mandateId") or "" + file_scope = file_meta.get("scope") or "personal" else: feature_instance_id = getattr(file_meta, "featureInstanceId", None) or "" mandate_id = getattr(file_meta, "mandateId", None) or "" + file_scope = getattr(file_meta, "scope", None) or "personal" logger.info(f"Auto-index starting for {fileName} ({len(rawBytes)} bytes, {mimeType})") @@ -61,6 +64,7 @@ async def _autoIndexFile(fileId: str, fileName: str, mimeType: str, user): userId=userId, featureInstanceId=str(feature_instance_id) if feature_instance_id else "", mandateId=str(mandate_id) if mandate_id else "", + scope=file_scope, ) logger.info( f"Pre-scan complete for {fileName}: " @@ -667,6 +671,7 @@ def batch_move_items( @limiter.limit("30/minute") def updateFileScope( request: Request, + background_tasks: BackgroundTasks, fileId: str = Path(..., description="ID of the file"), scope: str = Body(..., embed=True), context: RequestContext = Depends(getRequestContext), @@ -700,19 +705,18 @@ def updateFileScope( except Exception as e: logger.warning(f"Failed to update FileContentIndex scope for file {fileId}: {e}") - # Trigger re-indexing so RAG embeddings metadata reflects the new scope - try: - fileMeta = managementInterface.getFile(fileId) - if fileMeta: - import asyncio - asyncio.ensure_future(_autoIndexFile( - fileId=fileId, - fileName=fileMeta.fileName if hasattr(fileMeta, "fileName") else fileMeta.get("fileName", ""), - mimeType=fileMeta.mimeType if hasattr(fileMeta, "mimeType") else fileMeta.get("mimeType", ""), - user=context.user, - )) - except Exception as e: - logger.warning(f"Failed to trigger re-index after scope change for file {fileId}: {e}") + fileMeta = managementInterface.getFile(fileId) + if fileMeta: + fn = fileMeta.fileName if hasattr(fileMeta, "fileName") else fileMeta.get("fileName", "") + mt = fileMeta.mimeType if hasattr(fileMeta, "mimeType") else fileMeta.get("mimeType", "") + + async def _runReindexAfterScopeChange(): + try: + await _autoIndexFile(fileId=fileId, fileName=fn, mimeType=mt, user=context.user) + except Exception as ex: + logger.warning("Re-index after scope change failed for %s: %s", fileId, ex) + + background_tasks.add_task(_runReindexAfterScopeChange) return {"fileId": fileId, "scope": scope, "updated": True} except HTTPException: @@ -726,11 +730,18 @@ def updateFileScope( @limiter.limit("30/minute") def updateFileNeutralize( request: Request, + background_tasks: BackgroundTasks, fileId: str = Path(..., description="ID of the file"), neutralize: bool = Body(..., embed=True), context: RequestContext = Depends(getRequestContext), ) -> Dict[str, Any]: - """Toggle neutralization flag on a file.""" + """Toggle neutralization flag on a file. 
+ + FAILSAFE: When turning neutralize ON, the existing Knowledge Store index + and all content chunks are deleted SYNCHRONOUSLY before the response is + returned. The re-index happens in a background task. If re-indexing + fails the file simply has no index — no un-neutralized data can leak. + """ try: managementInterface = interfaceDbManagement.getInterface( context.user, @@ -740,35 +751,54 @@ def updateFileNeutralize( managementInterface.updateFile(fileId, {"neutralize": neutralize}) - # Update FileContentIndex neutralization metadata - try: - from modules.interfaces.interfaceDbKnowledge import getInterface as getKnowledgeInterface - from modules.datamodels.datamodelKnowledge import FileContentIndex - knowledgeDb = getKnowledgeInterface() - neutralizationStatus = "neutralized" if neutralize else "original" - indices = knowledgeDb.db.getRecordset(FileContentIndex, recordFilter={"id": fileId}) - for idx in indices: - idxId = idx.get("id") if isinstance(idx, dict) else getattr(idx, "id", None) - if idxId: - knowledgeDb.db.recordModify(FileContentIndex, idxId, {"neutralizationStatus": neutralizationStatus}) - except Exception as e: - logger.warning(f"Failed to update FileContentIndex neutralize for file {fileId}: {e}") + from modules.interfaces.interfaceDbKnowledge import getInterface as getKnowledgeInterface + knowledgeDb = getKnowledgeInterface() - # Trigger re-indexing so content is re-processed with/without neutralization - try: - fileMeta = managementInterface.getFile(fileId) - if fileMeta: - import asyncio - asyncio.ensure_future(_autoIndexFile( - fileId=fileId, - fileName=fileMeta.fileName if hasattr(fileMeta, "fileName") else fileMeta.get("fileName", ""), - mimeType=fileMeta.mimeType if hasattr(fileMeta, "mimeType") else fileMeta.get("mimeType", ""), - user=context.user, - )) - except Exception as e: - logger.warning(f"Failed to trigger re-index after neutralize change for file {fileId}: {e}") + if neutralize: + # ── CRITICAL: purge existing (potentially un-neutralized) index + # This MUST succeed before the response is sent so that no stale + # raw-text chunks remain searchable while re-indexing runs. + try: + knowledgeDb.deleteFileContentIndex(fileId) + logger.info("Neutralize toggle ON: deleted index + chunks for file %s", fileId) + except Exception as e: + logger.error("Neutralize toggle ON: FAILED to delete index for file %s: %s", fileId, e) + raise HTTPException( + status_code=500, + detail=f"Could not purge existing index for neutralization — aborting toggle. 
Error: {e}", + ) + else: + # Turning neutralize OFF: update metadata only; re-index will overwrite + try: + from modules.datamodels.datamodelKnowledge import FileContentIndex + indices = knowledgeDb.db.getRecordset(FileContentIndex, recordFilter={"id": fileId}) + for idx in indices: + idxId = idx.get("id") if isinstance(idx, dict) else getattr(idx, "id", None) + if idxId: + knowledgeDb.db.recordModify(FileContentIndex, idxId, { + "neutralizationStatus": "original", + "isNeutralized": False, + }) + except Exception as e: + logger.warning("Failed to update FileContentIndex after neutralize-OFF for %s: %s", fileId, e) + + # Background re-index (safe: if it fails, there is simply no index) + fileMeta = managementInterface.getFile(fileId) + if fileMeta: + fn = fileMeta.fileName if hasattr(fileMeta, "fileName") else fileMeta.get("fileName", "") + mt = fileMeta.mimeType if hasattr(fileMeta, "mimeType") else fileMeta.get("mimeType", "") + + async def _runReindexAfterNeutralizeToggle(): + try: + await _autoIndexFile(fileId=fileId, fileName=fn, mimeType=mt, user=context.user) + except Exception as ex: + logger.error("Re-index after neutralize toggle failed for %s: %s (file has NO index until next re-index)", fileId, ex) + + background_tasks.add_task(_runReindexAfterNeutralizeToggle) return {"fileId": fileId, "neutralize": neutralize, "updated": True} + except HTTPException: + raise except Exception as e: logger.error(f"Error updating file neutralize flag: {e}") raise HTTPException(status_code=500, detail=str(e)) diff --git a/modules/routes/routeStore.py b/modules/routes/routeStore.py index c9512d3f..5c6f782a 100644 --- a/modules/routes/routeStore.py +++ b/modules/routes/routeStore.py @@ -282,8 +282,9 @@ def activateStoreFeature( context: RequestContext = Depends(getRequestContext) ) -> Dict[str, Any]: """ - Activate a store feature. Creates a new FeatureInstance in the target mandate. - If user has no admin mandate, auto-creates a personal mandate. + Activate a store feature. Billing-gated: a feature instance is ONLY created + if the Stripe subscription quantity update succeeds (proration confirmed). + On any billing failure the provisioned instance is rolled back. """ featureCode = data.featureCode userId = str(context.user.id) @@ -302,21 +303,39 @@ def activateStoreFeature( if not _isUserAdminInMandate(db, userId, mandateId): raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Not admin in target mandate") - # Check subscription capacity - from modules.datamodels.datamodelSubscription import MandateSubscription, BUILTIN_PLANS - subs = db.getRecordset(MandateSubscription, recordFilter={"mandateId": mandateId}) - if subs: - sub = subs[0] - plan = BUILTIN_PLANS.get(sub.get("planKey")) - if plan and plan.maxFeatureInstances is not None: - currentInstances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) - if len(currentInstances) >= plan.maxFeatureInstances: - raise HTTPException( - status_code=status.HTTP_402_PAYMENT_REQUIRED, - detail=f"Feature instance limit reached ({plan.maxFeatureInstances}). Upgrade your plan." - ) + # ── 1. 
Resolve subscription & plan ────────────────────────────── + from modules.datamodels.datamodelSubscription import MandateSubscription, BUILTIN_PLANS, SubscriptionStatusEnum + from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot - # Create new FeatureInstance + subInterface = _getSubRoot() + operative = subInterface.getOperativeForMandate(mandateId) + if not operative: + raise HTTPException( + status_code=status.HTTP_402_PAYMENT_REQUIRED, + detail="Kein aktives Abonnement. Bitte zuerst ein Abo abschliessen.", + ) + + planKey = operative.get("planKey", "") + plan = BUILTIN_PLANS.get(planKey) + isBillable = plan is not None and (plan.pricePerFeatureInstanceCHF or 0) > 0 + + if isBillable: + if not operative.get("stripeSubscriptionId") or not operative.get("stripeItemIdInstances"): + raise HTTPException( + status_code=status.HTTP_402_PAYMENT_REQUIRED, + detail="Stripe-Abonnement ist nicht vollständig eingerichtet — Aktivierung nicht möglich.", + ) + + # ── 2. Capacity check ─────────────────────────────────────────── + if plan and plan.maxFeatureInstances is not None: + currentInstances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) + if len(currentInstances) >= plan.maxFeatureInstances: + raise HTTPException( + status_code=status.HTTP_402_PAYMENT_REQUIRED, + detail=f"Feature-Instanz-Limit erreicht ({plan.maxFeatureInstances}). Bitte Plan upgraden.", + ) + + # ── 3. Provision instance ─────────────────────────────────────── featureInterface = getFeatureInterface(db) featureLabel = featureDef.get("label", {}).get("en", featureCode) instance = featureInterface.createFeatureInstance( @@ -332,7 +351,6 @@ def activateStoreFeature( instanceId = instance.get("id") if isinstance(instance, dict) else instance.id - # Grant FeatureAccess with admin role — MUST be feature-specific (e.g. workspace-admin) instanceRoles = db.getRecordset(Role, recordFilter={"featureInstanceId": instanceId}) adminRoleId = None for ir in instanceRoles: @@ -342,21 +360,34 @@ def activateStoreFeature( break if not adminRoleId: + _rollbackInstance(db, instanceId) raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"No feature-specific admin role (e.g. {featureCode}-admin) found for instance {instanceId}. " - f"Template roles were not correctly copied.", + detail=f"Keine Feature-Admin-Rolle für {featureCode} gefunden — Rollback.", ) rootInterface.createFeatureAccess(userId, instanceId, roleIds=[adminRoleId]) - # Sync subscription quantity - try: - rootInterface._syncSubscriptionQuantity(mandateId) - except Exception as e: - logger.warning(f"Failed to sync subscription quantity: {e}") + # ── 4. Billing gate: Stripe quantity sync (MUST succeed) ──────── + if isBillable: + try: + rootInterface._syncSubscriptionQuantity(mandateId, raiseOnError=True) + except Exception as e: + logger.error("Stripe billing for feature activation failed — rolling back instance %s: %s", instanceId, e) + _rollbackInstance(db, instanceId, userId=userId) + raise HTTPException( + status_code=status.HTTP_402_PAYMENT_REQUIRED, + detail=f"Stripe-Abrechnung fehlgeschlagen: {e}. Feature wurde NICHT aktiviert.", + ) + else: + try: + rootInterface._syncSubscriptionQuantity(mandateId) + except Exception as e: + logger.warning("Non-critical Stripe sync failed for free feature: %s", e) - logger.info(f"User {userId} activated '{featureCode}' in mandate {mandateId} (instance={instanceId})") + # ── 5. 
Confirmed — notify ────────────────────────────────────── + _notifyFeatureActivation(mandateId, featureLabel, featureCode, sub=operative, plan=plan) + logger.info("User %s activated '%s' in mandate %s (instance=%s, billed=%s)", userId, featureCode, mandateId, instanceId, isBillable) return { "featureCode": featureCode, @@ -412,11 +443,10 @@ def deactivateStoreFeature( instanceDeleted = True logger.info(f"Orphan Control: deleted instance {instanceId} (no remaining accesses)") - # Sync subscription quantity try: - rootInterface._syncSubscriptionQuantity(mandateId) + rootInterface._syncSubscriptionQuantity(mandateId, raiseOnError=True) except Exception as e: - logger.warning(f"Failed to sync subscription quantity: {e}") + logger.error("Stripe quantity sync after deactivation failed for mandate %s: %s", mandateId, e) logger.info(f"User {userId} deactivated instance {instanceId} in mandate {mandateId} (deleted={instanceDeleted})") @@ -433,3 +463,52 @@ def deactivateStoreFeature( except Exception as e: logger.error(f"Error deactivating store feature: {e}") raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)) + + +# ============================================================================ +# Internal helpers +# ============================================================================ + +def _rollbackInstance(db, instanceId: str, userId: str = None) -> None: + """Delete a freshly provisioned FeatureInstance (and its access) on billing failure.""" + try: + if userId: + accesses = db.getRecordset(FeatureAccess, recordFilter={"userId": userId, "featureInstanceId": instanceId}) + for a in accesses: + db.recordDelete(FeatureAccess, a.get("id")) + db.recordDelete(FeatureInstance, instanceId) + logger.info("Rolled back feature instance %s (billing gate)", instanceId) + except Exception as e: + logger.error("Rollback of instance %s failed: %s", instanceId, e) + + +def _notifyFeatureActivation( + mandateId: str, + featureLabel: str, + featureCode: str, + sub: dict = None, + plan = None, +) -> None: + """Send email notification to mandate admins about a newly activated feature.""" + try: + from modules.shared.notifyMandateAdmins import notifyMandateAdmins + + priceLine = "" + if plan and plan.pricePerFeatureInstanceCHF: + priceLine = f"Kosten: CHF {plan.pricePerFeatureInstanceCHF:.2f} / {plan.billingPeriod.value} (anteilig via Stripe-Proration)." 
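+        # priceLine stays empty for free plans; for billable plans Stripe
+        # prorates the charge via the quantity sync performed at activation.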
+ + bodyParagraphs = [ + f"Die Feature-Instanz «{featureLabel}» ({featureCode}) wurde soeben für Ihren Mandanten aktiviert.", + ] + if priceLine: + bodyParagraphs.append(priceLine) + bodyParagraphs.append("Die Stripe-Abrechnung wird automatisch angepasst.") + + notifyMandateAdmins( + mandateId=mandateId, + subject=f"Feature aktiviert: {featureLabel}", + headline="Neue Feature-Instanz aktiviert", + bodyParagraphs=bodyParagraphs, + ) + except Exception as e: + logger.warning("_notifyFeatureActivation failed for mandate %s: %s", mandateId, e) diff --git a/modules/routes/routeSubscription.py b/modules/routes/routeSubscription.py index 7aad386f..88d0b21c 100644 --- a/modules/routes/routeSubscription.py +++ b/modules/routes/routeSubscription.py @@ -183,7 +183,7 @@ def activatePlan( @router.post("/cancel", response_model=Dict[str, Any]) -@limiter.limit("5/minute") +@limiter.limit("30/minute") def cancelSubscription( request: Request, data: CancelRequest, @@ -209,7 +209,7 @@ def cancelSubscription( @router.post("/reactivate", response_model=Dict[str, Any]) -@limiter.limit("5/minute") +@limiter.limit("30/minute") def reactivateSubscription( request: Request, data: ReactivateRequest, @@ -235,7 +235,7 @@ def reactivateSubscription( @router.post("/force-cancel", response_model=Dict[str, Any]) -@limiter.limit("5/minute") +@limiter.limit("30/minute") def forceCancel( request: Request, data: ForceCancelRequest, @@ -451,46 +451,47 @@ def _getDataVolumeUsage( """Calculate current data volume usage for a mandate vs. plan limit.""" from modules.interfaces.interfaceDbApp import getRootInterface from modules.datamodels.datamodelFiles import FileItem - from modules.datamodels.datamodelSubscription import MandateSubscription, SubscriptionPlan - from modules.datamodels.datamodelFeature import FeatureInstance + from modules.datamodels.datamodelFeatures import FeatureInstance + from modules.interfaces.interfaceDbKnowledge import aggregateMandateRagTotalBytes + from modules.interfaces.interfaceDbManagement import getInterface as getMgmtInterface + from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRootIf rootIf = getRootInterface() mandateId = targetMandateId instances = rootIf.db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) - totalBytes = 0 - for inst in instances: - instId = inst.get("id") if isinstance(inst, dict) else getattr(inst, "id", None) - if not instId: - continue - files = rootIf.db.getRecordset(FileItem, recordFilter={"featureInstanceId": instId}) + instIds = [str(inst.get("id") or "") for inst in instances if inst.get("id")] + + mgmtDb = getMgmtInterface().db + totalFileBytes = 0 + for instId in instIds: + files = mgmtDb.getRecordset(FileItem, recordFilter={"featureInstanceId": instId}) for f in files: size = f.get("fileSize") if isinstance(f, dict) else getattr(f, "fileSize", 0) - totalBytes += (size or 0) + totalFileBytes += (size or 0) + mandateFiles = mgmtDb.getRecordset(FileItem, recordFilter={"mandateId": mandateId}) + for f in mandateFiles: + size = f.get("fileSize") if isinstance(f, dict) else getattr(f, "fileSize", 0) + totalFileBytes += (size or 0) + filesMB = round(totalFileBytes / (1024 * 1024), 2) - filesMB = round(totalBytes / (1024 * 1024), 2) - - from modules.datamodels.datamodelKnowledge import FileContentIndex - ragIndexes = rootIf.db.getRecordset(FileContentIndex, recordFilter={"mandateId": mandateId}) - ragBytes = sum(int(idx.get("totalSize") or 0) if isinstance(idx, dict) else int(getattr(idx, "totalSize", 0) or 0) for idx in 
ragIndexes) + ragBytes = aggregateMandateRagTotalBytes(mandateId) ragMB = round(ragBytes / (1024 * 1024), 2) maxMB = None - subs = rootIf.db.getRecordset(MandateSubscription, recordFilter={"mandateId": mandateId}) - for sub in subs: - planKey = sub.get("planKey") if isinstance(sub, dict) else getattr(sub, "planKey", "") - if planKey: - plans = rootIf.db.getRecordset(SubscriptionPlan, recordFilter={"planKey": planKey}) - for plan in plans: - limit = plan.get("maxDataVolumeMB") if isinstance(plan, dict) else getattr(plan, "maxDataVolumeMB", None) - if limit: - maxMB = limit - break - if maxMB: - break + subIf = _getSubRootIf() + operative = subIf.getOperativeForMandate(mandateId) + if operative: + plan = subIf.getPlan(operative.get("planKey") or "") + if plan and plan.maxDataVolumeMB is not None: + maxMB = int(plan.maxDataVolumeMB) usedMB = ragMB percentUsed = round((usedMB / maxMB) * 100, 1) if maxMB else None + logger.info( + "data-volume mandate=%s: files=%.2f MB, rag=%.2f MB, max=%s MB", + mandateId, filesMB, ragMB, maxMB, + ) return { "mandateId": mandateId, "usedMB": usedMB, diff --git a/modules/routes/routeVoiceGoogle.py b/modules/routes/routeVoiceGoogle.py index dc0c7a85..1c796361 100644 --- a/modules/routes/routeVoiceGoogle.py +++ b/modules/routes/routeVoiceGoogle.py @@ -463,7 +463,7 @@ async def save_voice_settings( currentUser: User = Depends(getCurrentUser) ): """Save voice settings for the current user (writes to UserVoicePreferences).""" - from modules.datamodels.datamodelUam import UserVoicePreferences + from modules.datamodels.datamodelUam import UserVoicePreferences, _normalizeTtsVoiceMap from modules.security.rootAccess import getRootInterface rootInterface = getRootInterface() userId = str(currentUser.id) @@ -473,6 +473,8 @@ async def save_voice_settings( "translationSourceLanguage", "translationTargetLanguage", } updateData = {k: v for k, v in settings.items() if k in allowedFields} + if "ttsVoiceMap" in updateData: + updateData["ttsVoiceMap"] = _normalizeTtsVoiceMap(updateData["ttsVoiceMap"]) existing = rootInterface.db.getRecordset( UserVoicePreferences, recordFilter={"userId": userId} diff --git a/modules/routes/routeVoiceUser.py b/modules/routes/routeVoiceUser.py index 9b628eeb..2f21662b 100644 --- a/modules/routes/routeVoiceUser.py +++ b/modules/routes/routeVoiceUser.py @@ -14,7 +14,7 @@ from typing import Any, Dict from fastapi import APIRouter, Body, Depends, HTTPException, Query, Request, status from modules.auth import getCurrentUser, limiter -from modules.datamodels.datamodelUam import User, UserVoicePreferences +from modules.datamodels.datamodelUam import User, UserVoicePreferences, _normalizeTtsVoiceMap from modules.interfaces.interfaceDbApp import getRootInterface from modules.interfaces.interfaceVoiceObjects import getVoiceInterface @@ -79,6 +79,8 @@ def updateVoicePreferences( "translationTargetLanguage", } updateData = {k: v for k, v in preferences.items() if k in allowedFields} + if "ttsVoiceMap" in updateData: + updateData["ttsVoiceMap"] = _normalizeTtsVoiceMap(updateData["ttsVoiceMap"]) if existing: existingRecord = existing[0] diff --git a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py index 23a749ab..4529ede0 100644 --- a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py +++ b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py @@ -27,6 +27,28 @@ _MAX_TOOL_RESULT_CHARS = 50_000 _BINARY_SIGNATURES = (b"%PDF", b"\x89PNG", b"\xff\xd8\xff", 
b"GIF8", b"PK\x03\x04", b"Rar!", b"\x1f\x8b") +def _resolveFileScope(fileId: str, context: dict) -> tuple: + """Resolve featureInstanceId and mandateId for a file from context or management DB. + + Returns (featureInstanceId, mandateId) — never None, always strings. + """ + fiId = context.get("featureInstanceId", "") or "" + mId = context.get("mandateId", "") or "" + if fiId and mId: + return fiId, mId + try: + from modules.datamodels.datamodelFiles import FileItem + from modules.interfaces.interfaceDbManagement import ComponentObjects + fm = ComponentObjects().db._loadRecord(FileItem, fileId) + if fm: + _get = (lambda k: fm.get(k, "")) if isinstance(fm, dict) else (lambda k: getattr(fm, k, "")) + fiId = fiId or str(_get("featureInstanceId") or "") + mId = mId or str(_get("mandateId") or "") + except Exception: + pass + return fiId, mId + + def _looksLikeBinary(data: bytes, sampleSize: int = 1024) -> bool: """Detect binary content by checking for magic bytes and non-printable char ratio.""" if any(data[:8].startswith(sig) for sig in _BINARY_SIGNATURES): @@ -602,16 +624,29 @@ def _registerCoreTools(registry: ToolRegistry, services): if knowledgeService: try: userId = context.get("userId", "") + _fiId, _mId = _resolveFileScope(fileId, context) await knowledgeService.indexFile( fileId=fileId, fileName=fileName, mimeType=mimeType, userId=userId, contentObjects=contentObjects, + featureInstanceId=_fiId, + mandateId=_mId, ) except Exception: pass - textParts = [o["data"] for o in contentObjects if o["contentType"] != "image"] - if textParts: - joined = "\n\n".join(textParts) + joined = "" + if knowledgeService: + _chunks = knowledgeService._knowledgeDb.getContentChunks(fileId) + _textChunks = [ + c for c in (_chunks or []) + if c.get("contentType") != "image" and c.get("data") + ] + if _textChunks: + joined = "\n\n".join(c["data"] for c in _textChunks) + if not joined: + textParts = [o["data"] for o in contentObjects if o["contentType"] != "image"] + joined = "\n\n".join(textParts) if textParts else "" + if joined: chunked = _applyOffsetLimit(joined, offset, limit) if chunked is not None: return ToolResult(toolCallId="", toolName="readFile", success=True, data=chunked) @@ -642,6 +677,36 @@ def _registerCoreTools(registry: ToolRegistry, services): try: text = rawBytes.decode(encoding) if text.strip(): + _fileNeedNeutralize = False + try: + from modules.datamodels.datamodelFiles import FileItem as _FI + from modules.interfaces.interfaceDbManagement import ComponentObjects as _CO + _fRec = _CO().db._loadRecord(_FI, fileId) + if _fRec: + _fG = (lambda k, d=None: _fRec.get(k, d)) if isinstance(_fRec, dict) else (lambda k, d=None: getattr(_fRec, k, d)) + _fileNeedNeutralize = bool(_fG("neutralize", False)) + except Exception: + pass + if _fileNeedNeutralize: + try: + _nSvc = services.getService("neutralization") if hasattr(services, "getService") else None + if _nSvc and hasattr(_nSvc, 'processText'): + _nResult = _nSvc.processText(text) + if _nResult and _nResult.get("neutralized_text"): + text = _nResult["neutralized_text"] + logger.debug(f"readFile: neutralized text for file {fileId}") + else: + logger.warning(f"readFile: neutralization failed for file {fileId}, blocking text (fail-safe)") + return ToolResult(toolCallId="", toolName="readFile", success=True, + data="[File requires neutralization but neutralization failed. 
Content blocked for data protection.]") + else: + logger.warning(f"readFile: neutralization required but service unavailable for file {fileId}") + return ToolResult(toolCallId="", toolName="readFile", success=True, + data="[File requires neutralization but service unavailable. Content blocked for data protection.]") + except Exception as _nErr: + logger.error(f"readFile: neutralization error for file {fileId}: {_nErr}") + return ToolResult(toolCallId="", toolName="readFile", success=True, + data="[File requires neutralization but an error occurred. Content blocked for data protection.]") chunked = _applyOffsetLimit(text, offset, limit) if chunked is not None: return ToolResult(toolCallId="", toolName="readFile", success=True, data=chunked) @@ -1562,7 +1627,7 @@ def _registerCoreTools(registry: ToolRegistry, services): } async def _resolveDataSource(dsId: str): - """Resolve a DataSource record and return (connectionId, service, path) or raise.""" + """Resolve a DataSource record and return (connectionId, service, path, neutralize) or raise.""" chatService = services.chat ds = chatService.getDataSource(dsId) if hasattr(chatService, "getDataSource") else None if not ds: @@ -1571,11 +1636,12 @@ def _registerCoreTools(registry: ToolRegistry, services): sourceType = ds.get("sourceType", "") path = ds.get("path", "/") label = ds.get("label", "") + neutralize = bool(ds.get("neutralize", False)) service = _SOURCE_TYPE_TO_SERVICE.get(sourceType, sourceType) if not connectionId: raise ValueError(f"DataSource '{dsId}' has no connectionId") - logger.info(f"Resolved DataSource '{dsId}' ({label}): sourceType={sourceType}, service={service}, connectionId={connectionId}, path={path[:80]}") - return connectionId, service, path + logger.info(f"Resolved DataSource '{dsId}' ({label}): sourceType={sourceType}, service={service}, connectionId={connectionId}, path={path[:80]}, neutralize={neutralize}") + return connectionId, service, path, neutralize _MAIL_SERVICES = {"outlook", "gmail"} @@ -1589,7 +1655,7 @@ def _registerCoreTools(registry: ToolRegistry, services): error="Provide either dataSourceId OR connectionId+service") try: if dsId: - connectionId, service, basePath = await _resolveDataSource(dsId) + connectionId, service, basePath, _neutralize = await _resolveDataSource(dsId) else: connectionId, service, basePath = directConnId, directService, args.get("path", "/") if subPath: @@ -1632,7 +1698,7 @@ def _registerCoreTools(registry: ToolRegistry, services): error="Provide either dataSourceId OR connectionId+service") try: if dsId: - connectionId, service, basePath = await _resolveDataSource(dsId) + connectionId, service, basePath, _neutralize = await _resolveDataSource(dsId) else: connectionId, service, basePath = directConnId, directService, args.get("path", "/") from modules.connectors.connectorResolver import ConnectorResolver @@ -1666,8 +1732,9 @@ def _registerCoreTools(registry: ToolRegistry, services): try: from modules.connectors.connectorResolver import ConnectorResolver from modules.connectors.connectorProviderBase import DownloadResult as _DR + _sourceNeutralize = False if dsId: - connectionId, service, basePath = await _resolveDataSource(dsId) + connectionId, service, basePath, _sourceNeutralize = await _resolveDataSource(dsId) else: connectionId, service, basePath = directConnId, directService, "/" fullPath = filePath if filePath.startswith("/") else f"{basePath.rstrip('/')}/{filePath}" @@ -1710,6 +1777,8 @@ def _registerCoreTools(registry: ToolRegistry, services): fiId = 
context.get("featureInstanceId") or (services.featureInstanceId if services else "") if fiId: chatService.interfaceDbComponent.updateFile(fileItem.id, {"featureInstanceId": fiId}) + if _sourceNeutralize: + chatService.interfaceDbComponent.updateFile(fileItem.id, {"neutralize": True}) tempFolderId = _getOrCreateTempFolder(chatService) if tempFolderId: chatService.interfaceDbComponent.updateFile(fileItem.id, {"folderId": tempFolderId}) @@ -2040,9 +2109,12 @@ def _registerCoreTools(registry: ToolRegistry, services): }) if contentObjects: + _diFiId, _diMId = _resolveFileScope(fileId, context) await knowledgeService.indexFile( fileId=fileId, fileName=fileName, mimeType=fileMime, userId=context.get("userId", ""), contentObjects=contentObjects, + featureInstanceId=_diFiId, + mandateId=_diMId, ) chunks = knowledgeService._knowledgeDb.getContentChunks(fileId) @@ -2088,9 +2160,22 @@ def _registerCoreTools(registry: ToolRegistry, services): dataUrl = f"data:{mimeType};base64,{imageData}" from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum as OTE + _opType = OTE.IMAGE_ANALYSE + try: + from modules.datamodels.datamodelFiles import FileItem as _FileItemModel + from modules.interfaces.interfaceDbManagement import ComponentObjects as _CO + _fRow = _CO().db._loadRecord(_FileItemModel, fileId) + if _fRow: + _fGet = (lambda k, d=None: _fRow.get(k, d)) if isinstance(_fRow, dict) else (lambda k, d=None: getattr(_fRow, k, d)) + if bool(_fGet("neutralize", False)): + _opType = OTE.NEUTRALIZATION_IMAGE + logger.info(f"describeImage: file {fileId} has neutralize=True, using NEUTRALIZATION_IMAGE (internal models only)") + except Exception: + pass + visionRequest = AiCallRequest( prompt=prompt, - options=AiCallOptions(operationType=OTE.IMAGE_ANALYSE), + options=AiCallOptions(operationType=_opType), messages=[{"role": "user", "content": [ {"type": "text", "text": prompt}, {"type": "image_url", "image_url": {"url": dataUrl}}, @@ -3099,6 +3184,11 @@ def _registerCoreTools(registry: ToolRegistry, services): recordFilter={"featureInstanceId": featureInstanceId, "workspaceInstanceId": workspaceInstanceId}, ) + _anySourceNeutralize = any( + bool(ds.get("neutralize", False) if isinstance(ds, dict) else getattr(ds, "neutralize", False)) + for ds in (featureDataSources or []) + ) + from modules.security.rbacCatalog import getCatalogService catalog = getCatalogService() if not featureDataSources: @@ -3133,6 +3223,8 @@ def _registerCoreTools(registry: ToolRegistry, services): ) async def _subAgentAiCall(req): + if _anySourceNeutralize: + req.requireNeutralization = True return await aiService.callAi(req) try: diff --git a/modules/serviceCenter/services/serviceAi/mainServiceAi.py b/modules/serviceCenter/services/serviceAi/mainServiceAi.py index e2de43e6..9ff6437d 100644 --- a/modules/serviceCenter/services/serviceAi/mainServiceAi.py +++ b/modules/serviceCenter/services/serviceAi/mainServiceAi.py @@ -200,10 +200,6 @@ class AiService: finally: self.aiObjects.billingCallback = None - # Rehydrate neutralization placeholders in response - if _wasNeutralized and response and hasattr(response, 'content') and response.content: - response.content = self._rehydrateResponse(response.content) - # Attach neutralization exclusion metadata if any parts failed if _excludedDocs and response: if not hasattr(response, 'metadata') or response.metadata is None: @@ -240,10 +236,7 @@ class AiService: self.aiObjects.billingCallback = self._createBillingCallback() try: async for chunk in 
self.aiObjects.callWithTextContextStream(request): - # Rehydrate the final AiCallResponse (non-str chunks are the final response) if not isinstance(chunk, str): - if _wasNeutralized and hasattr(chunk, 'content') and chunk.content: - chunk.content = self._rehydrateResponse(chunk.content) if _excludedDocs: if not hasattr(chunk, 'metadata') or chunk.metadata is None: chunk.metadata = {} @@ -566,34 +559,70 @@ detectedIntent-Werte: def _shouldNeutralize(self, request: AiCallRequest) -> bool: """Check if this AI request should have neutralization applied. - Per-request override: requireNeutralization=True forces it, False skips it. - Only applies to text prompts -- not embeddings or image processing.""" + + OR-logic across three sources (any True → neutralize): + 1. Feature-Instance config (NeutralizationConfig.enabled) + 2. Workflow/Session (context.requireNeutralization) + 3. Per-request (request.requireNeutralization) + + No source can override another's True with False. + """ try: - if request.requireNeutralization is False: - return False - if not request.prompt and not request.messages: + if not request.prompt and not request.messages and not request.context: return False + + _sources = [] + + # Source 1: Feature-Instance config + _neutralSvc = self._get_service("neutralization") + if _neutralSvc and hasattr(_neutralSvc, 'getConfig'): + _config = _neutralSvc.getConfig() + if _config and getattr(_config, 'enabled', False): + _sources.append("featureInstance") + + # Source 2: Workflow / Session context + _ctx = getattr(self.services, '_context', None) + _ctxFlag = getattr(_ctx, "requireNeutralization", None) if _ctx else None + if _ctxFlag is True: + _sources.append("context") + + # Source 3: Per-request flag if request.requireNeutralization is True: + _sources.append("request") + + if _sources: + logger.debug(f"Neutralization required by: {', '.join(_sources)}") + request.requireNeutralization = True return True - neutralSvc = self._get_service("neutralization") - if not neutralSvc: - return False - config = neutralSvc.getConfig() if hasattr(neutralSvc, 'getConfig') else None - if not config or not getattr(config, 'enabled', False): - return False - return True - except Exception: + + return False + except Exception as e: + logger.error(f"_shouldNeutralize check failed: {e} — defaulting to False") return False def _neutralizeRequest(self, request: AiCallRequest) -> Tuple[AiCallRequest, bool, List[str]]: """Neutralize the prompt text and messages in an AiCallRequest. + Returns (modifiedRequest, wasNeutralized, excludedDocs). - Fail-safe: failing parts are excluded instead of aborting the entire call.""" + + FAILSAFE behaviour when ``requireNeutralization is True`` (explicit): + - Service unavailable → raises (caller must not send raw data to AI). + - Prompt neutralization fails → raises. + - Individual message neutralization fails → message is **removed** + (not kept in original form) and noted in excludedDocs. + + When neutralization is only config-driven (requireNeutralization is + None) the behaviour is softer: failures are logged and originals are + kept — but a warning is emitted. 
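+
+        Rough sketch of the caller contract (names as used in this module):
+
+            request.requireNeutralization = True    # explicit hard mode
+            try:
+                request, wasNeutralized, excluded = self._neutralizeRequest(request)
+            except RuntimeError:
+                ...  # blocked: nothing un-neutralized may reach the provider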
+ """ + _hardMode = request.requireNeutralization is True excludedDocs: List[str] = [] neutralSvc = self._get_service("neutralization") if not neutralSvc or not hasattr(neutralSvc, 'processText'): - logger.warning("Neutralization required but neutralization service is unavailable — continuing without neutralization") + if _hardMode: + raise RuntimeError("Neutralization explicitly required but service unavailable — AI call BLOCKED") + logger.warning("Neutralization required by config but service unavailable — continuing without neutralization") excludedDocs.append("Neutralization service unavailable; prompt sent un-neutralized") return request, False, excludedDocs @@ -607,28 +636,148 @@ detectedIntent-Werte: _wasNeutralized = True logger.debug("Neutralized prompt in AiCallRequest") else: + if _hardMode: + raise RuntimeError(f"Prompt neutralization returned empty — AI call BLOCKED (hard mode)") logger.warning("Neutralization of prompt returned no neutralized_text — sending original prompt") excludedDocs.append("Prompt neutralization failed; original prompt used") + except RuntimeError: + raise except Exception as e: + if _hardMode: + raise RuntimeError(f"Prompt neutralization failed — AI call BLOCKED: {e}") from e logger.warning(f"Neutralization of prompt failed: {e} — sending original prompt") excludedDocs.append(f"Prompt neutralization error: {e}") + if request.context: + try: + result = neutralSvc.processText(request.context) + if result and result.get("neutralized_text"): + request.context = result["neutralized_text"] + _wasNeutralized = True + logger.debug("Neutralized context in AiCallRequest") + else: + if _hardMode: + raise RuntimeError("Context neutralization returned empty — AI call BLOCKED (hard mode)") + logger.warning("Neutralization of context returned no neutralized_text — sending original context") + excludedDocs.append("Context neutralization failed; original context used") + except RuntimeError: + raise + except Exception as e: + if _hardMode: + raise RuntimeError(f"Context neutralization failed — AI call BLOCKED: {e}") from e + logger.warning(f"Neutralization of context failed: {e} — sending original context") + excludedDocs.append(f"Context neutralization error: {e}") + if request.messages and isinstance(request.messages, list): + cleanMessages = [] for idx, msg in enumerate(request.messages): content = msg.get("content") if isinstance(msg, dict) else None - if not isinstance(content, str) or not content: + if content is None: + cleanMessages.append(msg) continue - try: - result = neutralSvc.processText(content) - if result and result.get("neutralized_text"): - msg["content"] = result["neutralized_text"] - _wasNeutralized = True + if isinstance(content, str): + if not content: + cleanMessages.append(msg) + continue + try: + result = neutralSvc.processText(content) + if result and result.get("neutralized_text"): + msg["content"] = result["neutralized_text"] + _wasNeutralized = True + cleanMessages.append(msg) + else: + if _hardMode: + logger.warning(f"Message[{idx}] neutralization empty — REMOVING message (hard mode)") + excludedDocs.append(f"Message[{idx}] neutralization failed; message REMOVED") + else: + logger.warning(f"Neutralization of message[{idx}] returned no neutralized_text — keeping original") + excludedDocs.append(f"Message[{idx}] neutralization failed; original kept") + cleanMessages.append(msg) + except Exception as e: + if _hardMode: + logger.warning(f"Message[{idx}] neutralization error — REMOVING message (hard mode): {e}") + 
excludedDocs.append(f"Message[{idx}] neutralization error; message REMOVED: {e}") + else: + logger.warning(f"Neutralization of message[{idx}] failed: {e} — keeping original") + excludedDocs.append(f"Message[{idx}] neutralization error: {e}") + cleanMessages.append(msg) + elif isinstance(content, list): + _cleanParts = [] + for _partIdx, _part in enumerate(content): + if not isinstance(_part, dict): + _cleanParts.append(_part) + continue + _partType = _part.get("type", "") + if _partType == "text" and _part.get("text"): + try: + _result = neutralSvc.processText(_part["text"]) + if _result and _result.get("neutralized_text"): + _part["text"] = _result["neutralized_text"] + _wasNeutralized = True + _cleanParts.append(_part) + else: + if _hardMode: + logger.warning(f"Message[{idx}].content[{_partIdx}] text neutralization empty — REMOVING part") + excludedDocs.append(f"Message[{idx}].content[{_partIdx}] text removed") + else: + _cleanParts.append(_part) + except Exception as e: + if _hardMode: + logger.warning(f"Message[{idx}].content[{_partIdx}] text neutralization error — REMOVING: {e}") + excludedDocs.append(f"Message[{idx}].content[{_partIdx}] text error: {e}") + else: + _cleanParts.append(_part) + elif _partType == "image_url": + if _hardMode: + logger.warning(f"Message[{idx}].content[{_partIdx}] image_url — REMOVING (neutralization active)") + excludedDocs.append(f"Message[{idx}].content[{_partIdx}] image removed (neutralization)") + else: + _cleanParts.append(_part) + else: + _cleanParts.append(_part) + if _cleanParts: + msg["content"] = _cleanParts + cleanMessages.append(msg) + elif _hardMode: + logger.warning(f"Message[{idx}] all parts removed — REMOVING message") + excludedDocs.append(f"Message[{idx}] fully removed after neutralization") + else: + cleanMessages.append(msg) + request.messages = cleanMessages + + if hasattr(request, 'contentParts') and request.contentParts: + _cleanParts = [] + for _cpIdx, _cp in enumerate(request.contentParts): + _tg = getattr(_cp, 'typeGroup', '') or '' + _data = getattr(_cp, 'data', '') or '' + if _tg in ('text', 'table') and _data: + try: + _result = neutralSvc.processText(str(_data)) + if _result and _result.get("neutralized_text"): + _cp.data = _result["neutralized_text"] + _wasNeutralized = True + _cleanParts.append(_cp) + else: + if _hardMode: + logger.warning(f"ContentPart[{_cpIdx}] neutralization empty — REMOVING") + excludedDocs.append(f"ContentPart[{_cpIdx}] removed") + else: + _cleanParts.append(_cp) + except Exception as e: + if _hardMode: + logger.warning(f"ContentPart[{_cpIdx}] neutralization error — REMOVING: {e}") + excludedDocs.append(f"ContentPart[{_cpIdx}] error: {e}") + else: + _cleanParts.append(_cp) + elif _tg == 'image': + if _hardMode: + logger.warning(f"ContentPart[{_cpIdx}] image — REMOVING (neutralization active)") + excludedDocs.append(f"ContentPart[{_cpIdx}] image removed") else: - logger.warning(f"Neutralization of message[{idx}] returned no neutralized_text — keeping original") - excludedDocs.append(f"Message[{idx}] neutralization failed; original kept") - except Exception as e: - logger.warning(f"Neutralization of message[{idx}] failed: {e} — keeping original") - excludedDocs.append(f"Message[{idx}] neutralization error: {e}") + _cleanParts.append(_cp) + else: + _cleanParts.append(_cp) + request.contentParts = _cleanParts return request, _wasNeutralized, excludedDocs diff --git a/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py 
b/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py index 0f20bc7f..49774a38 100644 --- a/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py +++ b/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py @@ -83,12 +83,47 @@ class KnowledgeService: """ contentObjects = contentObjects or [] - # 1. Create FileContentIndex + # 1. Resolve scope fields from FileItem (Single Source of Truth) + # FileItem lives in poweron_management; its scope/mandateId/featureInstanceId + # are authoritative and must be mirrored onto the FileContentIndex. + resolvedScope = "personal" + resolvedMandateId = mandateId + resolvedFeatureInstanceId = featureInstanceId + resolvedUserId = userId + _shouldNeutralize = False + try: + from modules.datamodels.datamodelFiles import FileItem as _FileItem + _dbComponent = getattr(self._context, "interfaceDbComponent", None) + _fileRecords = _dbComponent.getRecordset(_FileItem, recordFilter={"id": fileId}) if _dbComponent else [] + if not _fileRecords: + from modules.interfaces.interfaceDbManagement import ComponentObjects + _row = ComponentObjects().db._loadRecord(_FileItem, fileId) + if _row: + _fileRecords = [_row] + if _fileRecords: + _fileRecord = _fileRecords[0] + _get = (lambda k, d=None: _fileRecord.get(k, d)) if isinstance(_fileRecord, dict) else (lambda k, d=None: getattr(_fileRecord, k, d)) + _shouldNeutralize = bool(_get("neutralize", False)) + _fileScope = _get("scope") + if _fileScope: + resolvedScope = _fileScope + if not resolvedMandateId: + resolvedMandateId = str(_get("mandateId", "") or "") + if not resolvedFeatureInstanceId: + resolvedFeatureInstanceId = str(_get("featureInstanceId", "") or "") + _fileCreatedBy = _get("sysCreatedBy") + if _fileCreatedBy: + resolvedUserId = str(_fileCreatedBy) + except Exception: + pass + + # 2. Create FileContentIndex with correct scope from the start index = FileContentIndex( id=fileId, - userId=userId, - featureInstanceId=featureInstanceId, - mandateId=mandateId, + userId=resolvedUserId, + featureInstanceId=resolvedFeatureInstanceId, + mandateId=resolvedMandateId, + scope=resolvedScope, fileName=fileName, mimeType=mimeType, containerPath=containerPath, @@ -108,28 +143,9 @@ class KnowledgeService: ) self._knowledgeDb.upsertFileContentIndex(index) - # 2. Chunk text content objects and create embeddings + # 3. 
Chunk text content objects and create embeddings textObjects = [o for o in contentObjects if o.get("contentType") == "text"] - # Read FileItem attributes for index metadata and neutralization - _shouldNeutralize = False - try: - from modules.datamodels.datamodelFiles import FileItem as _FileItem - _dbComponent = getattr(self._context, 'interfaceDbComponent', None) - _fileRecords = _dbComponent.getRecordset(_FileItem, recordFilter={"id": fileId}) if _dbComponent else [] - if _fileRecords: - _fileRecord = _fileRecords[0] - _get = (lambda k, d=None: _fileRecord.get(k, d)) if isinstance(_fileRecord, dict) else (lambda k, d=None: getattr(_fileRecord, k, d)) - _shouldNeutralize = bool(_get("neutralize", False)) - _fileScope = _get("scope") - if _fileScope: - index.scope = _fileScope - _fileCreatedBy = _get("sysCreatedBy") - if _fileCreatedBy: - index.userId = str(_fileCreatedBy) - except Exception: - pass - if _shouldNeutralize and textObjects: _neutralizedObjects = [] try: @@ -142,9 +158,7 @@ class KnowledgeService: if not _textContent: continue try: - _neutralResult = _neutralSvc.processText( - _textContent, userId=userId, featureInstanceId=featureInstanceId - ) + _neutralResult = _neutralSvc.processText(_textContent) if _neutralResult and _neutralResult.get("neutralized_text"): _obj["data"] = _neutralResult["neutralized_text"] _neutralizedObjects.append(_obj) @@ -176,8 +190,8 @@ class KnowledgeService: contentChunk = ContentChunk( contentObjectId=chunk["contentObjectId"], fileId=fileId, - userId=userId, - featureInstanceId=featureInstanceId, + userId=resolvedUserId, + featureInstanceId=resolvedFeatureInstanceId, contentType="text", data=chunk["data"], contextRef=chunk["contextRef"], @@ -185,14 +199,36 @@ class KnowledgeService: ) self._knowledgeDb.upsertContentChunk(contentChunk) - # 3. Store non-text content objects (images, etc.) without embedding + # 4. Store non-text content objects (images, etc.) without embedding nonTextObjects = [o for o in contentObjects if o.get("contentType") != "text"] + if _shouldNeutralize and nonTextObjects: + import base64 as _b64 + _filteredNonText = [] + for _obj in nonTextObjects: + if _obj.get("contentType") != "image": + _filteredNonText.append(_obj) + continue + _imgData = (_obj.get("data", "") or "").strip() + if not _imgData: + _filteredNonText.append(_obj) + continue + try: + _imgBytes = _b64.b64decode(_imgData) + _imgResult = await _neutralSvc.processImageAsync(_imgBytes, fileName) + if _imgResult.get("status") == "ok": + _filteredNonText.append(_obj) + logger.debug(f"Image chunk OK for file {fileId}, storing") + else: + logger.warning(f"Image chunk blocked for file {fileId} (PII detected), skipping (fail-safe)") + except Exception as _imgErr: + logger.warning(f"Image neutralization check failed for file {fileId}: {_imgErr}, skipping (fail-safe)") + nonTextObjects = _filteredNonText for obj in nonTextObjects: contentChunk = ContentChunk( contentObjectId=obj.get("contentObjectId", ""), fileId=fileId, - userId=userId, - featureInstanceId=featureInstanceId, + userId=resolvedUserId, + featureInstanceId=resolvedFeatureInstanceId, contentType=obj.get("contentType", "other"), data=obj.get("data", ""), contextRef=obj.get("contextRef", {}), @@ -200,21 +236,23 @@ class KnowledgeService: ) self._knowledgeDb.upsertContentChunk(contentChunk) - self._knowledgeDb.updateFileStatus(fileId, "indexed") + # 5. Final upsert ALWAYS — persists scope, neutralization status, etc. 
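+        # The index row was first written in step 2; this upsert persists the
+        # final status and, when neutralization ran, its outcome fields.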
index.status = "indexed" if _shouldNeutralize: - try: - index.neutralizationStatus = "completed" - index.isNeutralized = True - self._knowledgeDb.upsertFileContentIndex(index) - except Exception as e: - logger.debug(f"Could not set neutralizationStatus for file {fileId}: {e}") - logger.info(f"Indexed file {fileId} ({fileName}): {len(contentObjects)} objects, {len(textObjects)} text chunks") - if mandateId: + index.neutralizationStatus = "completed" + index.isNeutralized = True + self._knowledgeDb.upsertFileContentIndex(index) + + logger.info( + "Indexed file %s (%s): %d objects, %d text chunks, scope=%s, mandate=%s, instance=%s", + fileId, fileName, len(contentObjects), len(textObjects), + resolvedScope, resolvedMandateId, resolvedFeatureInstanceId, + ) + if resolvedMandateId: try: from modules.interfaces.interfaceDbBilling import _getRootInterface - _getRootInterface().reconcileMandateStorageBilling(str(mandateId)) + _getRootInterface().reconcileMandateStorageBilling(str(resolvedMandateId)) except Exception as ex: logger.warning("reconcileMandateStorageBilling after index failed: %s", ex) return index @@ -328,17 +366,18 @@ class KnowledgeService: if entities: builder.add(priority=3, label="Workflow Context", items=entities, isKeyValue=True, maxChars=2000) - # Layer 3: Shared Layer (mandate-wide shared documents) - sharedChunks = self._knowledgeDb.semanticSearch( - queryVector=queryVector, - mandateId=mandateId, - isShared=True, - limit=10, - minScore=0.7, - isSysAdmin=isSysAdmin, - ) - if sharedChunks: - builder.add(priority=4, label="Shared Knowledge", items=sharedChunks, maxChars=2000) + # Layer 3: Mandate-scoped documents (visible to all mandate users) + if mandateId: + mandateChunks = self._knowledgeDb.semanticSearch( + queryVector=queryVector, + scope="mandate", + mandateId=mandateId, + limit=10, + minScore=0.7, + isSysAdmin=isSysAdmin, + ) + if mandateChunks: + builder.add(priority=4, label="Shared Knowledge", items=mandateChunks, maxChars=2000) # Layer 4: Cross-workflow hint (other conversations in this workspace) if workflowHintItems: diff --git a/modules/serviceCenter/services/serviceKnowledge/subPreScan.py b/modules/serviceCenter/services/serviceKnowledge/subPreScan.py index e025dd99..0688deb2 100644 --- a/modules/serviceCenter/services/serviceKnowledge/subPreScan.py +++ b/modules/serviceCenter/services/serviceKnowledge/subPreScan.py @@ -31,6 +31,7 @@ async def preScanDocument( userId: str = "", featureInstanceId: str = "", mandateId: str = "", + scope: str = "personal", ) -> FileContentIndex: """Create a structural FileContentIndex without AI. @@ -56,6 +57,7 @@ async def preScanDocument( userId=userId, featureInstanceId=featureInstanceId, mandateId=mandateId, + scope=scope, fileName=fileName, mimeType=mimeType, totalObjects=totalObjects, diff --git a/modules/shared/notifyMandateAdmins.py b/modules/shared/notifyMandateAdmins.py index 27445afb..6bef921d 100644 --- a/modules/shared/notifyMandateAdmins.py +++ b/modules/shared/notifyMandateAdmins.py @@ -7,9 +7,7 @@ All mandate-level notifications (subscription changes, billing warnings, etc.) MUST go through notifyMandateAdmins() to ensure consistent recipient resolution and delivery. -Recipients are the union of: -1. BillingSettings.notifyEmails for the mandate (configured contact addresses) -2. All users with the mandate-level "admin" RBAC role +Recipients: all users with the mandate-level "admin" RBAC role. 
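+(BillingSettings.notifyEmails is intentionally no longer consulted; the
+mandate-level admin role is the single source for recipients.)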
""" from __future__ import annotations @@ -96,10 +94,10 @@ def _resolveMandateAdminEmails(mandateId: str) -> List[str]: def _resolveAllRecipients(mandateId: str) -> List[str]: - """Union of BillingSettings.notifyEmails + all mandate admin user emails, deduplicated.""" + """Mandate admin user emails only (RBAC-resolved), deduplicated.""" seen: Set[str] = set() result: List[str] = [] - for email in _resolveMandateContactEmails(mandateId) + _resolveMandateAdminEmails(mandateId): + for email in _resolveMandateAdminEmails(mandateId): if email and email not in seen: seen.add(email) result.append(email) @@ -233,7 +231,7 @@ def notifyMandateAdmins( rawHtmlBlock: Optional[str] = None, ) -> int: """ - Send a styled HTML notification to all mandate admins and configured contacts. + Send a styled HTML notification to all mandate admins. Args: mandateId: The mandate to notify admins for. diff --git a/modules/workflows/methods/methodContext/actions/extractContent.py b/modules/workflows/methods/methodContext/actions/extractContent.py index 466165ad..9fb2e7f4 100644 --- a/modules/workflows/methods/methodContext/actions/extractContent.py +++ b/modules/workflows/methods/methodContext/actions/extractContent.py @@ -6,7 +6,7 @@ import time from typing import Dict, Any from modules.datamodels.datamodelChat import ActionResult, ActionDocument from modules.datamodels.datamodelDocref import DocumentReferenceList -from modules.datamodels.datamodelExtraction import ExtractionOptions, MergeStrategy, ContentExtracted, ContentPart +from modules.datamodels.datamodelExtraction import ExtractionOptions, MergeStrategy logger = logging.getLogger(__name__) @@ -101,74 +101,6 @@ async def extractContent(self, parameters: Dict[str, Any]) -> ActionResult: # Pass operationId for hierarchical per-document progress logging extractedResults = self.services.extraction.extractContent(chatDocuments, extractionOptions, operationId=operationId) - # Check if neutralization is enabled and should be applied automatically - neutralizationEnabled = False - try: - config = self.services.neutralization.getConfig() - neutralizationEnabled = config and config.enabled - except Exception as e: - logger.debug(f"Could not check neutralization config: {str(e)}") - - # Neutralize extracted data if enabled (for dynamic mode: after extraction, before AI processing) - if neutralizationEnabled: - self.services.chat.progressLogUpdate(operationId, 0.7, "Neutralizing extracted data") - logger.info("Neutralization enabled - neutralizing extracted content data") - - # Neutralize each ContentExtracted result - for extracted in extractedResults: - if extracted.parts: - neutralizedParts = [] - for part in extracted.parts: - if not isinstance(part, ContentPart): - # Try to parse as ContentPart if it's a dict - if isinstance(part, dict): - try: - part = ContentPart(**part) - except Exception as e: - logger.warning(f"Could not parse ContentPart: {str(e)}") - neutralizedParts.append(part) - continue - else: - neutralizedParts.append(part) - continue - - # Neutralize the data field if it contains text - if part.data: - try: - # Call neutralization service - neutralizationResult = self.services.neutralization.processText(part.data) - - if neutralizationResult and 'neutralized_text' in neutralizationResult: - # Replace data with neutralized text - neutralizedData = neutralizationResult['neutralized_text'] - - # Create new ContentPart with neutralized data - neutralizedPart = ContentPart( - id=part.id, - parentId=part.parentId, - label=part.label, - 
diff --git a/modules/shared/notifyMandateAdmins.py b/modules/shared/notifyMandateAdmins.py
index 27445afb..6bef921d 100644
--- a/modules/shared/notifyMandateAdmins.py
+++ b/modules/shared/notifyMandateAdmins.py
@@ -7,9 +7,7 @@
 All mandate-level notifications (subscription changes, billing warnings, etc.)
 MUST go through notifyMandateAdmins() to ensure consistent recipient
 resolution and delivery.

-Recipients are the union of:
-1. BillingSettings.notifyEmails for the mandate (configured contact addresses)
-2. All users with the mandate-level "admin" RBAC role
+Recipients: all users with the mandate-level "admin" RBAC role.
 """

 from __future__ import annotations
@@ -96,10 +94,10 @@ def _resolveMandateAdminEmails(mandateId: str) -> List[str]:


 def _resolveAllRecipients(mandateId: str) -> List[str]:
-    """Union of BillingSettings.notifyEmails + all mandate admin user emails, deduplicated."""
+    """Mandate admin user emails only (RBAC-resolved), deduplicated."""
     seen: Set[str] = set()
     result: List[str] = []
-    for email in _resolveMandateContactEmails(mandateId) + _resolveMandateAdminEmails(mandateId):
+    for email in _resolveMandateAdminEmails(mandateId):
         if email and email not in seen:
             seen.add(email)
             result.append(email)
@@ -233,7 +231,7 @@ def notifyMandateAdmins(
     rawHtmlBlock: Optional[str] = None,
 ) -> int:
     """
-    Send a styled HTML notification to all mandate admins and configured contacts.
+    Send a styled HTML notification to all mandate admins.

     Args:
         mandateId: The mandate to notify admins for.
diff --git a/modules/workflows/methods/methodContext/actions/extractContent.py b/modules/workflows/methods/methodContext/actions/extractContent.py
index 466165ad..9fb2e7f4 100644
--- a/modules/workflows/methods/methodContext/actions/extractContent.py
+++ b/modules/workflows/methods/methodContext/actions/extractContent.py
@@ -6,7 +6,7 @@ import time
 from typing import Dict, Any
 from modules.datamodels.datamodelChat import ActionResult, ActionDocument
 from modules.datamodels.datamodelDocref import DocumentReferenceList
-from modules.datamodels.datamodelExtraction import ExtractionOptions, MergeStrategy, ContentExtracted, ContentPart
+from modules.datamodels.datamodelExtraction import ExtractionOptions, MergeStrategy

 logger = logging.getLogger(__name__)
@@ -101,74 +101,6 @@ async def extractContent(self, parameters: Dict[str, Any]) -> ActionResult:
     # Pass operationId for hierarchical per-document progress logging
     extractedResults = self.services.extraction.extractContent(chatDocuments, extractionOptions, operationId=operationId)

-    # Check if neutralization is enabled and should be applied automatically
-    neutralizationEnabled = False
-    try:
-        config = self.services.neutralization.getConfig()
-        neutralizationEnabled = config and config.enabled
-    except Exception as e:
-        logger.debug(f"Could not check neutralization config: {str(e)}")
-
-    # Neutralize extracted data if enabled (for dynamic mode: after extraction, before AI processing)
-    if neutralizationEnabled:
-        self.services.chat.progressLogUpdate(operationId, 0.7, "Neutralizing extracted data")
-        logger.info("Neutralization enabled - neutralizing extracted content data")
-
-        # Neutralize each ContentExtracted result
-        for extracted in extractedResults:
-            if extracted.parts:
-                neutralizedParts = []
-                for part in extracted.parts:
-                    if not isinstance(part, ContentPart):
-                        # Try to parse as ContentPart if it's a dict
-                        if isinstance(part, dict):
-                            try:
-                                part = ContentPart(**part)
-                            except Exception as e:
-                                logger.warning(f"Could not parse ContentPart: {str(e)}")
-                                neutralizedParts.append(part)
-                                continue
-                        else:
-                            neutralizedParts.append(part)
-                            continue
-
-                    # Neutralize the data field if it contains text
-                    if part.data:
-                        try:
-                            # Call neutralization service
-                            neutralizationResult = self.services.neutralization.processText(part.data)
-
-                            if neutralizationResult and 'neutralized_text' in neutralizationResult:
-                                # Replace data with neutralized text
-                                neutralizedData = neutralizationResult['neutralized_text']
-
-                                # Create new ContentPart with neutralized data
-                                neutralizedPart = ContentPart(
-                                    id=part.id,
-                                    parentId=part.parentId,
-                                    label=part.label,
-                                    typeGroup=part.typeGroup,
-                                    mimeType=part.mimeType,
-                                    data=neutralizedData,
-                                    metadata=part.metadata.copy() if part.metadata else {}
-                                )
-                                neutralizedParts.append(neutralizedPart)
-                            else:
-                                # Neutralization failed, use original part
-                                logger.warning(f"Neutralization did not return neutralized_text for part {part.id}")
-                                neutralizedParts.append(part)
-                        except Exception as e:
-                            logger.error(f"Error neutralizing part {part.id}: {str(e)}")
-                            # On error, use original part
-                            neutralizedParts.append(part)
-                    else:
-                        # No data to neutralize, keep original part
-                        neutralizedParts.append(part)
-
-                # Update extracted result with neutralized parts
-                extracted.parts = neutralizedParts
-                logger.info(f"Neutralized {len(neutralizedParts)} content parts")
-
     # Build ActionDocuments from ContentExtracted results
     self.services.chat.progressLogUpdate(operationId, 0.8, "Building result documents")
     actionDocuments = []
@@ -190,7 +122,6 @@ async def extractContent(self, parameters: Dict[str, Any]) -> ActionResult:
             "documentIndex": i,
             "extractedId": extracted.id,
             "partCount": len(extracted.parts) if extracted.parts else 0,
-            "neutralized": neutralizationEnabled,
             "originalFileName": originalDoc.fileName if originalDoc and hasattr(originalDoc, 'fileName') else None
         }
         actionDoc = ActionDocument(
diff --git a/modules/workflows/methods/methodContext/actions/neutralizeData.py b/modules/workflows/methods/methodContext/actions/neutralizeData.py
index a1fc6b91..bd032cac 100644
--- a/modules/workflows/methods/methodContext/actions/neutralizeData.py
+++ b/modules/workflows/methods/methodContext/actions/neutralizeData.py
@@ -16,14 +16,13 @@ async def neutralizeData(self, parameters: Dict[str, Any]) -> ActionResult:
     workflowId = self.services.workflow.id if self.services.workflow else f"no-workflow-{int(time.time())}"
     operationId = f"context_neutralize_{workflowId}_{int(time.time())}"

-    # Check if neutralization is enabled
     neutralizationEnabled = False
     try:
         config = self.services.neutralization.getConfig()
         neutralizationEnabled = config and config.enabled
     except Exception as e:
         logger.debug(f"Could not check neutralization config: {str(e)}")
-
+
     if not neutralizationEnabled:
         logger.info("Neutralization is not enabled, returning documents unchanged")
         # Return original documents if neutralization is disabled
@@ -144,8 +143,25 @@ async def neutralizeData(self, parameters: Dict[str, Any]) -> ActionResult:
                     neutralizedParts.append(part)
                     continue

-                # Neutralize the data field if it contains text
-                if part.data:
+                # Neutralize the data field based on typeGroup
+                _typeGroup = getattr(part, 'typeGroup', '') or ''
+                if _typeGroup == 'image' and part.data:
+                    import base64 as _b64
+                    try:
+                        self.services.chat.progressLogUpdate(
+                            operationId,
+                            0.3 + (i / len(chatDocuments)) * 0.6,
+                            f"Checking image part {len(neutralizedParts) + 1} of document {i+1}"
+                        )
+                        _imgBytes = _b64.b64decode(str(part.data))
+                        _imgResult = await self.services.neutralization.processImageAsync(_imgBytes, f"part_{part.id}")
+                        if _imgResult.get("status") == "ok":
+                            neutralizedParts.append(part)
+                        else:
+                            logger.warning(f"Fail-Safe: Image part {part.id} blocked (PII detected), SKIPPING")
+                    except Exception as _imgErr:
+                        logger.error(f"Fail-Safe: Image check failed for part {part.id}: {_imgErr}, SKIPPING")
+                elif part.data:
                     try:
                         self.services.chat.progressLogUpdate(
                             operationId,
@@ -153,14 +169,11 @@ async def neutralizeData(self, parameters: Dict[str, Any]) -> ActionResult:
                             f"Neutralizing part {len(neutralizedParts) + 1} of document {i+1}"
                         )

-                        # Call neutralization service
                         neutralizationResult = self.services.neutralization.processText(part.data)

                         if neutralizationResult and 'neutralized_text' in neutralizationResult:
-                            # Replace data with neutralized text
                             neutralizedData = neutralizationResult['neutralized_text']

-                            # Create new ContentPart with neutralized data
                             neutralizedPart = ContentPart(
                                 id=part.id,
                                 parentId=part.parentId,
@@ -172,15 +185,12 @@ async def neutralizeData(self, parameters: Dict[str, Any]) -> ActionResult:
                             )
                             neutralizedParts.append(neutralizedPart)
                         else:
-                            # Fail-Safe: neutralization incomplete, skip this part
                             logger.warning(f"Fail-Safe: Neutralization incomplete for part {part.id}, SKIPPING (not passing original)")
                             continue
                     except Exception as e:
                         logger.error(f"Fail-Safe: Error neutralizing part {part.id}, SKIPPING document (not passing original): {str(e)}")
-                        # Fail-Safe: do NOT pass original data to AI
                         continue
                 else:
-                    # No data to neutralize, keep original part
                     neutralizedParts.append(part)

         # Create neutralized ContentExtracted object
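The image branch added above is a fail-safe gate rather than a transformer: an image part is either passed through untouched or dropped entirely, never rewritten. Reduced to its core, the pattern looks like this minimal sketch — checkImage stands in for the real processImageAsync detector, which is assumed here to return a dict with a "status" key:

import base64
from typing import Optional

async def gateImagePart(partData: str, checkImage) -> Optional[str]:
    # Fail-safe: return the part unchanged only when the PII check
    # explicitly reports "ok"; on any failure or doubt, drop it.
    try:
        imgBytes = base64.b64decode(partData)
        result = await checkImage(imgBytes)
        return partData if result.get("status") == "ok" else None
    except Exception:
        return None  # errors count as "unsafe", not as "pass"

The same fail-safe stance appears in the text branch: a part whose neutralization fails is skipped, not forwarded in original form.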
diff --git a/modules/workflows/workflowManager.py b/modules/workflows/workflowManager.py
index de332c31..58b76908 100644
--- a/modules/workflows/workflowManager.py
+++ b/modules/workflows/workflowManager.py
@@ -352,12 +352,6 @@ class WorkflowManager:
         for i, doc in enumerate(documents, 1):
             docListText += f"\n{i}. {doc.fileName} ({doc.mimeType}, {doc.fileSize} bytes)"

-        _userId = getattr(getattr(self.services, 'user', None), 'id', '') or ''
-        _featureInstanceId = getattr(self.services, 'featureInstanceId', '') or ''
-        _promptForAnalysis, _wasNeutralized, _mappingId = await self._neutralizePromptIfRequired(
-            userPrompt, userId=_userId, featureInstanceId=_featureInstanceId
-        )
-
         analysisPrompt = f"""You are an input analyzer. From the user's message, perform ALL of the following in one pass:

1. detectedLanguage: Detect ISO 639-1 language code (e.g., de, en, fr, it)
@@ -407,7 +401,7 @@ Return ONLY JSON (no markdown) with this exact structure:
The following is the user's original input message. Analyze intent, normalize the request, and determine complexity:

 ################ USER INPUT START #################
-{_promptForAnalysis.replace('{', '{{').replace('}', '}}') if _promptForAnalysis else ''}
+{userPrompt.replace('{', '{{').replace('}', '}}') if userPrompt else ''}
 ################ USER INPUT FINISH #################
 """
@@ -425,12 +419,6 @@ The following is the user's original input message. Analyze intent, normalize th
             jsonEnd = aiResponse.rfind('}') + 1 if aiResponse else 0
             if jsonStart != -1 and jsonEnd > jsonStart:
                 result = json.loads(aiResponse[jsonStart:jsonEnd])
-                if _wasNeutralized:
-                    for _field in ('normalizedRequest', 'intent', 'workflowName'):
-                        if _field in result and result[_field]:
-                            result[_field] = await self._rehydrateResponseIfNeeded(
-                                result[_field], True, userId=_userId, featureInstanceId=_featureInstanceId
-                            )
                 return result
             else:
                 logger.warning("Could not parse combined analysis response, using defaults")
@@ -490,7 +478,6 @@ The following is the user's original input message. Analyze intent, normalize th
         if userInput.prompt:
             try:
                 originalPromptBytes = userInput.prompt.encode('utf-8')
-                originalPromptBytes = await self._neutralizeContentIfEnabled(originalPromptBytes, "text/markdown")
                 fileItem = self.services.interfaceDbComponent.createFile(
                     name="user_prompt_original.md",
                     mimeType="text/markdown",
@@ -680,7 +667,6 @@ The following is the user's original input message. Analyze intent, normalize th
         if userInput.prompt:
             try:
                 originalPromptBytes = userInput.prompt.encode('utf-8')
-                originalPromptBytes = await self._neutralizeContentIfEnabled(originalPromptBytes, "text/markdown")
                 fileItem = self.services.interfaceDbComponent.createFile(
                     name="user_prompt_original.md",
                     mimeType="text/markdown",
@@ -821,7 +807,6 @@ The following is the user's original input message. Analyze intent, normalize th
         if userInput.prompt:
             try:
                 originalPromptBytes = userInput.prompt.encode('utf-8')
-                originalPromptBytes = await self._neutralizeContentIfEnabled(originalPromptBytes, "text/markdown")
                 fileItem = self.services.interfaceDbComponent.createFile(
                     name="user_prompt_original.md",
                     mimeType="text/markdown",
@@ -1365,82 +1350,3 @@ The following is the user's original input message. Analyze intent, normalize th
         """Set user language for the service center"""
         self.services.user.language = language

-    async def _neutralizePromptIfRequired(self, prompt: str, userId: str, featureInstanceId: str) -> tuple:
-        """Neutralize prompt text if the workflow context requires it.
-        Returns (processedPrompt, wasNeutralized, mappingId)."""
-        try:
-            _neutralSvc = getattr(self.services, 'neutralization', None)
-            if not _neutralSvc:
-                return prompt, False, None
-            _config = _neutralSvc.getConfig() if hasattr(_neutralSvc, 'getConfig') else None
-            if not _config or not getattr(_config, 'enabled', False):
-                return prompt, False, None
-            _result = _neutralSvc.processText(prompt, userId=userId, featureInstanceId=featureInstanceId)
-            if _result and _result.get("neutralized_text"):
-                return _result["neutralized_text"], True, _result.get("mappingId")
-            return prompt, False, None
-        except Exception as e:
-            logger.warning(f"Prompt neutralization failed: {e}")
-            return prompt, False, None
-
-    async def _rehydrateResponseIfNeeded(self, response: str, wasNeutralized: bool, userId: str, featureInstanceId: str) -> str:
-        """Replace placeholders in AI response with original values."""
-        if not wasNeutralized or not response:
-            return response
-        try:
-            _neutralSvc = getattr(self.services, 'neutralization', None)
-            if not _neutralSvc:
-                return response
-            _rehydrated = _neutralSvc.resolveText(response, userId=userId, featureInstanceId=featureInstanceId)
-            return _rehydrated if _rehydrated else response
-        except Exception as e:
-            logger.warning(f"Response re-hydration failed: {e}")
-            return response
-
-    async def _neutralizeContentIfEnabled(self, contentBytes: bytes, mimeType: str) -> bytes:
-        """Neutralize content if neutralization is enabled in user settings"""
-        try:
-            # Automation hub may not have neutralization service; skip if unavailable
-            neutralization = getattr(self.services, 'neutralization', None)
-            if not neutralization:
-                return contentBytes
-            # Check if neutralization is enabled
-            config = neutralization.getConfig()
-            if not config or not config.enabled:
-                return contentBytes
-
-            # Decode content to text for neutralization
-            try:
-                textContent = contentBytes.decode('utf-8')
-            except UnicodeDecodeError:
-                # Try alternative encodings
-                for enc in ['latin-1', 'cp1252', 'iso-8859-1']:
-                    try:
-                        textContent = contentBytes.decode(enc)
-                        break
-                    except UnicodeDecodeError:
-                        continue
-                else:
-                    # If unable to decode, return original bytes (binary content)
-                    logger.debug(f"Unable to decode content for neutralization, skipping: {mimeType}")
-                    return contentBytes
-
-            # Neutralize the text content
-            # Note: The neutralization service should use names from config when processing
-            result = neutralization.processText(textContent)
-            if result and 'neutralized_text' in result:
-                neutralizedText = result['neutralized_text']
-                # Encode back to bytes using the same encoding
-                try:
-                    return neutralizedText.encode('utf-8')
-                except Exception as e:
-                    logger.warning(f"Error encoding neutralized text: {str(e)}")
-                    return contentBytes
-            else:
-                logger.warning("Neutralization did not return neutralized_text")
-                return contentBytes
-        except Exception as e:
-            logger.error(f"Error during content neutralization: {str(e)}")
-            # Return original content on error
-            return contentBytes
-

From e0a09ae6b1e560b1426be13b6278c87c0e14bfbf Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Mon, 30 Mar 2026 00:14:57 +0200
Subject: [PATCH 14/33] streamlined neutralization flow

---
 modules/aicore/aicoreBase.py                       |   4 +-
 modules/aicore/aicorePluginPrivateLlm.py           |   8 +-
 .../datamodelFeatureNeutralizer.py                 |  24 ++
 .../interfaceFeatureNeutralizer.py                 |  69 +++++
 .../neutralization/neutralizePlayground.py         |  24 +-
 .../neutralization/routeFeatureNeutralizer.py      | 197 ++++++++---
 .../mainServiceNeutralization.py                   | 263 +++++++++++++++++-
 .../workspace/routeFeatureWorkspace.py             |   3 +
 .../services/serviceAgent/mainServiceAgent.py      |   6 +-
 .../services/serviceAi/mainServiceAi.py            | 141 +++++++---
 .../serviceGeneration/renderers/registry.py        |   9 +-
 .../serviceKnowledge/mainServiceKnowledge.py       |   2 +-
 .../methodContext/actions/neutralizeData.py        |   2 +-
 13 files changed, 655 insertions(+), 97 deletions(-)

diff --git a/modules/aicore/aicoreBase.py b/modules/aicore/aicoreBase.py
index 70dd67c4..e107beb3 100644
--- a/modules/aicore/aicoreBase.py
+++ b/modules/aicore/aicoreBase.py
@@ -18,7 +18,9 @@ from typing import List, Dict, Any, Optional, AsyncGenerator, Union
 from modules.datamodels.datamodelAi import AiModel, AiModelCall, AiModelResponse

-_RETRY_AFTER_PATTERN = _re.compile(r"try again in (\d+(?:\.\d+)?)\s*s", _re.IGNORECASE)
+_RETRY_AFTER_PATTERN = _re.compile(
+    r"(?:try again in|retry after)\s+(\d+(?:\.\d+)?)\s*s", _re.IGNORECASE
+)


 def _parseRetryAfterSeconds(message: str) -> float:
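The widened pattern accepts both common provider phrasings ("try again in …s" and "retry after … s"). A quick standalone check of what the new alternation captures — the error messages here are made up for illustration:

import re

_RETRY_AFTER_PATTERN = re.compile(
    r"(?:try again in|retry after)\s+(\d+(?:\.\d+)?)\s*s", re.IGNORECASE
)

for msg in ("Rate limit reached, try again in 2.5s.", "429: Retry after 30 s"):
    m = _RETRY_AFTER_PATTERN.search(msg)
    print(m.group(1) if m else None)  # -> "2.5", then "30"

Note the group still captures only the number, so _parseRetryAfterSeconds needs no change.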
diff --git a/modules/aicore/aicorePluginPrivateLlm.py b/modules/aicore/aicorePluginPrivateLlm.py
index 38baa35e..79853652 100644
--- a/modules/aicore/aicorePluginPrivateLlm.py
+++ b/modules/aicore/aicorePluginPrivateLlm.py
@@ -22,7 +22,7 @@ import time
 from typing import List, Optional, Dict, Any
 from fastapi import HTTPException
 from modules.shared.configuration import APP_CONFIG
-from .aicoreBase import BaseConnectorAi
+from .aicoreBase import BaseConnectorAi, RateLimitExceededException
 from modules.datamodels.datamodelAi import (
     AiModel,
     PriorityEnum,
@@ -370,6 +370,9 @@ class AiPrivateLlm(BaseConnectorAi):

         if response.status_code != 200:
             errorMessage = f"Private-LLM API error: {response.status_code} - {response.text}"
+            if response.status_code == 429:
+                logger.warning(errorMessage)
+                raise RateLimitExceededException(errorMessage)
             logger.error(errorMessage)
             raise HTTPException(status_code=500, detail=errorMessage)

@@ -461,6 +464,9 @@ class AiPrivateLlm(BaseConnectorAi):

         if response.status_code != 200:
             errorMessage = f"Private-LLM API error: {response.status_code} - {response.text}"
+            if response.status_code == 429:
+                logger.warning(errorMessage)
+                raise RateLimitExceededException(errorMessage)
             logger.error(errorMessage)
             raise HTTPException(status_code=500, detail=errorMessage)

diff --git a/modules/features/neutralization/datamodelFeatureNeutralizer.py b/modules/features/neutralization/datamodelFeatureNeutralizer.py
index a8ed5981..cc111950 100644
--- a/modules/features/neutralization/datamodelFeatureNeutralizer.py
+++ b/modules/features/neutralization/datamodelFeatureNeutralizer.py
@@ -58,6 +58,17 @@ class DataNeutralizerAttributes(BaseModel):
     originalText: str = Field(description="Original text that was neutralized", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
     fileId: Optional[str] = Field(default=None, description="ID of the file this attribute belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
     patternType: str = Field(description="Type of pattern that matched (email, phone, name, etc.)", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
+
+
+class DataNeutralizationSnapshot(BaseModel):
+    """Stores the full neutralized text (with embedded placeholders) per source."""
+    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
+    mandateId: str = Field(description="Mandate scope")
+    featureInstanceId: str = Field(default="", description="Feature instance scope")
+    userId: str = Field(description="User who triggered neutralization")
+    sourceLabel: str = Field(description="Human label, e.g. 'Prompt', 'Kontext', 'Nachricht 3'")
+    neutralizedText: str = Field(description="Full text with [type.uuid] placeholders embedded")
+    placeholderCount: int = Field(default=0, description="Number of placeholders in the text")

 registerModelLabels(
     "DataNeutralizerAttributes",
     {"en": "Neutralized Data Attribute", "fr": "Attribut de données neutralisées"},
@@ -71,5 +82,18 @@ registerModelLabels(
         "patternType": {"en": "Pattern Type", "fr": "Type de modèle"},
     },
 )
+registerModelLabels(
+    "DataNeutralizationSnapshot",
+    {"en": "Neutralization Snapshot", "de": "Neutralisierungs-Snapshot"},
+    {
+        "id": {"en": "ID"},
+        "mandateId": {"en": "Mandate ID"},
+        "featureInstanceId": {"en": "Feature Instance ID"},
+        "userId": {"en": "User ID"},
+        "sourceLabel": {"en": "Source", "de": "Quelle"},
+        "neutralizedText": {"en": "Neutralized Text", "de": "Neutralisierter Text"},
+        "placeholderCount": {"en": "Placeholders", "de": "Platzhalter"},
+    },
+)
diff --git a/modules/features/neutralization/interfaceFeatureNeutralizer.py b/modules/features/neutralization/interfaceFeatureNeutralizer.py
index 1a52e130..22af9683 100644
--- a/modules/features/neutralization/interfaceFeatureNeutralizer.py
+++ b/modules/features/neutralization/interfaceFeatureNeutralizer.py
@@ -11,6 +11,7 @@ from typing import Dict, List, Any, Optional
 from modules.features.neutralization.datamodelFeatureNeutralizer import (
     DataNeutraliserConfig,
     DataNeutralizerAttributes,
+    DataNeutralizationSnapshot,
 )
 from modules.connectors.connectorDbPostgre import DatabaseConnector
 from modules.interfaces.interfaceRbac import getRecordsetWithRBAC
@@ -227,6 +228,74 @@ class InterfaceFeatureNeutralizer:
             logger.error(f"Error deleting attribute by ID: {str(e)}")
             return False

+    # ------------------------------------------------------------------
+    # Snapshot CRUD
+    # ------------------------------------------------------------------
+
+    def getSnapshots(self) -> List[DataNeutralizationSnapshot]:
+        """Return all neutralization snapshots for the current mandate + feature instance."""
+        try:
+            _filter: Dict[str, Any] = {"mandateId": self.mandateId}
+            if self.featureInstanceId:
+                _filter["featureInstanceId"] = self.featureInstanceId
+            rows = getRecordsetWithRBAC(
+                self.db,
+                DataNeutralizationSnapshot,
+                self.currentUser,
+                recordFilter=_filter,
+                mandateId=self.mandateId,
+            )
+            return [
+                DataNeutralizationSnapshot(**{k: v for k, v in r.items() if not k.startswith("_")})
+                for r in rows
+            ]
+        except Exception as e:
+            logger.error(f"Error getting snapshots: {e}")
+            return []
+
+    def clearSnapshots(self) -> int:
+        """Delete all snapshots for the current feature-instance scope. Returns count deleted."""
+        try:
+            _filter: Dict[str, Any] = {"mandateId": self.mandateId}
+            if self.featureInstanceId:
+                _filter["featureInstanceId"] = self.featureInstanceId
+            existing = self.db.getRecordset(DataNeutralizationSnapshot, recordFilter=_filter)
+            for row in existing:
+                self.db.recordDelete(DataNeutralizationSnapshot, row["id"])
+            return len(existing)
+        except Exception as e:
+            logger.error(f"Error clearing snapshots: {e}")
+            return 0
+
+    def createSnapshot(
+        self,
+        sourceLabel: str,
+        neutralizedText: str,
+        placeholderCount: int = 0,
+    ) -> Optional[DataNeutralizationSnapshot]:
+        """Persist one neutralization snapshot."""
+        try:
+            if not self.userId:
+                logger.warning("Cannot create snapshot: missing userId")
+                return None
+            snap = DataNeutralizationSnapshot(
+                mandateId=self.mandateId or "",
+                featureInstanceId=self.featureInstanceId or "",
+                userId=self.userId,
+                sourceLabel=sourceLabel,
+                neutralizedText=neutralizedText,
+                placeholderCount=placeholderCount,
+            )
+            created = self.db.recordCreate(DataNeutralizationSnapshot, snap.model_dump())
+            return DataNeutralizationSnapshot(**{k: v for k, v in created.items() if not k.startswith("_")})
+        except Exception as e:
+            logger.error(f"Error creating snapshot: {e}")
+            return None
+
+    # ------------------------------------------------------------------
+    # Attribute CRUD
+    # ------------------------------------------------------------------
+
     def createAttribute(
         self,
         attributeId: str,
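A minimal round-trip through the new snapshot store, assuming an already-constructed InterfaceFeatureNeutralizer (obtained via the module's getInterface; the variable name iface and the placeholder values are illustrative only):

# Replace any previous snapshots for this feature instance, then persist
# the freshly neutralized prompt so the UI can display it later.
iface.clearSnapshots()
snap = iface.createSnapshot(
    sourceLabel="Prompt",
    neutralizedText="Contact [email.1a2b3c] about [company.9c1b2d]",
    placeholderCount=2,
)
for s in iface.getSnapshots():
    print(s.sourceLabel, s.placeholderCount)

Snapshots store the whole neutralized text per source, unlike attributes, which store one original/placeholder pair each.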
diff --git a/modules/features/neutralization/neutralizePlayground.py b/modules/features/neutralization/neutralizePlayground.py
index b9b66fed..500cc1ba 100644
--- a/modules/features/neutralization/neutralizePlayground.py
+++ b/modules/features/neutralization/neutralizePlayground.py
@@ -6,7 +6,7 @@ from typing import Any, Dict, List, Optional
 from urllib.parse import urlparse, unquote
 from modules.datamodels.datamodelUam import User
-from .datamodelFeatureNeutralizer import DataNeutralizerAttributes, DataNeutraliserConfig
+from .datamodelFeatureNeutralizer import DataNeutralizerAttributes, DataNeutraliserConfig, DataNeutralizationSnapshot
 from .interfaceFeatureNeutralizer import getInterface as _getNeutralizerInterface
 from modules.serviceHub import getInterface as getServices
@@ -86,7 +86,7 @@ class NeutralizationPlayground:
                 'neutralized_file_id': None,
                 'processed_info': {'type': 'error', 'error': 'File could not be decoded as text. Supported: UTF-8, Latin-1. For PDF/Word/Excel, use supported binary formats.'}
             }
-        result = self.services.neutralization.processText(text_content)
+        result = await self.services.neutralization.processTextAsync(text_content)
         result['neutralized_file_name'] = f'neutralized_{filename}'
         # Save neutralized text as file to user files
         if self.services.interfaceDbComponent and result.get('neutralized_text') is not None:
@@ -198,12 +198,28 @@ class NeutralizationPlayground:
         """Resolve UIDs in neutralized text back to original text"""
         return self.services.neutralization.resolveText(text)

+    def getSnapshots(self) -> List[DataNeutralizationSnapshot]:
+        """Return stored neutralization text snapshots."""
+        try:
+            return self.services.neutralization.getSnapshots()
+        except Exception as e:
+            logger.error(f"Error getting snapshots: {e}")
+            return []
+
     def getAttributes(self, fileId: str = None) -> List[DataNeutralizerAttributes]:
         """Get neutralization attributes, optionally filtered by file ID"""
         try:
             allAttributes = self.services.neutralization.getAttributes()
             if fileId:
-                return [attr for attr in allAttributes if attr.fileId == fileId]
+                want = str(fileId).strip()
+
+                def _matches(a: DataNeutralizerAttributes) -> bool:
+                    af = a.fileId
+                    if af is None or (isinstance(af, str) and not str(af).strip()):
+                        return False
+                    return str(af).strip() == want
+
+                return [attr for attr in allAttributes if _matches(attr)]
             return allAttributes
         except Exception as e:
             logger.error(f"Error getting attributes: {str(e)}")
@@ -396,7 +412,7 @@ class SharepointProcessor:
                 textContent = fileContent.decode('utf-8')
             except UnicodeDecodeError:
                 textContent = fileContent.decode('latin-1')
-            result = self.services.neutralization.processText(textContent)
+            result = await self.services.neutralization.processTextAsync(textContent)
             content_to_upload = (result.get('neutralized_text') or '').encode('utf-8')
             neutralizedFilename = f"neutralized_{fileInfo['name']}"
diff --git a/modules/features/neutralization/routeFeatureNeutralizer.py b/modules/features/neutralization/routeFeatureNeutralizer.py
index 03d44f72..2f36efef 100644
--- a/modules/features/neutralization/routeFeatureNeutralizer.py
+++ b/modules/features/neutralization/routeFeatureNeutralizer.py
@@ -8,12 +8,33 @@ import logging
 from modules.auth import limiter, getRequestContext, RequestContext

 # Import interfaces
-from .datamodelFeatureNeutralizer import DataNeutraliserConfig, DataNeutralizerAttributes
+from .datamodelFeatureNeutralizer import DataNeutraliserConfig, DataNeutralizerAttributes, DataNeutralizationSnapshot
 from .neutralizePlayground import NeutralizationPlayground

 # Configure logger
 logger = logging.getLogger(__name__)

+
+def _assertFeatureInstancePathMatchesContext(featureInstanceIdFromPath: str, context: RequestContext) -> None:
+    """Reject path/instance mismatch when request context already carries an instance id."""
+    ctxId = str(context.featureInstanceId).strip() if getattr(context, "featureInstanceId", None) else ""
+    pathId = (featureInstanceIdFromPath or "").strip()
+    if ctxId and pathId and pathId != ctxId:
+        raise HTTPException(
+            status_code=status.HTTP_403_FORBIDDEN,
+            detail="Feature instance id in URL does not match request context (X-Instance-Id)",
+        )
+
+
+def _fetchNeutralizationAttributes(context: RequestContext, fileId: Optional[str]) -> List[DataNeutralizerAttributes]:
+    service = NeutralizationPlayground(
+        context.user,
+        str(context.mandateId) if context.mandateId else "",
+        featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
+    )
+    return service.getAttributes(fileId)
+
+
 # Create router for neutralization endpoints
 router = APIRouter(
     prefix="/api/neutralization",
@@ -208,15 +229,9 @@ def get_neutralization_attributes(
 ) -> List[DataNeutralizerAttributes]:
     """Get neutralization attributes, optionally filtered by file ID"""
     try:
-        service = NeutralizationPlayground(
-            context.user,
-            str(context.mandateId) if context.mandateId else "",
-            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None
-        )
-        attributes = service.getAttributes(fileId)
-
-        return attributes
-
+        return _fetchNeutralizationAttributes(context, fileId)
+    except HTTPException:
+        raise
     except Exception as e:
         logger.error(f"Error getting neutralization attributes: {str(e)}")
         raise HTTPException(
@@ -224,6 +239,72 @@ def get_neutralization_attributes(
             detail=f"Error getting neutralization attributes: {str(e)}"
         )

+
+@router.get("/{feature_instance_id}/attributes", response_model=List[DataNeutralizerAttributes])
+@limiter.limit("30/minute")
+def get_neutralization_attributes_scoped(
+    request: Request,
+    feature_instance_id: str = Path(..., description="Workspace / feature instance id (must match X-Instance-Id when set)"),
+    fileId: Optional[str] = Query(None, description="Filter by file ID"),
+    context: RequestContext = Depends(getRequestContext),
+) -> List[DataNeutralizerAttributes]:
+    """Same as GET /attributes; path includes instance id for workspace UI compatibility."""
+    _assertFeatureInstancePathMatchesContext(feature_instance_id, context)
+    try:
+        return _fetchNeutralizationAttributes(context, fileId)
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"Error getting neutralization attributes: {str(e)}")
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"Error getting neutralization attributes: {str(e)}"
+        )
+
+@router.get("/snapshots", response_model=List[DataNeutralizationSnapshot])
+@limiter.limit("30/minute")
+def get_neutralization_snapshots(
+    request: Request,
+    context: RequestContext = Depends(getRequestContext),
+) -> List[DataNeutralizationSnapshot]:
+    """Return neutralized-text snapshots (full text with placeholders) for the current feature instance."""
+    try:
+        service = NeutralizationPlayground(
+            context.user,
+            str(context.mandateId) if context.mandateId else "",
+            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
+        )
+        return service.getSnapshots()
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"Error getting neutralization snapshots: {e}")
+        raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e))
+
+
+@router.get("/{feature_instance_id}/snapshots", response_model=List[DataNeutralizationSnapshot])
+@limiter.limit("30/minute")
+def get_neutralization_snapshots_scoped(
+    request: Request,
+    feature_instance_id: str = Path(..., description="Workspace instance id (must match X-Instance-Id when set)"),
+    context: RequestContext = Depends(getRequestContext),
+) -> List[DataNeutralizationSnapshot]:
+    """Same as GET /snapshots; path includes instance id for workspace UI (explicit scope)."""
+    _assertFeatureInstancePathMatchesContext(feature_instance_id, context)
+    try:
+        service = NeutralizationPlayground(
+            context.user,
+            str(context.mandateId) if context.mandateId else "",
+            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
+        )
+        return service.getSnapshots()
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"Error getting neutralization snapshots (scoped): {e}")
+        raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e))
+
+
 @router.post("/process-sharepoint", response_model=Dict[str, Any])
 @limiter.limit("5/minute")
 async def process_sharepoint_files(
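How a workspace client might call one of the instance-scoped variants — host, token, and ids below are placeholders; the X-Instance-Id header is what _assertFeatureInstancePathMatchesContext compares against the path segment:

import requests

resp = requests.get(
    "https://app.example.com/api/neutralization/inst-123/snapshots",
    headers={
        "Authorization": "Bearer <token>",
        "X-Instance-Id": "inst-123",  # must equal the path id, else 403
    },
    timeout=30,
)
snapshots = resp.json()  # list of DataNeutralizationSnapshot dicts

When the context carries no instance id at all, the guard deliberately lets the request through, so the path-scoped routes stay usable for clients that only authenticate at mandate level.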
@@ -317,6 +398,21 @@ def get_neutralization_stats(
             detail=f"Error getting neutralization stats: {str(e)}"
         )

+def _deleteSingleNeutralizationAttribute(context: RequestContext, attributeId: str) -> Dict[str, str]:
+    service = NeutralizationPlayground(
+        context.user,
+        str(context.mandateId) if context.mandateId else "",
+        featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
+    )
+    success = service.deleteAttribute(attributeId)
+    if success:
+        return {"message": f"Attribute {attributeId} deleted"}
+    raise HTTPException(
+        status_code=status.HTTP_404_NOT_FOUND,
+        detail=f"Attribute {attributeId} not found",
+    )
+
+
 @router.delete("/attributes/single/{attributeId}", response_model=Dict[str, str])
 @limiter.limit("30/minute")
 def deleteAttribute(
@@ -326,20 +422,7 @@ def deleteAttribute(
 ) -> Dict[str, str]:
     """Delete a single neutralization attribute by ID."""
     try:
-        service = NeutralizationPlayground(
-            context.user,
-            str(context.mandateId) if context.mandateId else "",
-            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None
-        )
-        success = service.deleteAttribute(attributeId)
-
-        if success:
-            return {"message": f"Attribute {attributeId} deleted"}
-        else:
-            raise HTTPException(
-                status_code=status.HTTP_404_NOT_FOUND,
-                detail=f"Attribute {attributeId} not found"
-            )
+        return _deleteSingleNeutralizationAttribute(context, attributeId)
     except HTTPException:
         raise
     except Exception as e:
@@ -347,6 +430,40 @@ def deleteAttribute(
         raise HTTPException(status_code=500, detail=str(e))


+@router.delete("/{feature_instance_id}/attributes/single/{attributeId}", response_model=Dict[str, str])
+@limiter.limit("30/minute")
+def deleteAttributeScoped(
+    request: Request,
+    feature_instance_id: str = Path(..., description="Workspace / feature instance id"),
+    attributeId: str = Path(..., description="Attribute ID to delete"),
+    context: RequestContext = Depends(getRequestContext),
+) -> Dict[str, str]:
+    """Same as DELETE /attributes/single/{attributeId}; path includes instance id for workspace UI."""
+    _assertFeatureInstancePathMatchesContext(feature_instance_id, context)
+    try:
+        return _deleteSingleNeutralizationAttribute(context, attributeId)
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"Error deleting attribute: {str(e)}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+def _retriggerNeutralizationBody(context: RequestContext, fileId: str) -> Dict[str, str]:
+    if not fileId:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail="fileId is required",
+        )
+    service = NeutralizationPlayground(
+        context.user,
+        str(context.mandateId) if context.mandateId else "",
+        featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
+    )
+    service.cleanupFileAttributes(fileId)
+    return {"message": f"Neutralization re-triggered for file {fileId}", "fileId": fileId}
+
+
 @router.post("/retrigger", response_model=Dict[str, str])
 @limiter.limit("10/minute")
 def retriggerNeutralization(
@@ -356,20 +473,26 @@ def retriggerNeutralization(
 ) -> Dict[str, str]:
     """Re-trigger neutralization for a specific file."""
     try:
-        fileId = retriggerData.get("fileId", "")
-        if not fileId:
-            raise HTTPException(
-                status_code=status.HTTP_400_BAD_REQUEST,
-                detail="fileId is required"
-            )
+        return _retriggerNeutralizationBody(context, retriggerData.get("fileId", ""))
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"Error re-triggering neutralization: {str(e)}")
+        raise HTTPException(status_code=500, detail=str(e))

-        service = NeutralizationPlayground(
-            context.user,
-            str(context.mandateId) if context.mandateId else "",
-            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None
-        )
-        service.cleanupFileAttributes(fileId)
-        return {"message": f"Neutralization re-triggered for file {fileId}", "fileId": fileId}
+
+@router.post("/{feature_instance_id}/retrigger", response_model=Dict[str, str])
+@limiter.limit("10/minute")
+def retriggerNeutralizationScoped(
+    request: Request,
+    feature_instance_id: str = Path(..., description="Workspace / feature instance id"),
+    retriggerData: Dict[str, str] = Body(...),
+    context: RequestContext = Depends(getRequestContext),
+) -> Dict[str, str]:
+    """Same as POST /retrigger; path includes instance id for workspace UI compatibility."""
+    _assertFeatureInstancePathMatchesContext(feature_instance_id, context)
+    try:
+        return _retriggerNeutralizationBody(context, retriggerData.get("fileId", ""))
     except HTTPException:
         raise
     except Exception as e:
diff --git a/modules/features/neutralization/serviceNeutralization/mainServiceNeutralization.py b/modules/features/neutralization/serviceNeutralization/mainServiceNeutralization.py
index e583c60b..4c0842d4 100644
--- a/modules/features/neutralization/serviceNeutralization/mainServiceNeutralization.py
+++ b/modules/features/neutralization/serviceNeutralization/mainServiceNeutralization.py
@@ -60,6 +60,12 @@ class NeutralizationService:
                 mandateId=serviceCenter.mandateId or dbApp.mandateId,
                 featureInstanceId=getattr(serviceCenter, 'featureInstanceId', None) or getattr(dbApp, 'featureInstanceId', None)
             )
+        elif serviceCenter and getattr(serviceCenter, "user", None):
+            self.interfaceNeutralizer = getNeutralizerInterface(
+                currentUser=serviceCenter.user,
+                mandateId=getattr(serviceCenter, 'mandateId', None) or getattr(serviceCenter, 'mandate_id', None),
+                featureInstanceId=getattr(serviceCenter, 'featureInstanceId', None) or getattr(serviceCenter, 'feature_instance_id', None),
+            )

         namesList = NamesToParse if isinstance(NamesToParse, list) else []
         self.NamesToParse = namesList
@@ -82,11 +88,213 @@ class NeutralizationService:

     # Public API: process text or file

-    def processText(self, text: str) -> Dict[str, Any]:
-        """Neutralize a raw text string and return a standard result dict."""
-        result = self._neutralizeText(text, 'text')
-        self._persistAttributes(result.get('mapping', {}), None)
-        return result
+    _NEUT_INSTRUCTION = (
+        "Analyze the following text and identify ALL sensitive content that must be neutralized:\n"
+        "1. Personal data (PII): names of persons, email addresses, phone numbers, "
+        "physical addresses, ID numbers, dates of birth, financial data (IBAN, account numbers), "
+        "social security numbers\n"
+        "2. Protected business logic: proprietary algorithms, trade secrets, confidential "
+        "processes, internal procedures, code snippets that reveal implementation details\n"
+        "3. Named entities: company names, product names, project names, brand names\n\n"
+        "Return ONLY a JSON array (no markdown, no explanation):\n"
+        '[{"text":"exact substring","type":"name|email|phone|address|id|financial|logic|company|product|location|other"}]\n\n'
+        "Rules:\n"
+        "- Every entry's 'text' must be an exact, verbatim substring of the input.\n"
+        "- Do NOT include generic words, common language constructs or non-sensitive terms.\n"
+        "- If nothing is sensitive, return [].\n\n"
+    )
+    _BYTES_PER_TOKEN = 3
+    _SELECTOR_MAX_RATIO = 0.8
+    _CHUNK_SAFETY_MARGIN = 0.9
+
+    def _resolveNeutModel(self):
+        """Query the model registry for the best NEUTRALIZATION_TEXT model.
+        Returns the model object (with contextLength etc.) or None."""
+        try:
+            from modules.aicore.aicoreModelRegistry import modelRegistry
+            from modules.aicore.aicoreModelSelector import modelSelector as _modSel
+            from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum
+
+            _models = modelRegistry.getAvailableModels()
+            _opts = AiCallOptions(operationType=OperationTypeEnum.NEUTRALIZATION_TEXT)
+            _failover = _modSel.getFailoverModelList("x", "", _opts, _models)
+            return _failover[0] if _failover else None
+        except Exception as _e:
+            logger.warning(f"_resolveNeutModel failed: {_e}")
+            return None
+
+    def _calcMaxChunkChars(self, model) -> int:
+        """Derive the maximum text-chunk size (in characters) from the selected
+        model's contextLength, mirroring the rules in aicoreModelSelector:
+        promptTokens = promptBytes / 3 must be <= contextLength * 0.8
+        Subtract the instruction overhead and apply a safety margin."""
+        if not model or getattr(model, 'contextLength', 0) <= 0:
+            return 5000
+        _instructionBytes = len(self._NEUT_INSTRUCTION.encode('utf-8')) + 30
+        _maxPromptBytes = int(model.contextLength * self._SELECTOR_MAX_RATIO * self._BYTES_PER_TOKEN)
+        _maxChunkChars = int((_maxPromptBytes - _instructionBytes) * self._CHUNK_SAFETY_MARGIN)
+        return max(_maxChunkChars, 500)
+
+    @staticmethod
+    def _splitTextIntoChunks(text: str, maxChars: int) -> List[str]:
+        """Split *text* into chunks of at most *maxChars*, preferring paragraph
+        then sentence boundaries so that the LLM sees coherent blocks."""
+        if len(text) <= maxChars:
+            return [text]
+
+        chunks: List[str] = []
+        remaining = text
+        while remaining:
+            if len(remaining) <= maxChars:
+                chunks.append(remaining)
+                break
+            _cut = maxChars
+            _para = remaining.rfind("\n\n", 0, _cut)
+            if _para > maxChars // 3:
+                _cut = _para + 2
+            else:
+                _nl = remaining.rfind("\n", 0, _cut)
+                if _nl > maxChars // 3:
+                    _cut = _nl + 1
+                else:
+                    _dot = remaining.rfind(". ", 0, _cut)
+                    if _dot > maxChars // 3:
+                        _cut = _dot + 2
+                    else:
+                        _sp = remaining.rfind(" ", 0, _cut)
+                        if _sp > maxChars // 3:
+                            _cut = _sp + 1
+            chunks.append(remaining[:_cut])
+            remaining = remaining[_cut:]
+        return chunks
+
+    async def _analyseChunk(self, aiService, chunkText: str) -> List[dict]:
+        """Send one chunk to the NEUTRALIZATION_TEXT model, return raw findings list."""
+        from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum
+
+        _prompt = self._NEUT_INSTRUCTION + "Text to analyze:\n---\n" + chunkText + "\n---"
+        _request = AiCallRequest(
+            prompt=_prompt,
+            options=AiCallOptions(operationType=OperationTypeEnum.NEUTRALIZATION_TEXT),
+        )
+        _response = await aiService.callAi(_request)
+        if not _response or not getattr(_response, 'content', None):
+            raise RuntimeError(
+                "Neutralization AI call returned no response "
+                "(no model available for NEUTRALIZATION_TEXT?)"
+            )
+        if getattr(_response, 'errorCount', 0) > 0 or getattr(_response, 'modelName', '') == 'error':
+            raise RuntimeError(
+                f"Neutralization AI call failed: {_response.content}"
+            )
+        _content = _response.content.strip()
+        if _content.startswith("```"):
+            _content = _content.split("\n", 1)[-1].rsplit("```", 1)[0].strip()
+        try:
+            return json.loads(_content)
+        except json.JSONDecodeError:
+            _bracket = _content.find("[")
+            if _bracket >= 0:
+                try:
+                    return json.loads(_content[_bracket:])
+                except json.JSONDecodeError:
+                    pass
+            return []
+
+    async def processTextAsync(self, text: str, fileId: Optional[str] = None) -> Dict[str, Any]:
+        """AI-powered text neutralization with automatic chunking.
+
+        If *text* exceeds the safe token budget for the neutralization model
+        it is split into smaller chunks, each analysed separately. Findings
+        are merged and de-duplicated before placeholder replacement.
+
+        Regex patterns run as a supplementary pass to catch anything the
+        model missed.
+        """
+        import uuid as _uuid
+
+        aiService = None
+        if self._getService:
+            try:
+                aiService = self._getService("ai")
+            except Exception:
+                pass
+
+        aiMapping: Dict[str, str] = {}
+
+        if not aiService or not hasattr(aiService, 'callAi'):
+            raise RuntimeError("Neutralization requires an AI service but none is available")
+
+        if text.strip():
+            _neutModel = self._resolveNeutModel()
+            _maxChunkChars = self._calcMaxChunkChars(_neutModel)
+            logger.info(
+                f"processTextAsync: model={getattr(_neutModel, 'name', '?')}, "
+                f"contextLength={getattr(_neutModel, 'contextLength', '?')} tokens, "
+                f"maxChunkChars={_maxChunkChars}"
+            )
+
+            _chunks = self._splitTextIntoChunks(text, _maxChunkChars)
+            if len(_chunks) > 1:
+                logger.info(
+                    f"processTextAsync: text ({len(text)} chars) "
+                    f"split into {len(_chunks)} chunk(s) of max {_maxChunkChars} chars"
+                )
+
+            for _chunkIdx, _chunkText in enumerate(_chunks):
+                _findings = await self._analyseChunk(aiService, _chunkText)
+                if not isinstance(_findings, list):
+                    continue
+                for _f in _findings:
+                    if not isinstance(_f, dict):
+                        continue
+                    _origText = _f.get("text", "")
+                    _patType = _f.get("type", "other").lower()
+                    if not _origText or _origText not in text:
+                        continue
+                    if _origText in aiMapping:
+                        continue
+                    _uid = str(_uuid.uuid4())
+                    _placeholder = f"[{_patType}.{_uid}]"
+                    aiMapping[_origText] = _placeholder
+
+            logger.info(f"AI neutralization found {len(aiMapping)} item(s)" +
+                        (f" across {len(_chunks)} chunk(s)" if len(_chunks) > 1 else ""))
+
+        neutralizedText = text
+        for _orig, _ph in sorted(aiMapping.items(), key=lambda x: -len(x[0])):
+            neutralizedText = neutralizedText.replace(_orig, _ph)
+
+        regexMapping: Dict[str, str] = {}
+        finalText = neutralizedText
+
+        allMapping = {**aiMapping, **regexMapping}
+        if allMapping:
+            _loop = asyncio.get_event_loop()
+            await _loop.run_in_executor(
+                None, self._persistAttributes, allMapping, fileId
+            )
+            logger.debug(f"processTextAsync: {len(allMapping)} attribute(s) persisted")
+
+        return {
+            'neutralized_text': finalText,
+            'mapping': allMapping,
+            'attributes': [
+                NeutralizationAttribute(original=k, placeholder=v)
+                for k, v in allMapping.items()
+            ],
+            'processed_info': {'type': 'text', 'ai_findings': len(aiMapping), 'regex_findings': len(regexMapping)},
+        }
+
+    def processText(self, text: str, fileId: Optional[str] = None) -> Dict[str, Any]:
+        """Sync wrapper around processTextAsync. Propagates errors."""
+        try:
+            return asyncio.run(self.processTextAsync(text, fileId))
+        except RuntimeError as _re:
+            if "cannot be called from a running event loop" in str(_re):
+                loop = asyncio.get_event_loop()
+                return loop.run_until_complete(self.processTextAsync(text, fileId))
+            raise
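To make the budget arithmetic and splitting behaviour above concrete: for a hypothetical model with contextLength = 8000 tokens and an instruction overhead of roughly 850 bytes, _calcMaxChunkChars works out as below, and _splitTextIntoChunks then cuts each window at the latest "good" boundary it can find (all numbers here are illustrative, not measured):

maxPromptBytes = int(8000 * 0.8 * 3)       # 19200 — 80% of context at 3 bytes/token
maxChunkChars  = int((19200 - 850) * 0.9)  # 16515 — minus instruction, 10% margin

# Boundary preference within each window of maxChars characters:
#   1. last "\n\n" (paragraph)   2. last "\n" (line)
#   3. last ". " (sentence)      4. last " " (word)
# A boundary only counts if it lies past maxChars // 3, so chunks never
# degenerate into tiny fragments.

Placeholder substitution afterwards deliberately replaces longest originals first (the sorted(..., key=lambda x: -len(x[0]))), so a short finding that is a substring of a longer one cannot corrupt the longer replacement.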
     def processFile(self, fileId: str) -> Dict[str, Any]:
         """Neutralize a file referenced by its fileId using component interface.
@@ -153,8 +361,7 @@ class NeutralizationService:
                 raise ValueError("Unable to decode file content as text.")
             textContent = decoded

-        result = self._neutralizeText(textContent, textType)
-        self._persistAttributes(result.get('mapping', {}), fileId)
+        result = self.processText(textContent, fileId)
         if fileName:
             result['neutralized_file_name'] = f"neutralized_{fileName}"
         result['file_id'] = fileId
@@ -319,6 +526,22 @@ class NeutralizationService:
             return False
         return self.interfaceNeutralizer.deleteNeutralizationAttributes(fileId)

+    def getSnapshots(self):
+        if not self.interfaceNeutralizer:
+            return []
+        return self.interfaceNeutralizer.getSnapshots()
+
+    def clearSnapshots(self) -> int:
+        if not self.interfaceNeutralizer:
+            return 0
+        return self.interfaceNeutralizer.clearSnapshots()
+
+    def saveSnapshot(self, sourceLabel: str, neutralizedText: str, placeholderCount: int = 0):
+        if not self.interfaceNeutralizer:
+            logger.warning("saveSnapshot: interfaceNeutralizer is None — snapshot not stored")
+            return None
+        return self.interfaceNeutralizer.createSnapshot(sourceLabel, neutralizedText, placeholderCount)
+
     def _persistAttributes(self, mapping: Dict[str, str], fileId: Optional[str]) -> None:
         """Persist mapping to DB for resolve to work.
        mapping: originalText -> placeholder e.g. '[email.uuid]'"""
         if not self.interfaceNeutralizer or not mapping:
@@ -393,7 +616,7 @@ class NeutralizationService:
                     except Exception as _imgErr:
                         logger.warning(f"Image check failed in binary file '{fileName}': {_imgErr}, removing (fail-safe)")
                         continue
-                nr = self._neutralizeText(str(data), 'text' if type_group != 'table' else 'csv')
+                nr = await self.processTextAsync(str(data), fileId)
                 proc = nr.get('processed_info', {}) or {}
                 if isinstance(proc, dict) and proc.get('type') == 'error':
                     neutralization_error = proc.get('error', 'Neutralization failed')
@@ -402,7 +625,6 @@ class NeutralizationService:
                 all_mapping.update(mapping)
                 new_part = {**p, 'data': neu_text}
                 neutralized_parts.append(new_part)
-        self._persistAttributes(all_mapping, fileId)

         # 3. PDF: Use in-place only; no fallback to render
         if mimeType == "application/pdf":
@@ -546,10 +768,31 @@ class NeutralizationService:

     # Helper functions

+    def _neutralizeTextLight(self, text: str) -> Dict[str, Any]:
+        """Regex-only supplementary pass using already-initialised processors.
+
+        Unlike ``_neutralizeText`` this does **no** DB I/O
+        (``_reloadNamesFromConfig`` is skipped) so it is safe to call from
+        an async context without blocking the event-loop or risking a
+        DB-connection-pool deadlock during parallel document processing.
+        """
+        try:
+            data, mapping, replaced_fields, processed_info = self.textProcessor.processTextContent(text)
+            neutralized_text = str(data)
+            attributes = [NeutralizationAttribute(original=k, placeholder=v) for k, v in mapping.items()]
+            return NeutralizationResult(
+                neutralized_text=neutralized_text,
+                mapping=mapping,
+                attributes=attributes,
+                processed_info=processed_info,
+            ).model_dump()
+        except Exception as e:
+            logger.warning(f"_neutralizeTextLight error: {e}")
+            return {'neutralized_text': text, 'mapping': {}, 'attributes': [], 'processed_info': {'type': 'error', 'error': str(e)}}
+
     def _neutralizeText(self, text: str, textType: str = None) -> Dict[str, Any]:
         """Process text and return unified dict for API consumption."""
         try:
-            # Reload names from config before processing to ensure we have the latest names
             self._reloadNamesFromConfig()

             # Auto-detect content type if not provided
diff --git a/modules/features/workspace/routeFeatureWorkspace.py b/modules/features/workspace/routeFeatureWorkspace.py
index 6271a8cd..1caa9707 100644
--- a/modules/features/workspace/routeFeatureWorkspace.py
+++ b/modules/features/workspace/routeFeatureWorkspace.py
@@ -689,6 +689,9 @@ async def _runWorkspaceAgent(

     if allowedProviders:
         aiService.services.allowedProviders = allowedProviders
+        logger.info(f"Workspace agent: allowedProviders={allowedProviders}")
+    else:
+        logger.debug("Workspace agent: no allowedProviders in request")

     if requireNeutralization is not None:
         ctx.requireNeutralization = requireNeutralization
diff --git a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py
index 4529ede0..23de01e7 100644
--- a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py
+++ b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py
@@ -690,8 +690,8 @@ def _registerCoreTools(registry: ToolRegistry, services):
         if _fileNeedNeutralize:
             try:
                 _nSvc = services.getService("neutralization") if hasattr(services, "getService") else None
-                if _nSvc and hasattr(_nSvc, 'processText'):
-                    _nResult = _nSvc.processText(text)
+                if _nSvc and hasattr(_nSvc, 'processTextAsync'):
+                    _nResult = await _nSvc.processTextAsync(text, fileId)
                     if _nResult and _nResult.get("neutralized_text"):
                         text = _nResult["neutralized_text"]
                         logger.debug(f"readFile: neutralized text for file {fileId}")
@@ -3054,7 +3054,7 @@ def _registerCoreTools(registry: ToolRegistry, services):
             if not neutralizationService.interfaceDbComponent:
                 neutralizationService.interfaceDbComponent = services.chat.interfaceDbComponent
             if text:
-                result = neutralizationService.processText(text)
+                result = await neutralizationService.processTextAsync(text, fileId or None)
             else:
                 result = neutralizationService.processFile(fileId)
             if result:
diff --git a/modules/serviceCenter/services/serviceAi/mainServiceAi.py b/modules/serviceCenter/services/serviceAi/mainServiceAi.py
index 9ff6437d..a9df1e9b 100644
--- a/modules/serviceCenter/services/serviceAi/mainServiceAi.py
+++ b/modules/serviceCenter/services/serviceAi/mainServiceAi.py
@@ -181,13 +181,11 @@ class AiService:
         _wasNeutralized = False
         _excludedDocs: List[str] = []
         if self._shouldNeutralize(request):
-            request, _wasNeutralized, _excludedDocs = self._neutralizeRequest(request)
+            request, _wasNeutralized, _excludedDocs = await self._neutralizeRequest(request)
             if _excludedDocs:
                 logger.warning(f"Neutralization partial failures (continuing): {_excludedDocs}")

-        # Set billing callback on aiObjects BEFORE the AI call
-        # This callback is invoked by _callWithModel() after EVERY individual model call
-        # For parallel content parts (e.g., 200 MB doc), each model call creates its own transaction
+        logger.debug("callAi: neutralization phase done, starting main AI call")
         self.aiObjects.billingCallback = self._createBillingCallback()

         try:
@@ -229,10 +227,11 @@ class AiService:
         _wasNeutralized = False
         _excludedDocs: List[str] = []
         if self._shouldNeutralize(request):
-            request, _wasNeutralized, _excludedDocs = self._neutralizeRequest(request)
+            request, _wasNeutralized, _excludedDocs = await self._neutralizeRequest(request)
             if _excludedDocs:
                 logger.warning(f"Neutralization partial failures in stream (continuing): {_excludedDocs}")

+        logger.debug("callAiStream: neutralization phase done, starting main AI stream")
         self.aiObjects.billingCallback = self._createBillingCallback()
         try:
             async for chunk in self.aiObjects.callWithTextContextStream(request):
@@ -557,6 +556,25 @@ detectedIntent-Werte:
     # NEUTRALIZATION: Centralized prompt neutralization / response rehydration
     # =========================================================================

+    async def _hasNeutralizationModel(self) -> bool:
+        """Fast check: is at least one model available for NEUTRALIZATION_TEXT
+        given the current effective provider list? No AI call is made."""
+        try:
+            from modules.aicore.aicoreModelRegistry import modelRegistry
+            from modules.aicore.aicoreModelSelector import modelSelector as _modSel
+            from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum
+
+            _models = modelRegistry.getAvailableModels()
+            _providers = self._calculateEffectiveProviders()
+            if _providers:
+                _models = [m for m in _models if m.connectorType in _providers]
+            _opts = AiCallOptions(operationType=OperationTypeEnum.NEUTRALIZATION_TEXT)
+            _failover = _modSel.getFailoverModelList("x", "", _opts, _models)
+            return bool(_failover)
+        except Exception as _e:
+            logger.warning(f"_hasNeutralizationModel check failed: {_e}")
+            return True
+
     def _shouldNeutralize(self, request: AiCallRequest) -> bool:
         """Check if this AI request should have neutralization applied.

@@ -566,11 +584,17 @@ detectedIntent-Werte:
         3. Per-request (request.requireNeutralization)

         No source can override another's True with False.
+        Neutralization calls themselves (NEUTRALIZATION_TEXT / NEUTRALIZATION_IMAGE)
+        are never re-neutralized (recursion guard).
         """
         try:
             if not request.prompt and not request.messages and not request.context:
                 return False

+            _opType = request.options.operationType if request.options else None
+            if _opType in (OperationTypeEnum.NEUTRALIZATION_TEXT, OperationTypeEnum.NEUTRALIZATION_IMAGE):
+                return False
+
             _sources = []

             # Source 1: Feature-Instance config
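The three-source rule in _shouldNeutralize (feature-instance config, workspace context, per-request flag, where no source may override another's True) reduces to a simple any() once the flags are collected. A hypothetical standalone restatement of just that combination logic:

from typing import Optional

def combineNeutralizationFlags(*sources: Optional[bool]) -> bool:
    # None means "this source expressed no opinion"; a single True wins
    # and can never be cancelled by a False from another source.
    return any(s is True for s in sources)

combineNeutralizationFlags(None, False, True)   # True — per-request flag wins
combineNeutralizationFlags(False, False, None)  # False — nobody required it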
@@ -600,11 +624,15 @@ detectedIntent-Werte:
             logger.error(f"_shouldNeutralize check failed: {e} — defaulting to False")
             return False

-    def _neutralizeRequest(self, request: AiCallRequest) -> Tuple[AiCallRequest, bool, List[str]]:
-        """Neutralize the prompt text and messages in an AiCallRequest.
+    async def _neutralizeRequest(self, request: AiCallRequest) -> Tuple[AiCallRequest, bool, List[str]]:
+        """Neutralize the prompt text and messages in an AiCallRequest (async).

         Returns (modifiedRequest, wasNeutralized, excludedDocs).

+        Uses ``processTextAsync`` which calls AI with NEUTRALIZATION_TEXT
+        to identify PII, protected logic and names — then applies regex as
+        supplementary pass.
+
         FAILSAFE behaviour when ``requireNeutralization is True`` (explicit):
         - Service unavailable → raises (caller must not send raw data to AI).
         - Prompt neutralization fails → raises.
@@ -619,7 +647,7 @@ detectedIntent-Werte:
         excludedDocs: List[str] = []

         neutralSvc = self._get_service("neutralization")
-        if not neutralSvc or not hasattr(neutralSvc, 'processText'):
+        if not neutralSvc or not hasattr(neutralSvc, 'processTextAsync'):
             if _hardMode:
                 raise RuntimeError("Neutralization explicitly required but service unavailable — AI call BLOCKED")
             logger.warning("Neutralization required by config but service unavailable — continuing without neutralization")
@@ -627,13 +655,25 @@ detectedIntent-Werte:
             return request, False, excludedDocs

         _wasNeutralized = False
+        _snapshots: list = []
+
+        if _hardMode:
+            _hasNeutModel = await self._hasNeutralizationModel()
+            if not _hasNeutModel:
+                raise RuntimeError(
+                    "Neutralisierung ist aktiviert, aber es ist kein AI-Modell für "
+                    "NEUTRALIZATION_TEXT verfügbar. Bitte ein Modell für Neutralisierung "
+                    "freigeben oder die Neutralisierung deaktivieren."
+                )

         if request.prompt:
+            logger.debug(f"_neutralizeRequest: neutralizing prompt ({len(request.prompt)} chars)")
             try:
-                result = neutralSvc.processText(request.prompt)
+                result = await neutralSvc.processTextAsync(request.prompt)
                 if result and result.get("neutralized_text"):
                     request.prompt = result["neutralized_text"]
                     _wasNeutralized = True
+                    _snapshots.append(("Prompt", result["neutralized_text"], len(result.get("mapping", {}))))
                     logger.debug("Neutralized prompt in AiCallRequest")
                 else:
                     if _hardMode:
@@ -649,11 +689,13 @@ detectedIntent-Werte:
                 excludedDocs.append(f"Prompt neutralization error: {e}")

         if request.context:
+            logger.debug(f"_neutralizeRequest: neutralizing context ({len(request.context)} chars)")
             try:
-                result = neutralSvc.processText(request.context)
+                result = await neutralSvc.processTextAsync(request.context)
                 if result and result.get("neutralized_text"):
                     request.context = result["neutralized_text"]
                     _wasNeutralized = True
+                    _snapshots.append(("Kontext", result["neutralized_text"], len(result.get("mapping", {}))))
                     logger.debug("Neutralized context in AiCallRequest")
                 else:
                     if _hardMode:
@@ -668,6 +710,9 @@ detectedIntent-Werte:
                 logger.warning(f"Neutralization of context failed: {e} — sending original context")
                 excludedDocs.append(f"Context neutralization error: {e}")

+        _msgCount = len(request.messages) if request.messages and isinstance(request.messages, list) else 0
+        if _msgCount:
+            logger.debug(f"_neutralizeRequest: neutralizing {_msgCount} message(s)")
         if request.messages and isinstance(request.messages, list):
             cleanMessages = []
             for idx, msg in enumerate(request.messages):
@@ -680,27 +725,33 @@ detectedIntent-Werte:
                         cleanMessages.append(msg)
                         continue
                     try:
-                        result = neutralSvc.processText(content)
+                        result = await neutralSvc.processTextAsync(content)
                         if result and result.get("neutralized_text"):
                             msg["content"] = result["neutralized_text"]
                             _wasNeutralized = True
+                            _role = msg.get("role", "?")
+                            _snapshots.append((f"Nachricht {idx+1} ({_role})", result["neutralized_text"], len(result.get("mapping", {}))))
                             cleanMessages.append(msg)
                         else:
                             if _hardMode:
-                                logger.warning(f"Message[{idx}] neutralization empty — REMOVING message (hard mode)")
-                                excludedDocs.append(f"Message[{idx}] neutralization failed; message REMOVED")
-                            else:
-                                logger.warning(f"Neutralization of message[{idx}] returned no neutralized_text — keeping original")
-                                excludedDocs.append(f"Message[{idx}] neutralization failed; original kept")
-                                cleanMessages.append(msg)
+                                raise RuntimeError(
+                                    f"Neutralisierung von Nachricht {idx+1}/{_msgCount} schlug fehl "
+                                    f"(leere Antwort). Konversation kann nicht sicher gesendet werden."
+                                )
+                            logger.warning(f"Neutralization of message[{idx}] returned no neutralized_text — keeping original")
+                            excludedDocs.append(f"Message[{idx}] neutralization failed; original kept")
+                            cleanMessages.append(msg)
+                    except RuntimeError:
+                        raise
                     except Exception as e:
                         if _hardMode:
-                            logger.warning(f"Message[{idx}] neutralization error — REMOVING message (hard mode): {e}")
-                            excludedDocs.append(f"Message[{idx}] neutralization error; message REMOVED: {e}")
-                        else:
-                            logger.warning(f"Neutralization of message[{idx}] failed: {e} — keeping original")
-                            excludedDocs.append(f"Message[{idx}] neutralization error: {e}")
-                            cleanMessages.append(msg)
+                            raise RuntimeError(
+                                f"Neutralisierung von Nachricht {idx+1}/{_msgCount} schlug fehl: {e}. "
+                                f"Konversation kann nicht sicher gesendet werden."
+                            ) from e
+                        logger.warning(f"Neutralization of message[{idx}] failed: {e} — keeping original")
+                        excludedDocs.append(f"Message[{idx}] neutralization error: {e}")
+                        cleanMessages.append(msg)
                 elif isinstance(content, list):
                     _cleanParts = []
                     for _partIdx, _part in enumerate(content):
@@ -710,23 +761,29 @@ detectedIntent-Werte:
                         _partType = _part.get("type", "")
                         if _partType == "text" and _part.get("text"):
                             try:
-                                _result = neutralSvc.processText(_part["text"])
+                                _result = await neutralSvc.processTextAsync(_part["text"])
                                 if _result and _result.get("neutralized_text"):
                                     _part["text"] = _result["neutralized_text"]
                                     _wasNeutralized = True
+                                    _role = msg.get("role", "?")
+                                    _snapshots.append((f"Nachricht {idx+1}.{_partIdx+1} ({_role})", _result["neutralized_text"], len(_result.get("mapping", {}))))
                                     _cleanParts.append(_part)
                                 else:
                                     if _hardMode:
-                                        logger.warning(f"Message[{idx}].content[{_partIdx}] text neutralization empty — REMOVING part")
-                                        excludedDocs.append(f"Message[{idx}].content[{_partIdx}] text removed")
-                                    else:
-                                        _cleanParts.append(_part)
+                                        raise RuntimeError(
+                                            f"Neutralisierung von Nachricht {idx+1}, Teil {_partIdx+1} "
+                                            f"schlug fehl (leere Antwort)."
+ ) + _cleanParts.append(_part) + except RuntimeError: + raise except Exception as e: if _hardMode: - logger.warning(f"Message[{idx}].content[{_partIdx}] text neutralization error — REMOVING: {e}") - excludedDocs.append(f"Message[{idx}].content[{_partIdx}] text error: {e}") - else: - _cleanParts.append(_part) + raise RuntimeError( + f"Neutralisierung von Nachricht {idx+1}, Teil {_partIdx+1} " + f"schlug fehl: {e}" + ) from e + _cleanParts.append(_part) elif _partType == "image_url": if _hardMode: logger.warning(f"Message[{idx}].content[{_partIdx}] image_url — REMOVING (neutralization active)") @@ -738,12 +795,12 @@ detectedIntent-Werte: if _cleanParts: msg["content"] = _cleanParts cleanMessages.append(msg) - elif _hardMode: - logger.warning(f"Message[{idx}] all parts removed — REMOVING message") - excludedDocs.append(f"Message[{idx}] fully removed after neutralization") + else: + cleanMessages.append(msg) else: cleanMessages.append(msg) request.messages = cleanMessages + logger.debug(f"_neutralizeRequest: messages done, {len(cleanMessages)} kept of {_msgCount}") if hasattr(request, 'contentParts') and request.contentParts: _cleanParts = [] @@ -752,10 +809,11 @@ detectedIntent-Werte: _data = getattr(_cp, 'data', '') or '' if _tg in ('text', 'table') and _data: try: - _result = neutralSvc.processText(str(_data)) + _result = await neutralSvc.processTextAsync(str(_data)) if _result and _result.get("neutralized_text"): _cp.data = _result["neutralized_text"] _wasNeutralized = True + _snapshots.append((f"Inhalt {_cpIdx+1} ({_tg})", _result["neutralized_text"], len(_result.get("mapping", {})))) _cleanParts.append(_cp) else: if _hardMode: @@ -778,7 +836,18 @@ detectedIntent-Werte: else: _cleanParts.append(_cp) request.contentParts = _cleanParts + logger.debug(f"_neutralizeRequest: contentParts done, {len(_cleanParts)} kept") + if _snapshots and _wasNeutralized: + try: + neutralSvc.clearSnapshots() + for _label, _text, _phCount in _snapshots: + neutralSvc.saveSnapshot(_label, _text, _phCount) + logger.debug(f"_neutralizeRequest: saved {len(_snapshots)} snapshot(s)") + except Exception as _snapErr: + logger.warning(f"_neutralizeRequest: could not save snapshots: {_snapErr}") + + logger.info(f"_neutralizeRequest complete: neutralized={_wasNeutralized}, excluded={len(excludedDocs)}") return request, _wasNeutralized, excludedDocs def _rehydrateResponse(self, responseText: str) -> str: diff --git a/modules/serviceCenter/services/serviceGeneration/renderers/registry.py b/modules/serviceCenter/services/serviceGeneration/renderers/registry.py index adb83275..553c16a1 100644 --- a/modules/serviceCenter/services/serviceGeneration/renderers/registry.py +++ b/modules/serviceCenter/services/serviceGeneration/renderers/registry.py @@ -11,6 +11,7 @@ import logging import importlib from typing import Dict, Type, List, Optional, Tuple from .documentRendererBaseTemplate import BaseRenderer +from .codeRendererBaseTemplate import BaseCodeRenderer logger = logging.getLogger(__name__) @@ -52,9 +53,9 @@ class RendererRegistry: for attrName in dir(module): attr = getattr(module, attrName) - if (isinstance(attr, type) and - issubclass(attr, BaseRenderer) and - attr != BaseRenderer and + if (isinstance(attr, type) and + issubclass(attr, BaseRenderer) and + attr not in (BaseRenderer, BaseCodeRenderer) and hasattr(attr, 'getSupportedFormats')): self._registerRendererClass(attr) @@ -72,6 +73,8 @@ class RendererRegistry: """Register a renderer class keyed by (format, outputStyle).""" try: supportedFormats = 
rendererClass.getSupportedFormats() + if not supportedFormats: + return priority = rendererClass.getPriority() if hasattr(rendererClass, 'getPriority') else 0 for formatName in supportedFormats: diff --git a/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py b/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py index 49774a38..9404a567 100644 --- a/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py +++ b/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py @@ -158,7 +158,7 @@ class KnowledgeService: if not _textContent: continue try: - _neutralResult = _neutralSvc.processText(_textContent) + _neutralResult = await _neutralSvc.processTextAsync(_textContent, fileId) if _neutralResult and _neutralResult.get("neutralized_text"): _obj["data"] = _neutralResult["neutralized_text"] _neutralizedObjects.append(_obj) diff --git a/modules/workflows/methods/methodContext/actions/neutralizeData.py b/modules/workflows/methods/methodContext/actions/neutralizeData.py index bd032cac..b0fc5c24 100644 --- a/modules/workflows/methods/methodContext/actions/neutralizeData.py +++ b/modules/workflows/methods/methodContext/actions/neutralizeData.py @@ -169,7 +169,7 @@ async def neutralizeData(self, parameters: Dict[str, Any]) -> ActionResult: f"Neutralizing part {len(neutralizedParts) + 1} of document {i+1}" ) - neutralizationResult = self.services.neutralization.processText(part.data) + neutralizationResult = await self.services.neutralization.processTextAsync(part.data) if neutralizationResult and 'neutralized_text' in neutralizationResult: neutralizedData = neutralizationResult['neutralized_text'] From a787cdf6bf7a6c0b036387d6009f9f30183d6891 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Mon, 30 Mar 2026 23:03:36 +0200 Subject: [PATCH 15/33] fixed onboarding flow --- modules/datamodels/datamodelSubscription.py | 1 + modules/features/automation/mainAutomation.py | 2 +- .../features/automation2/mainAutomation2.py | 19 +- modules/features/commcoach/mainCommcoach.py | 34 +- .../commcoach/tests/test_mainCommcoach.py | 2 +- .../neutralization/mainNeutralization.py | 37 +- modules/features/realEstate/mainRealEstate.py | 53 ++- .../teamsbot/interfaceFeatureTeamsbot.py | 15 - modules/features/teamsbot/mainTeamsbot.py | 22 +- modules/features/trustee/mainTrustee.py | 49 +- modules/interfaces/interfaceDbApp.py | 63 ++- modules/interfaces/interfaceDbBilling.py | 11 - modules/interfaces/interfaceDbManagement.py | 23 +- modules/routes/routeDataUsers.py | 39 +- modules/routes/routeInvitations.py | 51 +-- modules/routes/routeSecurityLocal.py | 424 ++++++++++++------ modules/routes/routeStore.py | 14 +- .../services/serviceChat/mainServiceChat.py | 12 +- .../mainServiceGeneration.py | 2 +- 19 files changed, 550 insertions(+), 323 deletions(-) diff --git a/modules/datamodels/datamodelSubscription.py b/modules/datamodels/datamodelSubscription.py index c5547c0a..8fcf10f2 100644 --- a/modules/datamodels/datamodelSubscription.py +++ b/modules/datamodels/datamodelSubscription.py @@ -31,6 +31,7 @@ OPERATIVE_STATUSES = {SubscriptionStatusEnum.ACTIVE, SubscriptionStatusEnum.TRIA ALLOWED_TRANSITIONS = { (SubscriptionStatusEnum.PENDING, SubscriptionStatusEnum.ACTIVE), + (SubscriptionStatusEnum.PENDING, SubscriptionStatusEnum.TRIALING), (SubscriptionStatusEnum.PENDING, SubscriptionStatusEnum.SCHEDULED), (SubscriptionStatusEnum.PENDING, SubscriptionStatusEnum.EXPIRED), (SubscriptionStatusEnum.SCHEDULED, SubscriptionStatusEnum.ACTIVE), diff --git 
a/modules/features/automation/mainAutomation.py b/modules/features/automation/mainAutomation.py index 4bb30f7f..d56804fd 100644 --- a/modules/features/automation/mainAutomation.py +++ b/modules/features/automation/mainAutomation.py @@ -227,7 +227,7 @@ def getFeatureDefinition() -> Dict[str, Any]: "code": FEATURE_CODE, "label": FEATURE_LABEL, "icon": FEATURE_ICON, - "autoCreateInstance": True, # Automatically create instance in root mandate during bootstrap + "autoCreateInstance": False, } diff --git a/modules/features/automation2/mainAutomation2.py b/modules/features/automation2/mainAutomation2.py index 08038e68..c0bee3fe 100644 --- a/modules/features/automation2/mainAutomation2.py +++ b/modules/features/automation2/mainAutomation2.py @@ -60,12 +60,25 @@ RESOURCE_OBJECTS = [ ] TEMPLATE_ROLES = [ + { + "roleLabel": "automation2-viewer", + "description": { + "en": "Automation2 Viewer - View workflows (read-only)", + "de": "Automation2 Betrachter - Workflows ansehen (nur lesen)", + "fr": "Visualiseur Automation2 - Consulter les workflows (lecture seule)", + }, + "accessRules": [ + {"context": "UI", "item": "ui.feature.automation2.workflows", "view": True}, + {"context": "UI", "item": "ui.feature.automation2.workflows-tasks", "view": True}, + {"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"}, + ], + }, { "roleLabel": "automation2-user", "description": { "en": "Automation2 User - Use automation2 flow builder", "de": "Automation2 Benutzer - Flow-Builder nutzen", - "fr": "Utilisateur Automation2 - Utiliser le flow builder" + "fr": "Utilisateur Automation2 - Utiliser le flow builder", }, "accessRules": [ {"context": "UI", "item": "ui.feature.automation2.editor", "view": True}, @@ -75,7 +88,7 @@ TEMPLATE_ROLES = [ {"context": "RESOURCE", "item": "resource.feature.automation2.node-types", "view": True}, {"context": "RESOURCE", "item": "resource.feature.automation2.execute", "view": True}, {"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "m"}, - ] + ], }, { "roleLabel": "automation2-admin", @@ -188,7 +201,7 @@ def getFeatureDefinition() -> Dict[str, Any]: "code": FEATURE_CODE, "label": FEATURE_LABEL, "icon": FEATURE_ICON, - "autoCreateInstance": True, + "autoCreateInstance": False, } diff --git a/modules/features/commcoach/mainCommcoach.py b/modules/features/commcoach/mainCommcoach.py index e8abcee8..9d949e13 100644 --- a/modules/features/commcoach/mainCommcoach.py +++ b/modules/features/commcoach/mainCommcoach.py @@ -109,12 +109,27 @@ RESOURCE_OBJECTS = [ ] TEMPLATE_ROLES = [ + { + "roleLabel": "commcoach-viewer", + "description": { + "en": "Communication Coach Viewer - View coaching data (read-only)", + "de": "Kommunikations-Coach Betrachter - Coaching-Daten ansehen (nur lesen)", + "fr": "Visualiseur Coach Communication - Consulter les donnees coaching (lecture seule)", + }, + "accessRules": [ + {"context": "UI", "item": "ui.feature.commcoach.dashboard", "view": True}, + {"context": "UI", "item": "ui.feature.commcoach.coaching", "view": True}, + {"context": "UI", "item": "ui.feature.commcoach.dossier", "view": True}, + {"context": "UI", "item": "ui.feature.commcoach.settings", "view": True}, + {"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"}, + ], + }, { "roleLabel": "commcoach-user", "description": { "en": "Communication Coach User - Can manage own coaching contexts and sessions", "de": "Kommunikations-Coach Benutzer - Kann eigene 
Coaching-Kontexte und Sessions verwalten", - "fr": "Utilisateur Coach Communication - Peut gerer ses propres contextes et sessions" + "fr": "Utilisateur Coach Communication - Peut gerer ses propres contextes et sessions", }, "accessRules": [ {"context": "UI", "item": "ui.feature.commcoach.dashboard", "view": True}, @@ -132,7 +147,20 @@ TEMPLATE_ROLES = [ {"context": "RESOURCE", "item": "resource.feature.commcoach.session.start", "view": True}, {"context": "RESOURCE", "item": "resource.feature.commcoach.session.complete", "view": True}, {"context": "RESOURCE", "item": "resource.feature.commcoach.task.manage", "view": True}, - ] + ], + }, + { + "roleLabel": "commcoach-admin", + "description": { + "en": "Communication Coach Admin - All UI and API actions; data scoped to own records", + "de": "Kommunikations-Coach Admin - Alle UI- und API-Aktionen; Daten nur eigene Datensaetze", + "fr": "Administrateur Coach Communication - Toute l'UI et les API; donnees propres", + }, + "accessRules": [ + {"context": "UI", "item": None, "view": True}, + {"context": "RESOURCE", "item": None, "view": True}, + {"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "m"}, + ], }, ] @@ -142,7 +170,7 @@ def getFeatureDefinition() -> Dict[str, Any]: "code": FEATURE_CODE, "label": FEATURE_LABEL, "icon": FEATURE_ICON, - "autoCreateInstance": True, + "autoCreateInstance": False, } diff --git a/modules/features/commcoach/tests/test_mainCommcoach.py b/modules/features/commcoach/tests/test_mainCommcoach.py index 85d85cf6..6be563b6 100644 --- a/modules/features/commcoach/tests/test_mainCommcoach.py +++ b/modules/features/commcoach/tests/test_mainCommcoach.py @@ -31,7 +31,7 @@ class TestFeatureDefinition: assert defn["code"] == "commcoach" assert "label" in defn assert "icon" in defn - assert defn["autoCreateInstance"] is True + assert defn["autoCreateInstance"] is False class TestRbacObjects: diff --git a/modules/features/neutralization/mainNeutralization.py b/modules/features/neutralization/mainNeutralization.py index d32b441f..bfe97a13 100644 --- a/modules/features/neutralization/mainNeutralization.py +++ b/modules/features/neutralization/mainNeutralization.py @@ -45,34 +45,55 @@ RESOURCE_OBJECTS = [ # Template roles for this feature TEMPLATE_ROLES = [ + { + "roleLabel": "neutralization-viewer", + "description": { + "en": "Neutralization Viewer - View neutralization data (read-only)", + "de": "Neutralisierungs-Betrachter - Neutralisierungsdaten einsehen (nur lesen)", + "fr": "Visualiseur neutralisation - Consulter les données de neutralisation (lecture seule)", + }, + "accessRules": [ + {"context": "UI", "item": "ui.feature.neutralization.playground", "view": True}, + {"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"}, + ], + }, + { + "roleLabel": "neutralization-user", + "description": { + "en": "Neutralization User - Use neutralization tools and manage own data", + "de": "Neutralisierungs-Benutzer - Neutralisierungstools nutzen und eigene Daten verwalten", + "fr": "Utilisateur neutralisation - Utiliser les outils et gérer ses propres données", + }, + "accessRules": [ + {"context": "UI", "item": "ui.feature.neutralization.playground", "view": True}, + {"context": "UI", "item": "ui.feature.neutralization.attributes", "view": True}, + {"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "n"}, + ], + }, { "roleLabel": "neutralization-admin", "description": { "en": 
"Neutralization Administrator - Full access to neutralization settings and data", "de": "Neutralisierungs-Administrator - Vollzugriff auf Neutralisierungs-Einstellungen und Daten", - "fr": "Administrateur neutralisation - Accès complet aux paramètres et données" + "fr": "Administrateur neutralisation - Accès complet aux paramètres et données", }, "accessRules": [ - # Full UI access (all views including admin views) {"context": "UI", "item": None, "view": True}, - # Full DATA access {"context": "DATA", "item": None, "view": True, "read": "a", "create": "a", "update": "a", "delete": "a"}, - ] + ], }, { "roleLabel": "neutralization-analyst", "description": { "en": "Neutralization Analyst - Analyze and process neutralization data", "de": "Neutralisierungs-Analyst - Neutralisierungsdaten analysieren und verarbeiten", - "fr": "Analyste neutralisation - Analyser et traiter les données de neutralisation" + "fr": "Analyste neutralisation - Analyser et traiter les données de neutralisation", }, "accessRules": [ - # UI access to specific views - vollqualifizierte ObjectKeys {"context": "UI", "item": "ui.feature.neutralization.playground", "view": True}, {"context": "UI", "item": "ui.feature.neutralization.attributes", "view": True}, - # Group-level DATA access (read-only for sensitive config) {"context": "DATA", "item": None, "view": True, "read": "g", "create": "n", "update": "n", "delete": "n"}, - ] + ], }, ] diff --git a/modules/features/realEstate/mainRealEstate.py b/modules/features/realEstate/mainRealEstate.py index 2ae2378b..dfe310d5 100644 --- a/modules/features/realEstate/mainRealEstate.py +++ b/modules/features/realEstate/mainRealEstate.py @@ -39,52 +39,57 @@ RESOURCE_OBJECTS = [ # Template roles for this feature with AccessRules # IMPORTANT: item uses vollqualifizierte ObjectKeys (gemäss Navigation-API-Konzept) TEMPLATE_ROLES = [ + { + "roleLabel": "realestate-viewer", + "description": { + "en": "Real Estate Viewer - View property information (read-only)", + "de": "Immobilien-Betrachter - Immobilien-Informationen einsehen (nur lesen)", + "fr": "Visualiseur immobilier - Consulter les informations immobilières (lecture seule)", + }, + "accessRules": [ + {"context": "UI", "item": "ui.feature.realestate.dashboard", "view": True}, + {"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"}, + ], + }, + { + "roleLabel": "realestate-user", + "description": { + "en": "Real Estate User - Create and manage own property records", + "de": "Immobilien-Benutzer - Eigene Immobilien-Daten erstellen und verwalten", + "fr": "Utilisateur immobilier - Créer et gérer ses propres données immobilières", + }, + "accessRules": [ + {"context": "UI", "item": "ui.feature.realestate.dashboard", "view": True}, + {"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "n"}, + {"context": "RESOURCE", "item": "resource.feature.realestate.project.create", "view": True}, + ], + }, { "roleLabel": "realestate-admin", "description": { "en": "Real Estate Administrator - Full access to all property data and settings", "de": "Immobilien-Administrator - Vollzugriff auf alle Immobiliendaten und Einstellungen", - "fr": "Administrateur immobilier - Accès complet aux données et paramètres" + "fr": "Administrateur immobilier - Accès complet aux données et paramètres", }, "accessRules": [ - # Full UI access (all views including admin views) {"context": "UI", "item": None, "view": True}, - # Full DATA access {"context": "DATA", "item": None, 
"view": True, "read": "a", "create": "a", "update": "a", "delete": "a"}, - # Admin resources {"context": "RESOURCE", "item": "resource.feature.realestate.project.create", "view": True}, {"context": "RESOURCE", "item": "resource.feature.realestate.project.delete", "view": True}, - ] + ], }, { "roleLabel": "realestate-manager", "description": { "en": "Real Estate Manager - Manage properties and tenants", "de": "Immobilien-Verwalter - Immobilien und Mieter verwalten", - "fr": "Gestionnaire immobilier - Gérer les propriétés et locataires" + "fr": "Gestionnaire immobilier - Gérer les propriétés et locataires", }, "accessRules": [ - # UI access to map view {"context": "UI", "item": "ui.feature.realestate.dashboard", "view": True}, - # Group-level DATA access {"context": "DATA", "item": None, "view": True, "read": "g", "create": "g", "update": "g", "delete": "g"}, - # Resource: create projects {"context": "RESOURCE", "item": "resource.feature.realestate.project.create", "view": True}, - ] - }, - { - "roleLabel": "realestate-viewer", - "description": { - "en": "Real Estate Viewer - View property information", - "de": "Immobilien-Betrachter - Immobilien-Informationen einsehen", - "fr": "Visualiseur immobilier - Consulter les informations immobilières" - }, - "accessRules": [ - # UI access to map view (read-only) - {"context": "UI", "item": "ui.feature.realestate.dashboard", "view": True}, - # Read-only DATA access (my records) - {"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"}, - ] + ], }, ] diff --git a/modules/features/teamsbot/interfaceFeatureTeamsbot.py b/modules/features/teamsbot/interfaceFeatureTeamsbot.py index 9be96393..4d6519d8 100644 --- a/modules/features/teamsbot/interfaceFeatureTeamsbot.py +++ b/modules/features/teamsbot/interfaceFeatureTeamsbot.py @@ -10,7 +10,6 @@ from typing import Dict, Any, List, Optional from modules.datamodels.datamodelUam import User from modules.connectors.connectorDbPostgre import DatabaseConnector -from modules.shared.timeUtils import getIsoTimestamp from modules.shared.configuration import APP_CONFIG from .datamodelTeamsbot import ( @@ -104,13 +103,10 @@ class TeamsbotObjects: def createSession(self, sessionData: Dict[str, Any]) -> Dict[str, Any]: """Create a new session.""" - sessionData["creationDate"] = getIsoTimestamp() - sessionData["lastModified"] = getIsoTimestamp() return self.db.recordCreate(TeamsbotSession, sessionData) def updateSession(self, sessionId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]: """Update session fields.""" - updates["lastModified"] = getIsoTimestamp() return self.db.recordModify(TeamsbotSession, sessionId, updates) def deleteSession(self, sessionId: str) -> bool: @@ -149,7 +145,6 @@ class TeamsbotObjects: def createTranscript(self, transcriptData: Dict[str, Any]) -> Dict[str, Any]: """Create a new transcript segment.""" - transcriptData["creationDate"] = getIsoTimestamp() return self.db.recordCreate(TeamsbotTranscript, transcriptData) def updateTranscript(self, transcriptId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]: @@ -180,7 +175,6 @@ class TeamsbotObjects: def createBotResponse(self, responseData: Dict[str, Any]) -> Dict[str, Any]: """Create a new bot response record.""" - responseData["creationDate"] = getIsoTimestamp() return self.db.recordCreate(TeamsbotBotResponse, responseData) def _deleteResponsesBySession(self, sessionId: str) -> int: @@ -216,13 +210,10 @@ class TeamsbotObjects: def createSystemBot(self, botData: Dict[str, Any]) -> 
Dict[str, Any]: """Create a new system bot account.""" - botData["creationDate"] = getIsoTimestamp() - botData["lastModified"] = getIsoTimestamp() return self.db.recordCreate(TeamsbotSystemBot, botData) def updateSystemBot(self, botId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]: """Update a system bot account.""" - updates["lastModified"] = getIsoTimestamp() return self.db.recordModify(TeamsbotSystemBot, botId, updates) def deleteSystemBot(self, botId: str) -> bool: @@ -243,13 +234,10 @@ class TeamsbotObjects: def createUserSettings(self, settingsData: Dict[str, Any]) -> Dict[str, Any]: """Create user settings.""" - settingsData["creationDate"] = getIsoTimestamp() - settingsData["lastModified"] = getIsoTimestamp() return self.db.recordCreate(TeamsbotUserSettings, settingsData) def updateUserSettings(self, settingsId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]: """Update user settings.""" - updates["lastModified"] = getIsoTimestamp() return self.db.recordModify(TeamsbotUserSettings, settingsId, updates) def deleteUserSettings(self, settingsId: str) -> bool: @@ -270,13 +258,10 @@ class TeamsbotObjects: def createUserAccount(self, data: Dict[str, Any]) -> Dict[str, Any]: """Create saved MS credentials.""" - data["creationDate"] = getIsoTimestamp() - data["lastModified"] = getIsoTimestamp() return self.db.recordCreate(TeamsbotUserAccount, data) def updateUserAccount(self, accountId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]: """Update saved MS credentials.""" - updates["lastModified"] = getIsoTimestamp() return self.db.recordModify(TeamsbotUserAccount, accountId, updates) def deleteUserAccount(self, accountId: str) -> bool: diff --git a/modules/features/teamsbot/mainTeamsbot.py b/modules/features/teamsbot/mainTeamsbot.py index 97cc107e..afdce822 100644 --- a/modules/features/teamsbot/mainTeamsbot.py +++ b/modules/features/teamsbot/mainTeamsbot.py @@ -103,25 +103,35 @@ TEMPLATE_ROLES = [ {"context": "RESOURCE", "item": "resource.feature.teamsbot.config.edit", "view": True}, ] }, + { + "roleLabel": "teamsbot-viewer", + "description": { + "en": "Teams Bot Viewer - View sessions and transcripts (read-only)", + "de": "Teams Bot Betrachter - Sitzungen und Transkripte ansehen (nur lesen)", + "fr": "Visualiseur Teams Bot - Consulter les sessions et transcriptions (lecture seule)", + }, + "accessRules": [ + {"context": "UI", "item": "ui.feature.teamsbot.dashboard", "view": True}, + {"context": "UI", "item": "ui.feature.teamsbot.sessions", "view": True}, + {"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"}, + ], + }, { "roleLabel": "teamsbot-user", "description": { "en": "Teams Bot User - Can start/stop sessions and view transcripts", "de": "Teams Bot Benutzer - Kann Sitzungen starten/stoppen und Transkripte einsehen", - "fr": "Utilisateur Teams Bot - Peut démarrer/arrêter des sessions et voir les transcriptions" + "fr": "Utilisateur Teams Bot - Peut démarrer/arrêter des sessions et voir les transcriptions", }, "accessRules": [ - # UI access to dashboard and sessions (not settings) {"context": "UI", "item": "ui.feature.teamsbot.dashboard", "view": True}, {"context": "UI", "item": "ui.feature.teamsbot.sessions", "view": True}, - # Own records only {"context": "DATA", "item": "data.feature.teamsbot.TeamsbotSession", "view": True, "read": "m", "create": "m", "update": "m", "delete": "n"}, {"context": "DATA", "item": "data.feature.teamsbot.TeamsbotTranscript", "view": True, "read": "m", "create": "n", 
"update": "n", "delete": "n"}, {"context": "DATA", "item": "data.feature.teamsbot.TeamsbotBotResponse", "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"}, - # Start and stop sessions {"context": "RESOURCE", "item": "resource.feature.teamsbot.session.start", "view": True}, {"context": "RESOURCE", "item": "resource.feature.teamsbot.session.stop", "view": True}, - ] + ], }, ] @@ -132,7 +142,7 @@ def getFeatureDefinition() -> Dict[str, Any]: "code": FEATURE_CODE, "label": FEATURE_LABEL, "icon": FEATURE_ICON, - "autoCreateInstance": True, + "autoCreateInstance": False, } diff --git a/modules/features/trustee/mainTrustee.py b/modules/features/trustee/mainTrustee.py index 606da308..45824b1b 100644 --- a/modules/features/trustee/mainTrustee.py +++ b/modules/features/trustee/mainTrustee.py @@ -170,60 +170,81 @@ RESOURCE_OBJECTS = [ # Note: UI item=None means ALL views, specific items restrict to named views # IMPORTANT: item uses vollqualifizierte ObjectKeys (gemäss Navigation-API-Konzept) TEMPLATE_ROLES = [ + { + "roleLabel": "trustee-viewer", + "description": { + "en": "Trustee Viewer - View trustee data (read-only)", + "de": "Treuhand-Betrachter - Treuhand-Daten einsehen (nur lesen)", + "fr": "Visualiseur fiduciaire - Consulter les données fiduciaires (lecture seule)", + }, + "accessRules": [ + {"context": "UI", "item": "ui.feature.trustee.dashboard", "view": True}, + {"context": "UI", "item": "ui.feature.trustee.positions", "view": True}, + {"context": "UI", "item": "ui.feature.trustee.documents", "view": True}, + {"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"}, + ], + }, + { + "roleLabel": "trustee-user", + "description": { + "en": "Trustee User - Create and manage own trustee records", + "de": "Treuhand-Benutzer - Eigene Treuhand-Daten erstellen und verwalten", + "fr": "Utilisateur fiduciaire - Créer et gérer ses propres données fiduciaires", + }, + "accessRules": [ + {"context": "UI", "item": "ui.feature.trustee.dashboard", "view": True}, + {"context": "UI", "item": "ui.feature.trustee.positions", "view": True}, + {"context": "UI", "item": "ui.feature.trustee.documents", "view": True}, + {"context": "UI", "item": "ui.feature.trustee.expense-import", "view": True}, + {"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "n"}, + ], + }, { "roleLabel": "trustee-admin", "description": { "en": "Trustee Administrator - Full access to all trustee data and settings", "de": "Treuhand-Administrator - Vollzugriff auf alle Treuhand-Daten und Einstellungen", - "fr": "Administrateur fiduciaire - Accès complet aux données et paramètres fiduciaires" + "fr": "Administrateur fiduciaire - Accès complet aux données et paramètres fiduciaires", }, "accessRules": [ - # Full UI access (all views including admin views) {"context": "UI", "item": None, "view": True}, - # Full DATA access {"context": "DATA", "item": None, "view": True, "read": "a", "create": "a", "update": "a", "delete": "a"}, - # Admin resource: manage instance roles {"context": "RESOURCE", "item": "resource.feature.trustee.instance-roles.manage", "view": True}, - ] + ], }, { "roleLabel": "trustee-accountant", "description": { "en": "Trustee Accountant - Manage accounting and financial data", "de": "Treuhand-Buchhalter - Buchhaltungs- und Finanzdaten verwalten", - "fr": "Comptable fiduciaire - Gérer les données comptables et financières" + "fr": "Comptable fiduciaire - Gérer les données comptables et financières", }, 
"accessRules": [ - # UI access to main views (not admin views, not expense-import) - vollqualifizierte ObjectKeys {"context": "UI", "item": "ui.feature.trustee.dashboard", "view": True}, {"context": "UI", "item": "ui.feature.trustee.positions", "view": True}, {"context": "UI", "item": "ui.feature.trustee.documents", "view": True}, {"context": "UI", "item": "ui.feature.trustee.settings", "view": True}, - # Group-level DATA access {"context": "DATA", "item": None, "view": True, "read": "g", "create": "g", "update": "g", "delete": "g"}, - # Accounting sync permission {"context": "RESOURCE", "item": "resource.feature.trustee.accounting.sync", "view": True}, {"context": "RESOURCE", "item": "resource.feature.trustee.accounting.view", "view": True}, - ] + ], }, { "roleLabel": "trustee-client", "description": { "en": "Trustee Client - View own accounting data and documents", "de": "Treuhand-Kunde - Eigene Buchhaltungsdaten und Dokumente einsehen", - "fr": "Client fiduciaire - Consulter ses propres données comptables et documents" + "fr": "Client fiduciaire - Consulter ses propres données comptables et documents", }, "accessRules": [ - # UI access to main views + expense-import - vollqualifizierte ObjectKeys {"context": "UI", "item": "ui.feature.trustee.dashboard", "view": True}, {"context": "UI", "item": "ui.feature.trustee.positions", "view": True}, {"context": "UI", "item": "ui.feature.trustee.documents", "view": True}, {"context": "UI", "item": "ui.feature.trustee.expense-import", "view": True}, {"context": "UI", "item": "ui.feature.trustee.scan-upload", "view": True}, - # Own records only (MY level) {"context": "DATA", "item": "data.feature.trustee.TrusteePosition", "view": True, "read": "m", "create": "m", "update": "m", "delete": "n"}, {"context": "DATA", "item": "data.feature.trustee.TrusteeDocument", "view": True, "read": "m", "create": "m", "update": "m", "delete": "n"}, - ] + ], }, ] diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py index ffde890f..d980eb56 100644 --- a/modules/interfaces/interfaceDbApp.py +++ b/modules/interfaces/interfaceDbApp.py @@ -1446,7 +1446,7 @@ class AppObjects: if not adminRoleId: raise ValueError(f"No admin role found for mandate {mandateId} — cannot assign user without role") - self.createUserMandate(userId, mandateId, roleIds=[adminRoleId]) + self.createUserMandate(userId, mandateId, roleIds=[adminRoleId], skipCapacityCheck=True) subscription = MandateSubscription( mandateId=mandateId, @@ -1454,8 +1454,10 @@ class AppObjects: status=SubscriptionStatusEnum.PENDING, ) if plan.trialDays: - pass # trialEndsAt set on ACTIVE transition - self.db.recordCreate(MandateSubscription, subscription.model_dump()) + pass # trialEndsAt set on ACTIVE/TRIALING transition + from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot + subInterface = _getSubRoot() + subInterface.createSubscription(subscription) featureInterface = getFeatureInterface(self.db) mainModules = loadFeatureMainModules() @@ -1513,50 +1515,58 @@ class AppObjects: """ Activate PENDING subscriptions for all mandates where this user is a member. Called on login — trial period begins NOW, not at registration. + Uses the subscription interface (poweron_billing) for all subscription operations. Returns number of activated subscriptions. 
""" from modules.datamodels.datamodelSubscription import ( - MandateSubscription, SubscriptionStatusEnum, BUILTIN_PLANS, + SubscriptionStatusEnum, BUILTIN_PLANS, ) + from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot from datetime import datetime, timezone, timedelta activated = 0 + subInterface = _getSubRoot() + userMandates = self.db.getRecordset( UserMandate, recordFilter={"userId": userId, "enabled": True} ) for um in userMandates: mandateId = um.get("mandateId") - subs = self.db.getRecordset( - MandateSubscription, - recordFilter={"mandateId": mandateId, "status": SubscriptionStatusEnum.PENDING.value} - ) - for sub in subs: + allSubs = subInterface.listForMandate(mandateId) + pendingSubs = [s for s in allSubs if s.get("status") == SubscriptionStatusEnum.PENDING.value] + + for sub in pendingSubs: subId = sub.get("id") planKey = sub.get("planKey") plan = BUILTIN_PLANS.get(planKey) now = datetime.now(timezone.utc) - updateData = { - "status": SubscriptionStatusEnum.TRIALING.value if plan and plan.trialDays else SubscriptionStatusEnum.ACTIVE.value, + targetStatus = SubscriptionStatusEnum.TRIALING if plan and plan.trialDays else SubscriptionStatusEnum.ACTIVE + additionalData = { "currentPeriodStart": now.isoformat(), } if plan and plan.trialDays: trialEnd = now + timedelta(days=plan.trialDays) - updateData["trialEndsAt"] = trialEnd.isoformat() - updateData["currentPeriodEnd"] = trialEnd.isoformat() + additionalData["trialEndsAt"] = trialEnd.isoformat() + additionalData["currentPeriodEnd"] = trialEnd.isoformat() elif plan and plan.billingPeriod: from modules.datamodels.datamodelSubscription import BillingPeriodEnum if plan.billingPeriod == BillingPeriodEnum.MONTHLY: - updateData["currentPeriodEnd"] = (now + timedelta(days=30)).isoformat() + additionalData["currentPeriodEnd"] = (now + timedelta(days=30)).isoformat() elif plan.billingPeriod == BillingPeriodEnum.YEARLY: - updateData["currentPeriodEnd"] = (now + timedelta(days=365)).isoformat() + additionalData["currentPeriodEnd"] = (now + timedelta(days=365)).isoformat() try: - self.db.recordModify(MandateSubscription, subId, updateData) + subInterface.transitionStatus( + subId, + expectedFromStatus=SubscriptionStatusEnum.PENDING, + toStatus=targetStatus, + additionalData=additionalData, + ) activated += 1 - logger.info(f"Activated subscription {subId} (plan={planKey}) for mandate {mandateId}: {updateData.get('status')}") + logger.info(f"Activated subscription {subId} (plan={planKey}) for mandate {mandateId}: {targetStatus.value}") except Exception as e: logger.error(f"Failed to activate subscription {subId}: {e}") @@ -1848,7 +1858,7 @@ class AppObjects: logger.error(f"Error getting UserMandates: {e}") return [] - def createUserMandate(self, userId: str, mandateId: str, roleIds: List[str] = None) -> UserMandate: + def createUserMandate(self, userId: str, mandateId: str, roleIds: List[str] = None, *, skipCapacityCheck: bool = False) -> UserMandate: """ Create a UserMandate record (add user to mandate). Also creates a billing audit account for the user if billing is configured. 
@@ -1859,6 +1869,8 @@ class AppObjects: userId: User ID mandateId: Mandate ID roleIds: List of role IDs to assign (at least one required) + skipCapacityCheck: If True, skip subscription capacity check (used during initial provisioning + when the subscription hasn't been created yet) Returns: Created UserMandate object @@ -1871,7 +1883,8 @@ class AppObjects: if existing: raise ValueError(f"User {userId} is already member of mandate {mandateId}") - self._checkSubscriptionCapacity(mandateId, "users", delta=1) + if not skipCapacityCheck: + self._checkSubscriptionCapacity(mandateId, "users", delta=1) userMandate = UserMandate( userId=userId, @@ -2551,6 +2564,18 @@ class AppObjects: logger.error(f"Error getting invitations for target username {targetUsername}: {e}") return [] + def getInvitationsByEmail(self, email: str) -> List[Invitation]: + """Get all invitations for a target email address (email-only invitations).""" + try: + records = self.db.getRecordset(Invitation, recordFilter={"email": email}) + result = [] + for record in records: + result.append(Invitation(**dict(record))) + return result + except Exception as e: + logger.error(f"Error getting invitations for email {email}: {e}") + return [] + # ============================================ # Additional Helper Methods # ============================================ diff --git a/modules/interfaces/interfaceDbBilling.py b/modules/interfaces/interfaceDbBilling.py index bb2dc5c9..948f8918 100644 --- a/modules/interfaces/interfaceDbBilling.py +++ b/modules/interfaces/interfaceDbBilling.py @@ -586,17 +586,6 @@ class BillingObjects: # Create transaction record (always on transaction.accountId for audit) transactionDict = transaction.model_dump(exclude_none=True) - ts = getUtcTimestamp() - uid = str(self.userId) if self.userId else None - if transactionDict.get("sysCreatedAt") is None: - transactionDict["sysCreatedAt"] = ts - if transactionDict.get("sysModifiedAt") is None: - transactionDict["sysModifiedAt"] = ts - if uid: - if transactionDict.get("sysCreatedBy") is None: - transactionDict["sysCreatedBy"] = uid - if transactionDict.get("sysModifiedBy") is None: - transactionDict["sysModifiedBy"] = uid created = self.db.recordCreate(BillingTransaction, transactionDict) # Update balance on the target account diff --git a/modules/interfaces/interfaceDbManagement.py b/modules/interfaces/interfaceDbManagement.py index 0a16b734..2df85164 100644 --- a/modules/interfaces/interfaceDbManagement.py +++ b/modules/interfaces/interfaceDbManagement.py @@ -823,7 +823,7 @@ class ComponentObjects: mimeType=file["mimeType"], fileHash=file["fileHash"], fileSize=file["fileSize"], - creationDate=file["creationDate"] + sysCreatedAt=file.get("sysCreatedAt") or file.get("creationDate"), ) def getMimeType(self, fileName: str) -> str: @@ -928,9 +928,11 @@ class ComponentObjects: fileItems = [] for file in files: try: - creationDate = file.get("creationDate") - if creationDate is None or not isinstance(creationDate, (int, float)) or creationDate <= 0: - file["creationDate"] = getUtcTimestamp() + sysCreatedAt = file.get("sysCreatedAt") or file.get("creationDate") + if sysCreatedAt is None or not isinstance(sysCreatedAt, (int, float)) or sysCreatedAt <= 0: + file["sysCreatedAt"] = getUtcTimestamp() + else: + file["sysCreatedAt"] = sysCreatedAt fileName = file.get("fileName") if not fileName or fileName == "None": @@ -977,20 +979,19 @@ class ComponentObjects: file = filteredFiles[0] try: - # Get creation date from record or use current time - creationDate = 
file.get("creationDate") - if not creationDate: - creationDate = getUtcTimestamp() - + sysCreatedAt = file.get("sysCreatedAt") or file.get("creationDate") + if not sysCreatedAt: + sysCreatedAt = getUtcTimestamp() + return FileItem( id=file.get("id"), mandateId=file.get("mandateId"), + featureInstanceId=file.get("featureInstanceId", ""), fileName=file.get("fileName"), mimeType=file.get("mimeType"), - workflowId=file.get("workflowId"), fileHash=file.get("fileHash"), fileSize=file.get("fileSize"), - creationDate=creationDate + sysCreatedAt=sysCreatedAt, ) except Exception as e: logger.error(f"Error converting file record: {str(e)}") diff --git a/modules/routes/routeDataUsers.py b/modules/routes/routeDataUsers.py index a1da658b..7cce66ca 100644 --- a/modules/routes/routeDataUsers.py +++ b/modules/routes/routeDataUsers.py @@ -920,30 +920,29 @@ def send_password_link( expiryHours = int(APP_CONFIG.get("Auth_RESET_TOKEN_EXPIRY_HOURS", "24")) try: - from modules.serviceHub import Services - services = Services(targetUser) - + from modules.routes.routeSecurityLocal import _buildAuthEmailHtml, _sendAuthEmail + emailSubject = "PowerOn - Passwort setzen" - emailBody = f""" -Hallo {targetUser.fullName or targetUser.username}, + emailHtml = _buildAuthEmailHtml( + greeting=f"Hallo {targetUser.fullName or targetUser.username}", + bodyLines=[ + "Ein Administrator hat einen Link zum Setzen Ihres Passworts angefordert.", + "", + f"Ihr Benutzername: {targetUser.username}", + "", + "Klicken Sie auf die Schaltfläche, um Ihr Passwort zu setzen:", + ], + buttonText="Passwort setzen", + buttonUrl=magicLink, + footerText=f"Dieser Link ist {expiryHours} Stunden gültig. Falls Sie diese Anforderung nicht erwartet haben, kontaktieren Sie bitte Ihren Administrator.", + ) -Ein Administrator hat einen Link zum Setzen Ihres Passworts angefordert. - -Ihr Benutzername: {targetUser.username} - -Klicken Sie auf den folgenden Link, um Ihr Passwort zu setzen: -{magicLink} - -Dieser Link ist {expiryHours} Stunden gültig. - -Falls Sie diese Anforderung nicht erwartet haben, kontaktieren Sie bitte Ihren Administrator. -""" - - emailSent = services.messaging.sendEmailDirect( + emailSent = _sendAuthEmail( recipient=targetUser.email, subject=emailSubject, - message=emailBody, - userId=str(targetUser.id) + message="", + userId=str(targetUser.id), + htmlOverride=emailHtml, ) if not emailSent: diff --git a/modules/routes/routeInvitations.py b/modules/routes/routeInvitations.py index cb913137..ccefcc87 100644 --- a/modules/routes/routeInvitations.py +++ b/modules/routes/routeInvitations.py @@ -292,37 +292,24 @@ def create_invitation( emailConnector = ConnectorMessagingEmail() if instance_label: emailSubject = f"Einladung zur Feature-Instanz {instance_label}" - invite_text = f"der Feature-Instanz {instance_label} (Mandant: {mandateName}) beizutreten" + invite_desc = f"der Feature-Instanz «{instance_label}» (Mandant: {mandateName}) beizutreten" else: emailSubject = f"Einladung zu {mandateName}" - invite_text = f"dem Mandanten {mandateName} beizutreten" - emailBody = f""" - - -

diff --git a/modules/routes/routeInvitations.py b/modules/routes/routeInvitations.py
index cb913137..ccefcc87 100644
--- a/modules/routes/routeInvitations.py
+++ b/modules/routes/routeInvitations.py
@@ -292,37 +292,24 @@ def create_invitation(
         emailConnector = ConnectorMessagingEmail()
         if instance_label:
             emailSubject = f"Einladung zur Feature-Instanz {instance_label}"
-            invite_text = f"der Feature-Instanz {instance_label} (Mandant: {mandateName}) beizutreten"
+            invite_desc = f"der Feature-Instanz «{instance_label}» (Mandant: {mandateName}) beizutreten"
         else:
             emailSubject = f"Einladung zu {mandateName}"
-            invite_text = f"dem Mandanten {mandateName} beizutreten"
-        emailBody = f"""
-        <html>
-        <body>
-        <h2>Sie wurden eingeladen!</h2>
-        <p>Hallo {display_name},</p>
-        <p>Sie wurden eingeladen, {invite_text}.</p>
-        <p>Klicken Sie auf den folgenden Link, um die Einladung anzunehmen:</p>
-        <p>
-            <a href="{inviteUrl}">Einladung annehmen</a>
-        </p>
-        <p>
-            Oder kopieren Sie diesen Link in Ihren Browser:<br>
-            {inviteUrl}
-        </p>
-        <p>
-            Diese Einladung ist {data.expiresInHours} Stunden gültig.
-        </p>
-        <hr>
-        <p>
-            Diese E-Mail wurde automatisch von PowerOn gesendet.
-        </p>
-        </body>
-        </html>
-        """
+            invite_desc = f"dem Mandanten «{mandateName}» beizutreten"
+
+        from modules.routes.routeSecurityLocal import _buildAuthEmailHtml
+        emailBody = _buildAuthEmailHtml(
+            greeting=f"Hallo {display_name}",
+            bodyLines=[
+                f"Sie wurden eingeladen, {invite_desc}.",
+                "",
+                "Klicken Sie auf die Schaltfläche, um die Einladung anzunehmen:",
+            ],
+            buttonText="Einladung annehmen",
+            buttonUrl=inviteUrl,
+            footerText=f"Diese Einladung ist {data.expiresInHours} Stunden gültig.",
+        )
+
         emailConnector.send(
             recipient=email_val,
             subject=emailSubject,
@@ -376,6 +363,8 @@ def create_invitation(
         f"to {target_desc}, expires in {data.expiresInHours}h"
     )

+    # Invitation extends PowerOnModel: recordCreate/_saveRecord set sysCreatedAt and sysCreatedBy automatically.
+    # API response uses createdAt/createdBy; map from the system fields (no separate createdAt column on model).
     return InvitationResponse(
         id=str(createdRecord.get("id")),
         token=str(createdRecord.get("token")),
@@ -384,8 +373,8 @@ def create_invitation(
         roleIds=createdRecord.get("roleIds", []),
         targetUsername=createdRecord.get("targetUsername"),
         email=createdRecord.get("email"),
-        createdBy=str(createdRecord.get("createdBy")),
-        createdAt=createdRecord.get("createdAt"),
+        createdBy=str(createdRecord["sysCreatedBy"]),
+        createdAt=float(createdRecord["sysCreatedAt"]),
         expiresAt=createdRecord.get("expiresAt"),
         usedBy=createdRecord.get("usedBy"),
         usedAt=createdRecord.get("usedAt"),
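The invitation-validity check (not expired, not revoked, uses remaining) reappears verbatim in _ensureHomeMandate and register_user further down in this patch. A sketch of the predicate those call sites repeat; isInvitationUsable is a hypothetical name, not something the patch defines:

    # Sketch only: hypothetical consolidation of the repeated invitation checks.
    def isInvitationUsable(inv, now: float) -> bool:
        if (inv.expiresAt or 0) < now:                     # already expired
            return False
        if inv.revokedAt:                                  # explicitly revoked
            return False
        if (inv.currentUses or 0) >= (inv.maxUses or 1):   # no uses left
            return False
        return True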

 

diff --git a/modules/routes/routeSecurityLocal.py b/modules/routes/routeSecurityLocal.py
index 11b6cb0f..9ec4fc38 100644
--- a/modules/routes/routeSecurityLocal.py
+++ b/modules/routes/routeSecurityLocal.py
@@ -26,36 +26,122 @@ from modules.shared.timeUtils import getUtcTimestamp

 logger = logging.getLogger(__name__)

-def _sendAuthEmail(recipient: str, subject: str, message: str, userId: str = None) -> bool:
+def _buildAuthEmailHtml(
+    greeting: str,
+    bodyLines: list,
+    buttonText: str = None,
+    buttonUrl: str = None,
+    footerText: str = None,
+) -> str:
+    """Build a branded HTML email for authentication flows.
+
+    Uses the same visual design as notifyMandateAdmins._renderHtmlEmail
+    (dark header, clean body, operator footer).
+    """
+    import html as _html
+
+    paragraphsHtml = ""
+    for line in bodyLines:
+        if line == "":
+            paragraphsHtml += '<p style="margin:0 0 10px 0;">&nbsp;</p>\n'
+        else:
+            escaped = _html.escape(str(line))
+            paragraphsHtml += f'<p style="margin:0 0 10px 0;">{escaped}</p>\n'
+
+    buttonBlock = ""
+    if buttonText and buttonUrl:
+        buttonBlock = f'''
+<p style="text-align:center;margin:24px 0;">
+    <a href="{_html.escape(buttonUrl)}" style="display:inline-block;padding:12px 28px;background:#1a1a2e;color:#ffffff;text-decoration:none;border-radius:4px;">{_html.escape(buttonText)}</a>
+</p>
+<p style="font-size:12px;color:#888888;word-break:break-all;">
+    {_html.escape(buttonUrl)}
+</p>
+'''

+    footerNote = ""
+    if footerText:
+        footerNote = f'<p style="font-size:12px;color:#888888;margin-top:24px;">{_html.escape(footerText)}</p>\n'
+
+    operatorLine = ""
+    try:
+        from modules.shared.configuration import APP_CONFIG
+        parts = [p for p in [
+            APP_CONFIG.get("Operator_CompanyName", ""),
+            APP_CONFIG.get("Operator_Address", ""),
+            APP_CONFIG.get("Operator_VatNumber", ""),
+        ] if p]
+        if parts:
+            operatorLine = (
+                f'<p style="font-size:11px;color:#aaaaaa;margin:4px 0 0 0;">'
+                f'{_html.escape(" | ".join(parts))}</p>\n'
+            )
+    except Exception:
+        pass
+
+    return f'''<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="utf-8">
+</head>
+<body style="margin:0;padding:0;background:#f4f4f7;font-family:Arial,Helvetica,sans-serif;">
+<table role="presentation" width="100%" cellpadding="0" cellspacing="0">
+    <tr><td align="center" style="padding:24px 12px;">
+        <table role="presentation" width="600" cellpadding="0" cellspacing="0" style="background:#ffffff;border-radius:6px;overflow:hidden;">
+            <tr><td style="background:#1a1a2e;padding:20px 32px;">
+                <span style="color:#ffffff;font-size:20px;font-weight:bold;">PowerOn</span>
+            </td></tr>
+            <tr><td style="padding:32px;">
+                <p style="font-size:15px;color:#333333;">{_html.escape(greeting)}</p>
+                {paragraphsHtml}
+                {buttonBlock}
+                {footerNote}
+            </td></tr>
+            <tr><td style="padding:16px 32px;background:#f4f4f7;">
+                <p style="font-size:11px;color:#aaaaaa;margin:0;">
+                    Diese E-Mail wurde automatisch von PowerOn versendet.
+                </p>
+                {operatorLine}
+            </td></tr>
+        </table>
+    </td></tr>
+</table>
+</body>
+</html>
+'''
+
+
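Every caller-supplied string above passes through _html.escape, so markup embedded in user data is rendered inert. A quick sketch of that property as a standalone check (illustration only, not part of the patch):

    # Sketch only: caller-supplied strings must come out HTML-escaped.
    html = _buildAuthEmailHtml(
        greeting='<script>alert(1)</script>',
        bodyLines=["a & b"],
    )
    assert "<script>" not in html
    assert "&lt;script&gt;" in html
    assert "a &amp; b" in html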
+def _sendAuthEmail(recipient: str, subject: str, message: str, userId: str = None, htmlOverride: str = None) -> bool:
     """
     Send authentication-related email directly without requiring full Services initialization.
     Used for registration, password reset, and other auth flows.
-
+
     Args:
         recipient: Email address
         subject: Email subject
-        message: Plain text message (will be converted to HTML)
+        message: Plain text fallback (ignored when htmlOverride is given)
         userId: Optional user ID for logging
-
+        htmlOverride: Pre-built branded HTML (from _buildAuthEmailHtml)
+
     Returns:
         bool: True if email was sent successfully
     """
     try:
-        import html
         from modules.interfaces.interfaceMessaging import getInterface as getMessagingInterface
         from modules.datamodels.datamodelMessaging import MessagingChannel
-
-        # Convert plain text to simple HTML
-        escaped = html.escape(message)
-        escaped = escaped.replace('\n', '<br>\n')
-        htmlMessage = f"""
-        <html>
-        <head><meta charset="utf-8"></head>
-        <body>
-        {escaped}
-        </body>
-        """
-
+
+        htmlMessage = htmlOverride
+        if not htmlMessage:
+            import html
+            escaped = html.escape(message)
+            escaped = escaped.replace('\n', '<br>
\n') + htmlMessage = f'{escaped}' + messagingInterface = getMessagingInterface() success = messagingInterface.send( channel=MessagingChannel.EMAIL, @@ -63,12 +149,12 @@ def _sendAuthEmail(recipient: str, subject: str, message: str, userId: str = Non subject=subject, message=htmlMessage ) - + if success: logger.info(f"Auth email sent successfully to {recipient} (userId: {userId})") else: logger.warning(f"Failed to send auth email to {recipient} (userId: {userId})") - + return success except Exception as e: logger.error(f"Error sending auth email to {recipient}: {str(e)}", exc_info=True) @@ -88,15 +174,43 @@ router = APIRouter( ) def _ensureHomeMandate(rootInterface, user) -> None: - """Ensure user has a Home mandate. Creates 'Home {username}' if none exists.""" - userMandates = rootInterface.getUserMandates(str(user.id)) + """Ensure user has a Home mandate, but only if they have no mandate memberships + AND no pending invitations. + + Invited users should NOT get a Home mandate — they join existing mandates via + invitation acceptance and can create their own later via onboarding. + """ + userId = str(user.id) + userMandates = rootInterface.getUserMandates(userId) + + if userMandates: + for um in userMandates: + mandate = rootInterface.getMandate(um.mandateId) + if mandate and (mandate.name or "").startswith("Home ") and not mandate.isSystem: + return + logger.debug(f"User {user.username} has {len(userMandates)} mandate(s) but no Home — skipping auto-creation") + return + + try: + from modules.interfaces.interfaceDbApp import getRootInterface as _getRootIf + appIf = _getRootIf() + normalizedEmail = (user.email or "").strip().lower() if user.email else None + pendingByUsername = appIf.getInvitationsByTargetUsername(user.username) + pendingByEmail = appIf.getInvitationsByEmail(normalizedEmail) if normalizedEmail else [] + seenIds = set() + for inv in pendingByUsername + pendingByEmail: + if inv.id in seenIds: + continue + seenIds.add(inv.id) + if not inv.revokedAt and (inv.currentUses or 0) < (inv.maxUses or 1): + logger.info(f"User {user.username} has pending invitation(s) — skipping Home mandate creation") + return + except Exception as e: + logger.warning(f"Could not check pending invitations for {user.username}: {e}") + homeMandateName = f"Home {user.username}" - for um in userMandates: - mandate = rootInterface.getMandate(um.mandateId) - if mandate and (mandate.name or "").startswith("Home ") and not mandate.isSystem: - return rootInterface._provisionMandateForUser( - userId=str(user.id), + userId=userId, mandateName=homeMandateName, planKey="TRIAL_7D", ) @@ -191,7 +305,14 @@ def login( # Save access token userInterface.saveAccessToken(token) - # Activate PENDING subscriptions on first login + # Ensure user has a Home mandate (created on first login if missing) + try: + _ensureHomeMandate(rootInterface, user) + except Exception as homeErr: + logger.error(f"Error ensuring Home mandate for user {user.username}: {homeErr}") + + # Activate PENDING subscriptions on first login (runs AFTER _ensureHomeMandate + # so that a freshly provisioned Home mandate subscription is also activated) try: activatedCount = rootInterface._activatePendingSubscriptions(str(user.id)) if activatedCount > 0: @@ -199,12 +320,6 @@ def login( except Exception as subErr: logger.error(f"Error activating subscriptions on login: {subErr}") - # Ensure user has a Home mandate (created on first login if missing) - try: - _ensureHomeMandate(rootInterface, user) - except Exception as homeErr: - logger.error(f"Error ensuring 
Home mandate for user {user.username}: {homeErr}") - # Log successful login (app log file + audit DB for traceability) logger.info("Login successful for username=%s (userId=%s)", formData.username, str(user.id)) try: @@ -282,35 +397,28 @@ def register_user( ) -> Dict[str, Any]: """Register a new local user (magic link based - no password required). + Unified registration path: invited users skip Home mandate provisioning + (they join the inviting mandate instead). Non-invited users get a Home + mandate with TRIAL_7D. Company mandate creation is deferred to onboarding. + Args: userData: User data (username, email, fullName, language) frontendUrl: The frontend URL to use in magic link (REQUIRED - provided by frontend) + registrationType: Kept for backward compat but ignored (company mandates via onboarding) + companyName: Kept for backward compat but ignored """ try: - # Get gateway interface with root privileges since this is a public endpoint appInterface = getRootInterface() - - # Note: User registration does NOT require mandateId context - # Users are mandate-independent (Multi-Tenant Design) - # Mandate assignment happens via createUserMandate() after registration - - # Frontend URL is required - no fallback baseUrl = frontendUrl.rstrip("/") - - # Normalize email normalizedEmail = userData.email.lower().strip() if userData.email else None - # Note: Email can be shared across multiple users (different mandates) - # Username uniqueness is enforced in createUser() - that's the primary constraint - - # Create user with local authentication (no password - magic link based) user = appInterface.createUser( username=userData.username, - password=None, # No password - will be set via magic link + password=None, email=normalizedEmail, fullName=userData.fullName, language=userData.language, - enabled=True, # Users are enabled by default (can login after setting password) + enabled=True, authenticationAuthority=AuthAuthority.LOCAL ) @@ -320,35 +428,51 @@ def register_user( detail="Failed to register user" ) - # Provision Home mandate for every new user ("Home {username}") - provisionResult = None + # Check for pending invitations BEFORE provisioning. + # Search by both username AND email (email-only invitations have targetUsername=None). 
+ hasPendingInvitations = False + validInvitations = [] try: - homeMandateName = f"Home {user.username}" - provisionResult = appInterface._provisionMandateForUser( - userId=str(user.id), - mandateName=homeMandateName, - planKey="TRIAL_7D", - ) - logger.info(f"Provisioned Home mandate for user {user.id}: {provisionResult}") - except Exception as provErr: - logger.error(f"Error provisioning Home mandate for user {user.id}: {provErr}") + from modules.datamodels.datamodelInvitation import Invitation - # If company registration, also create a company mandate with the paid plan - if registrationType == "company": - if not companyName: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail="companyName is required for company registration" - ) + currentTime = getUtcTimestamp() + pendingByUsername = appInterface.getInvitationsByTargetUsername(userData.username) + pendingByEmail = appInterface.getInvitationsByEmail(normalizedEmail) if normalizedEmail else [] + + seenIds = set() + allPending = pendingByUsername + pendingByEmail + for invitation in allPending: + if invitation.id in seenIds: + continue + seenIds.add(invitation.id) + if (invitation.expiresAt or 0) < currentTime: + continue + if invitation.revokedAt: + continue + if (invitation.currentUses or 0) >= (invitation.maxUses or 1): + continue + validInvitations.append(invitation) + + hasPendingInvitations = len(validInvitations) > 0 + except Exception as invErr: + logger.warning(f"Failed to check pending invitations: {invErr}") + + # Only provision Home mandate if user has NO pending invitations. + # Invited users join existing mandates; they can create their own later via onboarding. + provisionResult = None + if not hasPendingInvitations: try: - companyResult = appInterface._provisionMandateForUser( + homeMandateName = f"Home {user.username}" + provisionResult = appInterface._provisionMandateForUser( userId=str(user.id), - mandateName=companyName, - planKey="STANDARD_MONTHLY", + mandateName=homeMandateName, + planKey="TRIAL_7D", ) - logger.info(f"Provisioned company mandate for user {user.id}: {companyResult}") - except Exception as compErr: - logger.error(f"Error provisioning company mandate for user {user.id}: {compErr}") + logger.info(f"Provisioned Home mandate for user {user.id}: {provisionResult}") + except Exception as provErr: + logger.error(f"Error provisioning Home mandate for user {user.id}: {provErr}") + else: + logger.info(f"Skipping Home mandate for user {user.id} — has {len(validInvitations)} pending invitation(s)") # Generate reset token for password setup token, expires = appInterface.generateResetTokenAndExpiry() @@ -360,57 +484,43 @@ def register_user( expiryHours = int(APP_CONFIG.get("Auth_RESET_TOKEN_EXPIRY_HOURS", "24")) emailSubject = "PowerOn Registrierung - Passwort setzen" - emailBody = f"""Hallo {user.fullName or user.username}, + emailHtml = _buildAuthEmailHtml( + greeting=f"Hallo {user.fullName or user.username}", + bodyLines=[ + "Vielen Dank für Ihre Registrierung bei PowerOn.", + "", + f"Ihr Benutzername: {user.username}", + "", + "Klicken Sie auf die Schaltfläche, um Ihr Passwort zu setzen:", + ], + buttonText="Passwort setzen", + buttonUrl=magicLink, + footerText=f"Dieser Link ist {expiryHours} Stunden gültig. Falls Sie sich nicht registriert haben, können Sie diese E-Mail ignorieren.", + ) -Vielen Dank für Ihre Registrierung bei PowerOn. 
- -Ihr Benutzername: {user.username} - -Klicken Sie auf den folgenden Link, um Ihr Passwort zu setzen: -{magicLink} - -Dieser Link ist {expiryHours} Stunden gültig. - -Falls Sie sich nicht registriert haben, können Sie diese E-Mail ignorieren.""" - emailSent = _sendAuthEmail( recipient=user.email, subject=emailSubject, - message=emailBody, - userId=str(user.id) + message="", + userId=str(user.id), + htmlOverride=emailHtml, ) if not emailSent: logger.warning(f"Failed to send registration email to {user.email}") except Exception as emailErr: logger.error(f"Error sending registration email: {str(emailErr)}") - # Don't fail registration if email fails - user can request reset later - # Check for pending invitations and create notifications - try: - from modules.datamodels.datamodelInvitation import Invitation - from modules.routes.routeNotifications import createInvitationNotification - from modules.datamodels.datamodelUam import Mandate - - currentTime = getUtcTimestamp() - pendingInvitations = appInterface.getInvitationsByTargetUsername(userData.username) - - for invitation in pendingInvitations: - # Skip expired, revoked, or fully used invitations - if (invitation.expiresAt or 0) < currentTime: - continue - if invitation.revokedAt: - continue - if (invitation.currentUses or 0) >= (invitation.maxUses or 1): - continue + # Create notifications for pending invitations + for invitation in validInvitations: + try: + from modules.routes.routeNotifications import createInvitationNotification - # Get mandate name for notification using interface method mandateId = invitation.mandateId mandate = appInterface.getMandate(mandateId) mandateName = (mandate.label or mandate.name) if mandate else "PowerOn" - # Get inviter name - inviterId = invitation.createdBy + inviterId = invitation.sysCreatedBy inviter = appInterface.getUser(inviterId) if inviterId else None inviterName = (inviter.fullName or inviter.username) if inviter else "PowerOn" @@ -421,16 +531,15 @@ Falls Sie sich nicht registriert haben, können Sie diese E-Mail ignorieren.""" inviterName=inviterName ) logger.info(f"Created notification for new user {userData.username} for invitation {invitation.id}") - - except Exception as notifErr: - logger.warning(f"Failed to create notifications for pending invitations: {notifErr}") - # Don't fail registration if notification creation fails + except Exception as notifErr: + logger.warning(f"Failed to create notification for invitation {invitation.id}: {notifErr}") responseData = { "message": "Registrierung erfolgreich! Bitte prüfen Sie Ihre E-Mail für den Link zum Setzen Ihres Passworts." } if provisionResult: responseData["mandateId"] = provisionResult.get("mandateId") + responseData["hasInvitations"] = hasPendingInvitations return responseData except ValueError as e: @@ -676,24 +785,26 @@ def password_reset_request( # Send email using dedicated auth email function emailSubject = "PowerOn - Passwort zurücksetzen" - emailBody = f"""Hallo {user.fullName or user.username}, + emailHtml = _buildAuthEmailHtml( + greeting=f"Hallo {user.fullName or user.username}", + bodyLines=[ + "Sie haben eine Passwort-Zurücksetzung für Ihren PowerOn Account angefordert.", + "", + f"Benutzername: {user.username}", + "", + "Klicken Sie auf die Schaltfläche, um Ihr Passwort zurückzusetzen:", + ], + buttonText="Passwort zurücksetzen", + buttonUrl=magicLink, + footerText=f"Dieser Link ist {expiryHours} Stunden gültig. 
Falls Sie diese Anforderung nicht gestellt haben, können Sie diese E-Mail ignorieren.", + ) -Sie haben eine Passwort-Zurücksetzung für Ihren PowerOn Account angefordert. - -Benutzername: {user.username} - -Klicken Sie auf den folgenden Link, um Ihr Passwort zurückzusetzen: -{magicLink} - -Dieser Link ist {expiryHours} Stunden gültig. - -Falls Sie diese Anforderung nicht gestellt haben, können Sie diese E-Mail ignorieren.""" - emailSent = _sendAuthEmail( recipient=user.email, subject=emailSubject, - message=emailBody, - userId=str(user.id) + message="", + userId=str(user.id), + htmlOverride=emailHtml, ) if emailSent: @@ -725,24 +836,63 @@ def onboarding_provision( companyName: str = Body(None, embed=True), planKey: str = Body("TRIAL_7D", embed=True), ) -> Dict[str, Any]: - """Post-login onboarding: ensure Home mandate exists and optionally create a company mandate.""" + """Post-login onboarding: create a mandate for the user. + + Guard: user can only create a mandate if they are NOT already admin in any + non-system mandate. This prevents duplicate provisioning. + """ try: + from modules.datamodels.datamodelMembership import UserMandate, UserMandateRole + from modules.datamodels.datamodelRbac import Role + appInterface = getRootInterface() + db = appInterface.db + userId = str(currentUser.id) - _ensureHomeMandate(appInterface, currentUser) + # Check if user already has admin role in a non-system mandate + userMandates = db.getRecordset(UserMandate, recordFilter={"userId": userId, "enabled": True}) + hasAdminMandate = False + for um in userMandates: + mandateId = um.get("mandateId") + mandate = db.getRecordset(Mandate, recordFilter={"id": mandateId}) + if mandate and mandate[0].get("isSystem"): + continue + umId = um.get("id") + umRoles = db.getRecordset(UserMandateRole, recordFilter={"userMandateId": umId}) + for umRole in umRoles: + roleId = umRole.get("roleId") + roles = db.getRecordset(Role, recordFilter={"id": roleId}) + for role in roles: + if "admin" in (role.get("roleLabel") or "").lower(): + hasAdminMandate = True + break + if hasAdminMandate: + break + if hasAdminMandate: + break - result = None - if companyName and companyName.strip(): - if planKey not in ("STANDARD_MONTHLY", "STANDARD_YEARLY"): - planKey = "STANDARD_MONTHLY" - result = appInterface._provisionMandateForUser( - userId=str(currentUser.id), - mandateName=companyName.strip(), - planKey=planKey, - ) + if hasAdminMandate: + logger.info(f"Onboarding: user {currentUser.username} already has admin mandate — skipping provisioning") + return { + "message": "User already has an admin mandate", + "mandateId": None, + "alreadyProvisioned": True, + } + + mandateName = (companyName.strip() if companyName and companyName.strip() + else f"Home {currentUser.username}") + + if planKey not in ("TRIAL_7D", "STANDARD_MONTHLY", "STANDARD_YEARLY"): + planKey = "TRIAL_7D" + + result = appInterface._provisionMandateForUser( + userId=userId, + mandateName=mandateName, + planKey=planKey, + ) try: - activatedCount = appInterface._activatePendingSubscriptions(str(currentUser.id)) + activatedCount = appInterface._activatePendingSubscriptions(userId) if activatedCount > 0: logger.info(f"Activated {activatedCount} pending subscription(s) for user {currentUser.username} during onboarding") except Exception as subErr: diff --git a/modules/routes/routeStore.py b/modules/routes/routeStore.py index 5c6f782a..0d6e68ec 100644 --- a/modules/routes/routeStore.py +++ b/modules/routes/routeStore.py @@ -136,8 +136,8 @@ def listUserMandates( ) -> 
List[Dict[str, Any]]: """ List mandates where the user can activate features (admin mandates). - If user has 0 admin mandates, auto-provisions a personal mandate so the - Store always has a clear mandate context. + Returns empty list if user has no admin mandates — the frontend handles + this via OnboardingAssistant/OnboardingWizard to create a mandate. """ try: rootInterface = getRootInterface() @@ -145,16 +145,6 @@ def listUserMandates( userId = str(context.user.id) adminMandateIds = _getUserAdminMandateIds(db, userId) - if not adminMandateIds: - homeMandateName = f"Home {context.user.username}" - provisionResult = rootInterface._provisionMandateForUser( - userId=userId, - mandateName=homeMandateName, - planKey="TRIAL_7D", - ) - adminMandateIds = [provisionResult["mandateId"]] - logger.info(f"Auto-provisioned personal mandate {adminMandateIds[0]} for user {userId} on Store access") - result = [] for mid in adminMandateIds: records = db.getRecordset(Mandate, recordFilter={"id": mid}) diff --git a/modules/serviceCenter/services/serviceChat/mainServiceChat.py b/modules/serviceCenter/services/serviceChat/mainServiceChat.py index 40769fae..f3a74b1e 100644 --- a/modules/serviceCenter/services/serviceChat/mainServiceChat.py +++ b/modules/serviceCenter/services/serviceChat/mainServiceChat.py @@ -422,7 +422,7 @@ class ChatService: "size": fileItem.fileSize, "mimeType": fileItem.mimeType, "fileHash": fileItem.fileHash, - "creationDate": fileItem.creationDate, + "creationDate": fileItem.sysCreatedAt, "tags": getattr(fileItem, "tags", None), "folderId": getattr(fileItem, "folderId", None), "description": getattr(fileItem, "description", None), @@ -482,7 +482,7 @@ class ChatService: "fileName": fileItem.fileName, "mimeType": fileItem.mimeType, "fileSize": fileItem.fileSize, - "creationDate": fileItem.creationDate, + "creationDate": fileItem.sysCreatedAt, "tags": getattr(fileItem, "tags", None), "folderId": getattr(fileItem, "folderId", None), "description": getattr(fileItem, "description", None), @@ -524,7 +524,7 @@ class ChatService: mandateId=self._context.mandate_id or "", userId=self.user.id if self.user else "", ) - return self.interfaceDbComponent.db.recordCreate(DataSource, ds) + return self.interfaceDbApp.db.recordCreate(DataSource, ds) def listDataSources(self, featureInstanceId: str = None) -> List[Dict[str, Any]]: """List data sources, optionally filtered by feature instance.""" @@ -532,19 +532,19 @@ class ChatService: recordFilter = {} if featureInstanceId: recordFilter["featureInstanceId"] = featureInstanceId - return self.interfaceDbComponent.db.getRecordset(DataSource, recordFilter=recordFilter) + return self.interfaceDbApp.db.getRecordset(DataSource, recordFilter=recordFilter) def getDataSource(self, dataSourceId: str) -> Optional[Dict[str, Any]]: """Get a single data source by ID.""" from modules.datamodels.datamodelDataSource import DataSource - results = self.interfaceDbComponent.db.getRecordset(DataSource, recordFilter={"id": dataSourceId}) + results = self.interfaceDbApp.db.getRecordset(DataSource, recordFilter={"id": dataSourceId}) return results[0] if results else None def deleteDataSource(self, dataSourceId: str) -> bool: """Delete a data source.""" from modules.datamodels.datamodelDataSource import DataSource try: - self.interfaceDbComponent.db.recordDelete(DataSource, dataSourceId) + self.interfaceDbApp.db.recordDelete(DataSource, dataSourceId) return True except Exception as e: logger.error(f"Failed to delete DataSource {dataSourceId}: {e}") diff --git 
a/modules/serviceCenter/services/serviceGeneration/mainServiceGeneration.py b/modules/serviceCenter/services/serviceGeneration/mainServiceGeneration.py index 8fccd4e4..99da173e 100644 --- a/modules/serviceCenter/services/serviceGeneration/mainServiceGeneration.py +++ b/modules/serviceCenter/services/serviceGeneration/mainServiceGeneration.py @@ -346,7 +346,7 @@ class GenerationService: "size": file_item.fileSize, "mimeType": file_item.mimeType, "fileHash": getattr(file_item, 'fileHash', None), - "creationDate": getattr(file_item, 'creationDate', None) + "creationDate": getattr(file_item, 'sysCreatedAt', None) } return None except Exception as e: From 4cf24884cba5867b7fa29ea2bfa8ef17bb0d30ea Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Tue, 31 Mar 2026 00:06:01 +0200 Subject: [PATCH 16/33] fixed keys and sandbox for int --- env_dev.env | 8 ++++---- env_int.env | 8 ++++---- env_prod.env | 6 +++--- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/env_dev.env b/env_dev.env index 2a6d715c..30ffd079 100644 --- a/env_dev.env +++ b/env_dev.env @@ -48,18 +48,18 @@ Service_GOOGLE_DATA_REDIRECT_URI = http://localhost:8000/api/google/auth/connect # ClickUp OAuth (Verbindungen / automation). Create an app in ClickUp: Settings → Apps → API; set redirect URL to Service_CLICKUP_OAUTH_REDIRECT_URI exactly. Service_CLICKUP_CLIENT_ID = O3FX3H602A30MQN4I4SBNGJLIDBD5SL4 -Service_CLICKUP_CLIENT_SECRET = CZECD706WLSX6UV13YI4ACNW50ADZHHXDAJALHE0YE030QFSI6Y9HP4Y61JT7CF0 +Service_CLICKUP_CLIENT_SECRET = DEV_ENC:Z0FBQUFBQnB5dkd4ZWVBeHVtRnpIT0VBN0tSZDhLRmFmN05DOVBOelJtLWhkVnJDRVBqUkh3bDFTZFRWaWQ1cWowdGNLUk5IQzlGN1J6RFVCaW8zRnBwLVBnclJfdWgxV3pVRzFEV2lwcW5Rc19Xa1ROWXNJcUF0ajZaYUxOUXk0WHRsRmJLM25FaHV5T2IxdV92ZW1nRjhzaGpwU0l2Wm9FTkRnY2lJVjhuNHUwT29salAxYV8wPQ== Service_CLICKUP_OAUTH_REDIRECT_URI = http://localhost:8000/api/clickup/auth/connect/callback # Stripe Billing (both end with _SECRET for encryption script) -STRIPE_SECRET_KEY_SECRET = DEV_ENC:Z0FBQUFBQnBudkpGWDkxSldfM0NCZ3dmbHY5cS1nQlI3UWZ4ZWRrNVdUdEFKa25RckRiQWY0c1E5MjVsZzlfRkZEU0VFU2tNQ01qZnRNQ0pZVU9hVFN6OEU0RXhwdTl3algzLWJlSXRhYmZlMHltSC1XejlGWEU5TDF1LUlYNEh1aG9tRFI4YmlCYzUyei02U1dabWoyb0N2dVFSb1RhWTNnQjBCZkFjV0FfOWdYdDVpX1k5R2pYM1R6SHRiaE10V1l1dnQybjVHWDRiQUJLM0UxRDZnczhJZGFsc3JhOU82QT09 -STRIPE_WEBHOOK_SECRET = DEV_ENC:Z0FBQUFBQnBudkpGcHNWTWpBWkFHRExtdU01N3RyZzNsMjhUS3NiVTNCZmMwN2NEcFZ6UkQ1a2I0aUkyNU4wR2dUdHJXYmtkaEFRUnFpcThObHBEQmJkdEFnT1FXeUxOTlU3UDFNRzl6LWdpRFpYdExvY3FTTG9MTkswdEhrVkNKQVFucnBjSnhLNm4= +STRIPE_SECRET_KEY_SECRET = DEV_ENC:Z0FBQUFBQnB5dkd5aHNGejgzQmpTdmprdzQxR19KZkh3MlhYUTNseFN3WnlaWjh2SDZyalN6aU9xSktkbUQwUnZrVnlvbGVRQm4yZFdiRU5aSEk5WVJuUnR4VUwtTm9OVk1WWmJQeU5QaDdib0hfVWV5U1BfYTFXRmdoOWdnOWxkb3JFQmF3bm45UjFUVUxmWGtGRkFKUGd6bmhpQlFnaVI3Q2lLdDlsY1VESk1vOEM0ZFBJNW1qcVZ0N2tPYmRLNmVKajZ2M3o3S05lWnRRVG5LdkRseW4wQ3VjNHNQZTZUdz09 +STRIPE_WEBHOOK_SECRET = DEV_ENC:Z0FBQUFBQnB5dkd5dDJMSHBrVk8wTzJhU2xzTTZCZWdvWmU2NGI2WklfRXRJZVUzaVYyOU9GLUZsalUwa2lPdEgtUHo0dVVvRDU1cy1saHJyU0Rxa2xQZjBuakExQzk3bmxBcU9WbEIxUEtpR1JoUFMxZG9ISGRZUXFhdFpSMGxvQUV3a0VLQllfUUtCOHZwTGdteV9rYTFOazBfSlN3ekNWblFpakJlZVlCTmNkWWQ4Sm01a1RCWTlnTlFHWVA0MkZYMlprUExrWFN2V0NVU1BTd1NKczFJbVo3VHpLdlc4UT09 STRIPE_API_VERSION = 2026-01-28.clover # AI configuration Connector_AiOpenai_API_SECRET = 
DEV_ENC:Z0FBQUFBQnBaSnM4TWFRRmxVQmNQblVIYmc1Y0Q3aW9zZUtDWlNWdGZjbFpncGp2NHN2QjkxMWxibUJnZDBId252MWk5TXN3Yk14ajFIdi1CTkx2ZWx2QzF5OFR6LUx5azQ3dnNLaXJBOHNxc0tlWmtZcTFVelF4eXBSM2JkbHd2eTM0VHNXdHNtVUprZWtPVzctNlJsZHNmM20tU1N6Q1Q2cHFYSi1tNlhZNDNabTVuaEVGWmIydEhadTcyMlBURmw2aUJxOF9GTzR0dTZiNGZfOFlHaVpPZ1A1LXhhOEFtN1J5TEVNNWtMcGpyNkMzSl8xRnZsaTF1WTZrOUZmb0cxVURjSGFLS2dIYTQyZEJtTm90bEYxVWxNNXVPdTVjaVhYbXhxT3JsVDM5VjZMVFZKSE1tZnM9 Connector_AiAnthropic_API_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpENmFBWG16STFQUVZxNzZZRzRLYTA4X3lRanF1VkF4cU45OExNMzlsQmdISGFxTUxud1dXODBKcFhMVG9KNjdWVnlTTFFROVc3NDlsdlNHLUJXeG41NDBHaXhHR0VHVWl5UW9RNkVWbmlhakRKVW5pM0R4VHk0LUw0TV9LdkljNHdBLXJua21NQkl2b3l4UkVkMGN1YjBrMmJEeWtMay1jbmxrYWJNbUV0aktCXzU1djR2d2RSQXZORTNwcG92ZUVvVGMtQzQzTTVncEZTRGRtZUFIZWQ0dz09 -Connector_AiPerplexity_API_SECRET = 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 +Connector_AiPerplexity_API_SECRET = DEV_ENC:Z0FBQUFBQnB5dkd5ZmdDZ3hrSElrMnQzNFAtel9wX191VjVzN2g1LWZoa0V1YklubEdmMEJDdEZiR1RWeVZrM3V3enBHX3p6WUtTS0kwYkFyVEF0Nm8zX05CelVQcFJUc0lwVW5iNFczc1p1WWJ2WFBmd0lpLUxxWndEeUh0b2hGUHVpN19vb19nMTBnV1A1VmNpWERVX05lQ29VS20wTjZ3PT0= Connector_AiTavily_API_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEQTdnUHMwd2pIaXNtMmtCTFREd0pyQXRKb1F5eGtHSnkyOGZiUnlBOFc0b3Vzcndrc3ViRm1nMDJIOEZKYWxqdWNkZGh5N0Z4R0JlQmxXSG5pVnJUR2VYckZhMWNMZ1FNeXJ3enJLVlpiblhOZTNleUg3ZzZyUzRZanFSeDlVMkI= Connector_AiPrivateLlm_API_SECRET = DEV_ENC:Z0FBQUFBQnBudkpGRHM5eFdUVmVZU1R1cHBwN1RlMUx4T0NlLTJLUFFVX3J2OElDWFpuZmJHVmp4Z3BNNWMwZUVVZUd2TFhRSjVmVkVlcFlVRWtybXh0ZHloZ01ZcnVvX195YjdlWVdEcjZSWFFTTlNBWUlaTlNoLWhqVFBIb0thVlBiaWhjYjFQOFY= Connector_AiMistral_API_SECRET = DEV_ENC:Z0FBQUFBQnBudkpGeEQxYUIxOHhia0JlQWpWQ2dWQWZzY3l6SWwyUnJoR1hRQWloX2lxb2lGNkc4UnA4U2tWNjJaYzB1d1hvNG9fWUp1N3V4OW9FMGhaWVhjSlVwWEc1X2loVDBSZDEtdHdfcTA5QkcxQTR4OHc4RkRzclJrU2d1RFZpNDJkRDRURlE= diff --git a/env_int.env b/env_int.env index 5f331e5c..61981de1 100644 --- a/env_int.env +++ b/env_int.env @@ -48,18 +48,18 @@ Service_GOOGLE_DATA_REDIRECT_URI = https://gateway-int.poweron-center.net/api/go # ClickUp OAuth (Verbindungen / automation). Create an app in ClickUp: Settings → Apps → API; set redirect URL to Service_CLICKUP_OAUTH_REDIRECT_URI exactly. 
Service_CLICKUP_CLIENT_ID = O3FX3H602A30MQN4I4SBNGJLIDBD5SL4 -Service_CLICKUP_CLIENT_SECRET = CZECD706WLSX6UV13YI4ACNW50ADZHHXDAJALHE0YE030QFSI6Y9HP4Y61JT7CF0 +Service_CLICKUP_CLIENT_SECRET = INT_ENC:Z0FBQUFBQnB5dkd5SE1uVURMNVE3NkM4cHBKa2R2TjBnLWdpSXI5dHpKWGExZVFiUF95TFNnZ1NwLWFLdmh6eWFZTHVHYTBzU2FGRUpLYkVyM1NvZjZkWDZHN21qUER5ZVNOaGpCc3NrUGd3VnFTclF3OW1nUlVuWXQ1UVhDLVpyb1BwRExOeFpDeVhtbEhDVnd4TVdpbzNBNk5QQWFPdjdza0xBWGxFY1E3WFpCSUlNa1l4RDlBPQ== Service_CLICKUP_OAUTH_REDIRECT_URI = http://localhost:8000/api/clickup/auth/connect/callback # Stripe Billing (both end with _SECRET for encryption script) -STRIPE_SECRET_KEY_SECRET = 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 -STRIPE_WEBHOOK_SECRET = INT_ENC:Z0FBQUFBQnBudkpGamJBNW91VUdEaThWRTFiTWpyb3NqSDJJcGtjNkhUVVZqVElxUWExY05KcllSYVk1SkRuS1NjYWpZUk1uU29nb2pzdXUxRzBsOEgyRWtmUEw3dUF4ejFIXzNwTVZRM1R1bVVhTUs4ZHJMT0V4Xy1pcHVfWlBaQV9wVXo5MGlQYXA= +STRIPE_SECRET_KEY_SECRET = INT_ENC:Z0FBQUFBQnB5dkd5ekdBaGNGVUlOQUpncTlzLWlTV0V5OWZzQkpDczhCUGw4U1JpTHZ0d3pfYlFNWElLRlNiNlNsaDRYTGZUTkg2OUFrTW1GZXpOUjBVbmRQWjN6ekhHd2ZSQ195OHlaeWh1TmxrUm10V2R3YmdncmFLbFMzVjdqcWJMSUJPR2xuSEozclNoZG1rZVBTaWg3OFQ1Qzdxb0wyQ2RKazc2dG1aZXBUTXlvbDZqLS1KOVI5M3BGc3NQZkZRbnFpRjIwWmh2ZHlVNlpxZVo2dWNmMjQ5eW02QmtzUT09 +STRIPE_WEBHOOK_SECRET = INT_ENC:Z0FBQUFBQnB5dkd5VUszOWllM1E1YXlsdldIdENlUTd4bWhycVNBZVZzSWxlMjd4NEJwRnFVbnRaNTlGOWUyLVdxRUxySEtGRDdfbEVHM1dFTU93SHZtY1RKZkh0NG92M2cwYTQxQjQ0SFhqNXZnd21jbE52WW0wZC1oMlY3OXFFSV9sd2M1TC10N0hZa2Zha3FzX1FhcE14alo2TGFHX3QybHFxOTlQWWFZR3pabkRtOEp1Zm1zOFlrbDF0MFNkUjUyVFI5NUNZaU5TRXF4X29tcEQ2RUR1MTlXcUoxbTl0dz09 STRIPE_API_VERSION = 2026-01-28.clover # AI configuration Connector_AiOpenai_API_SECRET = INT_ENC:Z0FBQUFBQnBaSnM4MENkQ2xJVmE5WFZKUkh2SHJFby1YVXN3ZmVxRkptS3ZWRmlwdU93ZEJjSjlMV2NGbU5mS3NCdmFfcmFYTEJNZXFIQ3ozTWE4ZC1pemlQNk9wbjU1d3BPS0ZCTTZfOF8yWmVXMWx0TU1DamlJLVFhSTJXclZsY3hMVWlPcXVqQWtMdER4T252NHZUWEhUOTdIN1VGR3ltazEweXFqQ0lvb0hYWmxQQnpxb0JwcFNhRDNGWXdoRTVJWm9FalZpTUF5b1RqZlRaYnVKYkp0NWR5Vko1WWJ0Wmg2VWJzYXZ0Z3Q4UkpsTldDX2dsekhKMmM4YjRoa2RwemMwYVQwM2cyMFlvaU5mOTVTWGlROU8xY2ZVRXlxZzJqWkxURWlGZGI2STZNb0NpdEtWUnM9 Connector_AiAnthropic_API_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjT1ZlRWVJdVZMT3ljSFJDcFdxRFBRVkZhS204NnN5RDBlQ0tpenhTM0FFVktuWW9mWHNwRWx2dHB0eDBSZ0JFQnZKWlp6c01pVGREWHd1eGpERnU0Q2xhaks1clQ1ZXVsdnd2ZzhpNXNQS1BhY3FjSkdkVEhHalNaRGR4emhpakZncnpDQUVxOHVXQzVUWmtQc0FsYmFwTF9TSG5FOUFtWk5Ick1NcHFvY2s1T1c2WXlRUFFJZnh6TWhuaVpMYmppcDR0QUx0a0R6RXlwbGRYb1R4dzJkUT09 -Connector_AiPerplexity_API_SECRET = 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 +Connector_AiPerplexity_API_SECRET = INT_ENC:Z0FBQUFBQnB5dkd6UkhtU3lhYmZMSlo0bklQZ2s3UTFBSkprZTNwWkg5Q2lVa0wtenhxWXpva21xVDVMRjdKSmhpTmxWS05IUTRoRHdCbktSRVVjcVFnY1RfV0N2S2dyV0dTMlhxQlRFVm41RkFTWVQzQThuVkZwdlNuVC05QlVRVXB6Qjk3akNpYmY1MFR6R1ByMzlIMllRZlRRYVVRN2ZBPT0= Connector_AiTavily_API_SECRET = INT_ENC:Z0FBQUFBQm8xSVRkdkJMTDY0akhXNzZDWHVYSEt1cDZoOWEzSktneHZEV2JndTNmWlNSMV9KbFNIZmQzeVlrNE5qUEIwcUlBSGM1a0hOZ3J6djIyOVhnZzI3M1dIUkdicl9FVXF3RGktMmlEYmhnaHJfWTdGUkktSXVUSGdQMC1vSEV6VE8zR2F1SVk= Connector_AiPrivateLlm_API_SECRET = INT_ENC:Z0FBQUFBQnBudkpGSjZ1NWh0aWc1R3Z4MHNaeS1HamtUbndhcUZFZDlqUDhjSmg5eHFfdlVkU0RsVkJ2UVRaMWs3aWhraG5jSlc0YkxNWHVmR2JoSW5ENFFCdkJBM0VienlKSnhzNnBKbTJOUTFKczRfWlQ3bWpmUkRTT1I1OGNUSTlQdExacGRpeXg= Connector_AiMistral_API_SECRET = INT_ENC:Z0FBQUFBQnBudkpGZTNtZ1E4TWIxSEU1OUlreUpxZkJIR0Vxcm9xRHRUbnBxbTQ1cXlkbnltWkJVdTdMYWZ4c3Fsam42TERWUTVhNzZFMU9xVjdyRGFCYml6bmZsZFd2YmJzemlrSWN6Q3o3X0NXX2xXNUQteTNONHdKYzJ5YVpLLWdhU2JhSTJQZnI= 
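Note on the *_ENC values above: every key ending in _SECRET is stored as an environment-prefixed (DEV_ENC:/INT_ENC:/PROD_ENC:), base64-wrapped ciphertext, while unprefixed values stay plain. The decoded payloads begin with gAAAA, which looks like Fernet tokens, but the exact scheme is not shown in this series; the helper below (decryptEnvValue, ENC_PREFIXES) is a hypothetical sketch of the read side under that Fernet assumption, not code from the repository.

    import base64
    from cryptography.fernet import Fernet, InvalidToken

    ENC_PREFIXES = ("DEV_ENC:", "INT_ENC:", "PROD_ENC:")

    def decryptEnvValue(rawValue: str, fernetKey: bytes) -> str:
        # Values without an environment prefix are stored in plain text.
        prefix = next((p for p in ENC_PREFIXES if rawValue.startswith(p)), None)
        if prefix is None:
            return rawValue
        # The env file stores base64(fernet_token); unwrap both layers.
        token = base64.b64decode(rawValue[len(prefix):])
        try:
            return Fernet(fernetKey).decrypt(token).decode("utf-8")
        except InvalidToken as exc:
            raise ValueError(f"{prefix} value cannot be decrypted; wrong key for this environment?") from exc

Under this assumption, a value such as STRIPE_SECRET_KEY_SECRET would pass through decryptEnvValue at config load time, and plain values (e.g. a raw whsec_... webhook secret) still work because unprefixed strings are returned unchanged.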
diff --git a/env_prod.env b/env_prod.env index a4bdea05..093b6509 100644 --- a/env_prod.env +++ b/env_prod.env @@ -48,18 +48,18 @@ Service_GOOGLE_DATA_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/g # ClickUp OAuth (Verbindungen / automation). Create an app in ClickUp: Settings → Apps → API; set redirect URL to Service_CLICKUP_OAUTH_REDIRECT_URI exactly. Service_CLICKUP_CLIENT_ID = O3FX3H602A30MQN4I4SBNGJLIDBD5SL4 -Service_CLICKUP_CLIENT_SECRET = CZECD706WLSX6UV13YI4ACNW50ADZHHXDAJALHE0YE030QFSI6Y9HP4Y61JT7CF0 +Service_CLICKUP_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQnB5dkd6VGw5WDdhdDRsVENSalhSSUV0OFFxbEx0V1l6aktNV0E5Y18xU3JHLUlqMWVJdmxyajAydVZRaDJkZzJOVXhxRV9ROFRZbWxlRjh4c3NtQnRFMmRtZWpzTWVsdngtWldlNXRKTURHQjJCOEt6alMwQlkwOFYyVVJWNURJUGJIZDIxYVlfNnBrMU54M0Q3TVdVbFZqRkJKTUtqa05wUkV4eGZvbXNsVi1nNVdBPQ== Service_CLICKUP_OAUTH_REDIRECT_URI = http://localhost:8000/api/clickup/auth/connect/callback # Stripe Billing (both end with _SECRET for encryption script) -STRIPE_SECRET_KEY_SECRET = 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 +STRIPE_SECRET_KEY_SECRET = PROD_ENC:Z0FBQUFBQnB5dkd6aVA3R3VRS3VHMUgzUEVjYkR4eUZKWFhPUzFTTVlHNnBvT3FienNQaUlBWVpPLXJyVGpGMWk4LXktMXphX0J6ZTVESkJxdjNNa3ZJbF9wX2ppYzdjYlF0cmdVamlEWWJDSmJYYkJseHctTlh4dnNoQWs4SG5haVl2TTNDdXpuaFpqeDBtNkFCbUxMa0RaWG14dmxyOEdILTNrZ2licmNpbXVkN2lFSWoxZW1BODNpV0ZTQ0VaeXRmR1d4RjExMlVFS3MtQU9zZXZlZE1mTmY3OWctUXJHdz09 STRIPE_WEBHOOK_SECRET = PROD_ENC:Z0FBQUFBQnBudkpGNUpTWldsakYydFhFelBrR1lSaWxYT3kyMENOMUljZTJUZHBWcEhhdWVCMzYxZXQ5b3VlTFVRalFiTVdsbGxrdUx0RDFwSEpsOC1sTDJRTEJNQlA3S3ZaQzBtV1h6bWp5VnlMZUgwUlF3cXYxcnljZVE5SWdzLVg3V0syOWRYS08= STRIPE_API_VERSION = 2026-01-28.clover # AI configuration Connector_AiOpenai_API_SECRET = PROD_ENC:Z0FBQUFBQnBaSnM4TWJOVm4xVkx6azRlNDdxN3UxLUdwY2hhdGYxRGp4VFJqYXZIcmkxM1ZyOWV2M0Z4MHdFNkVYQ0ROb1d6LUZFUEdvMHhLMEtXYVBCRzM5TlYyY3ROYWtJRk41cDZxd0tYYi00MjVqMTh4QVcyTXl0bmVocEFHbXQwREpwNi1vODdBNmwzazE5bkpNelE2WXpvblIzWlQwbGdEelI2WXFqT1RibXVHcjNWbVhwYzBOM25XTzNmTDAwUjRvYk4yNjIyZHc5c2RSZzREQUFCdUwyb0ZuOXN1dzI2c2FKdXI4NGxEbk92czZWamJXU3ZSbUlLejZjRklRRk4tLV9aVUFZekI2bTU4OHYxNTUybDg3RVo0ZTh6dXNKRW5GNXVackZvcm9laGI0X3R6V3M9 Connector_AiAnthropic_API_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3TnhYdlhSLW5RbXJyMHFXX0V0bHhuTDlTaFJsRDl2dTdIUTFtVFAwTE8tY3hLbzNSMnVTLXd3RUZualN3MGNzc1kwOTIxVUN2WW1rYi1TendFRVVBSVNqRFVjckEzNExyTGNaUkJLMmozazUwemI1cnhrcEtZVXJrWkdaVFFramp3MWZ6RmY2aGlRMXVEYjM2M3ZlbmxMdnNCRDM1QWR0Wmd6MWVnS1I1c01nV3hRLXg3d2NTZXVfTi1Wdm16UnRyNGsyRTZ0bG9TQ1g1OFB5Z002bmQ3QT09 -Connector_AiPerplexity_API_SECRET = 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 +Connector_AiPerplexity_API_SECRET = PROD_ENC:Z0FBQUFBQnB5dkd6NG5CTm9QOFZRV1BIVC0tV2RKTGtCQWFOUXlpRnhEdjN1U2x3VUdDamtIZV9CQzQ5ZmRmcUh3ZUVUa0NxbGhlenVVdWtaYjdpcnhvUlNFLXZfOWh2dWFZai0xUGU5cWpuYmpnRVRWakh0RVNUUTFyX0w5V0NXVWFrQlZuOTd5TkI0eVRoQ0ZBSm9HYUlYamoyY1FCMmlBPT0= Connector_AiTavily_API_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3NmItcDh6V0JpcE5Jc0NlUWZqcmllRHB5eDlNZmVnUlNVenhNTm5xWExzbjJqdE1GZ0hTSUYtb2dvdWNhTnlQNmVWQ2NGVDgwZ0MwMWZBMlNKWEhzdlF3TlZzTXhCZWM4Z1Uwb18tSTRoU1JBVTVkSkJHOTJwX291b3dPaVphVFg= Connector_AiPrivateLlm_API_SECRET = PROD_ENC:Z0FBQUFBQnBudkpGanZ6U3pzZWkwXzVPWGtIQ040XzFrTXc5QWRnazdEeEktaUJ0akJmNnEzbWUzNHczLTJfc2dIdzBDY0FTaXZYcDhxNFdNbTNtbEJTb2VRZ0ZYd05hdlNLR1h6SUFzVml2Z1FLY1BjTl90UWozUGxtak1URnhhZmNDRWFTb0dKVUo= Connector_AiMistral_API_SECRET = 
PROD_ENC:Z0FBQUFBQnBudkpGc2tQc2lvMk1YZk01Q1dob1U5cnR0dG03WWE3WkpoOWo0SEpvLU9Rc2lCNDExdy1wZExaN3lpT2FEQkxnaHRmWmZUUUZUUUJmblZreGlpaFpOdnFhbzlEd1RsVVJtX216cmhxTm5BcTN2eUZ2T054cDE5bmlEamJ3NGR6MVpFQnA= From 3be09679369a16453fb8d5024ce8e27bf42c92d2 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Tue, 31 Mar 2026 00:15:47 +0200 Subject: [PATCH 17/33] fixed changed customer in stripe --- .../mainServiceSubscription.py | 62 ++++++++++++++++--- 1 file changed, 54 insertions(+), 8 deletions(-) diff --git a/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py b/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py index 944da4f7..55bff123 100644 --- a/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py +++ b/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py @@ -283,14 +283,34 @@ class SubscriptionService: subscriptionData["trial_end"] = trialEndTs self._interface.updateFields(subRecord["id"], {"effectiveFrom": periodEnd.isoformat()}) - session = stripe.checkout.Session.create( - mode="subscription", - customer=stripeCustomerId, - line_items=lineItems, - success_url=successUrl, - cancel_url=cancelUrl, - subscription_data=subscriptionData, - ) + session = None + for attempt in range(2): + try: + session = stripe.checkout.Session.create( + mode="subscription", + customer=stripeCustomerId, + line_items=lineItems, + success_url=successUrl, + cancel_url=cancelUrl, + subscription_data=subscriptionData, + ) + break + except Exception as e: + if attempt == 0 and self._isStripeMissingCustomerError(e): + logger.warning( + "Stripe reports missing customer %s for mandate %s — " + "clearing stored stripeCustomerId (wrong account, deleted customer, or copied DB).", + stripeCustomerId, + mandateId, + ) + self._clearStoredStripeCustomerId(mandateId) + stripeCustomerId = self._resolveStripeCustomer(mandateId) + if not stripeCustomerId: + raise ValueError( + f"Could not recreate Stripe customer for mandate {mandateId}" + ) from e + continue + raise if not session or not session.url: raise ValueError("Stripe Checkout Session creation failed") @@ -298,6 +318,32 @@ class SubscriptionService: logger.info("Checkout session %s created for mandate %s, plan %s", session.id, mandateId, plan.planKey) return session.url + @staticmethod + def _isStripeMissingCustomerError(exc: BaseException) -> bool: + code = getattr(exc, "code", None) + param = getattr(exc, "param", None) + if code == "resource_missing" and param == "customer": + return True + body = getattr(exc, "json_body", None) + if isinstance(body, dict): + err = body.get("error") + if isinstance(err, dict): + return err.get("code") == "resource_missing" and err.get("param") == "customer" + return False + + def _clearStoredStripeCustomerId(self, mandateId: str) -> None: + try: + from modules.interfaces.interfaceDbBilling import getInterface as getBillingInterface + + billingIf = getBillingInterface(self.currentUser, mandateId) + settings = billingIf.getSettings(mandateId) + if not settings or not settings.get("stripeCustomerId"): + return + billingIf.updateSettings(settings["id"], {"stripeCustomerId": None}) + logger.info("Cleared stripeCustomerId on billing settings for mandate %s", mandateId) + except Exception as e: + logger.error("Failed to clear stripeCustomerId for mandate %s: %s", mandateId, e) + def _resolveStripeCustomer(self, mandateId: str) -> Optional[str]: try: from modules.interfaces.interfaceDbBilling import getInterface as getBillingInterface From 
7cbcaacda1c8991984fb5ddf89bebd5ded395e08 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Tue, 31 Mar 2026 00:24:56 +0200 Subject: [PATCH 18/33] managed stripe change in env to trigger db refresh --- .../serviceSubscription/stripeBootstrap.py | 66 +++++++++++++------ 1 file changed, 45 insertions(+), 21 deletions(-) diff --git a/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py b/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py index 38ac29e1..edb2df1f 100644 --- a/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py +++ b/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py @@ -154,6 +154,23 @@ def _createStripePrice(stripe, productId: str, unitAmountCHF: float, interval: s return price.id +def _validateStripeIdsExist(stripe, mapping: StripePlanPrice) -> bool: + """Quick check whether at least the stored product IDs still exist in Stripe. + Returns False when running against a different Stripe account or after DB copy.""" + try: + if mapping.stripeProductIdUsers: + stripe.Product.retrieve(mapping.stripeProductIdUsers) + if mapping.stripeProductIdInstances: + stripe.Product.retrieve(mapping.stripeProductIdInstances) + return True + except Exception as e: + code = getattr(e, "code", None) + if code == "resource_missing": + return False + logger.debug("Stripe validation check failed (non-critical): %s", e) + return False + + def bootstrapStripePrices() -> None: """Ensure all paid plans have separate Stripe Products for users and instances.""" try: @@ -183,30 +200,37 @@ def bootstrapStripePrices() -> None: hasAllPrices = mapping.stripePriceIdUsers and mapping.stripePriceIdInstances hasAllProducts = mapping.stripeProductIdUsers and mapping.stripeProductIdInstances if hasAllPrices and hasAllProducts: - changed = False - reconciledUsers = _reconcilePrice( - stripe, mapping.stripeProductIdUsers, mapping.stripePriceIdUsers, - plan.pricePerUserCHF, interval, f"{planKey} — Benutzer-Lizenz", - ) - if reconciledUsers != mapping.stripePriceIdUsers: - changed = True + if _validateStripeIdsExist(stripe, mapping): + changed = False + reconciledUsers = _reconcilePrice( + stripe, mapping.stripeProductIdUsers, mapping.stripePriceIdUsers, + plan.pricePerUserCHF, interval, f"{planKey} — Benutzer-Lizenz", + ) + if reconciledUsers != mapping.stripePriceIdUsers: + changed = True - reconciledInstances = _reconcilePrice( - stripe, mapping.stripeProductIdInstances, mapping.stripePriceIdInstances, - plan.pricePerFeatureInstanceCHF, interval, f"{planKey} — Feature-Instanz", - ) - if reconciledInstances != mapping.stripePriceIdInstances: - changed = True + reconciledInstances = _reconcilePrice( + stripe, mapping.stripeProductIdInstances, mapping.stripePriceIdInstances, + plan.pricePerFeatureInstanceCHF, interval, f"{planKey} — Feature-Instanz", + ) + if reconciledInstances != mapping.stripePriceIdInstances: + changed = True - if changed: - db.recordModify(StripePlanPrice, mapping.id, { - "stripePriceIdUsers": reconciledUsers, - "stripePriceIdInstances": reconciledInstances, - }) - logger.info("Reconciled Stripe prices for plan %s: users=%s, instances=%s", planKey, reconciledUsers, reconciledInstances) + if changed: + db.recordModify(StripePlanPrice, mapping.id, { + "stripePriceIdUsers": reconciledUsers, + "stripePriceIdInstances": reconciledInstances, + }) + logger.info("Reconciled Stripe prices for plan %s: users=%s, instances=%s", planKey, reconciledUsers, reconciledInstances) + else: + logger.debug("Stripe prices up-to-date for plan %s", 
planKey) + continue else: - logger.debug("Stripe prices up-to-date for plan %s", planKey) - continue + logger.warning( + "Stored Stripe IDs for plan %s reference unknown objects " + "(likely wrong Stripe account or copied DB) — re-provisioning.", + planKey, + ) productIdUsers = None productIdInstances = None From b53a7f363def5d36cc975561ac129daed555743a Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Tue, 31 Mar 2026 00:47:28 +0200 Subject: [PATCH 19/33] fixes stripe --- app.py | 25 +++++++++++++------------ env_int.env | 2 +- modules/routes/routeSubscription.py | 21 +++++++++++++++++++-- 3 files changed, 33 insertions(+), 15 deletions(-) diff --git a/app.py b/app.py index 63e5652a..f29436cc 100644 --- a/app.py +++ b/app.py @@ -489,18 +489,6 @@ def getAllowedOrigins(): CORS_ORIGIN_REGEX = r"https://.*\.(poweron\.swiss|poweron-center\.net)" -# CORS configuration using environment variables -app.add_middleware( - CORSMiddleware, - allow_origins=getAllowedOrigins(), - allow_origin_regex=CORS_ORIGIN_REGEX, - allow_credentials=True, - allow_methods=["GET", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"], - allow_headers=["*"], - expose_headers=["*"], - max_age=86400, # Increased caching for preflight requests -) - # SlowAPI rate limiter initialization from modules.auth import limiter from slowapi.errors import RateLimitExceeded @@ -538,6 +526,19 @@ app.add_middleware( ProactiveTokenRefreshMiddleware, enabled=True, check_interval_minutes=5 ) +# CORS must be registered LAST so it wraps the whole stack: every response (errors, CSRF 403, +# rate limits) still gets Access-Control-Allow-Origin for browser cross-origin calls. +app.add_middleware( + CORSMiddleware, + allow_origins=getAllowedOrigins(), + allow_origin_regex=CORS_ORIGIN_REGEX, + allow_credentials=True, + allow_methods=["GET", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"], + allow_headers=["*"], + expose_headers=["*"], + max_age=86400, +) + # Include all routers from modules.routes.routeAdmin import router as generalRouter diff --git a/env_int.env b/env_int.env index 61981de1..fc9c0efd 100644 --- a/env_int.env +++ b/env_int.env @@ -53,7 +53,7 @@ Service_CLICKUP_OAUTH_REDIRECT_URI = http://localhost:8000/api/clickup/auth/conn # Stripe Billing (both end with _SECRET for encryption script) STRIPE_SECRET_KEY_SECRET = INT_ENC:Z0FBQUFBQnB5dkd5ekdBaGNGVUlOQUpncTlzLWlTV0V5OWZzQkpDczhCUGw4U1JpTHZ0d3pfYlFNWElLRlNiNlNsaDRYTGZUTkg2OUFrTW1GZXpOUjBVbmRQWjN6ekhHd2ZSQ195OHlaeWh1TmxrUm10V2R3YmdncmFLbFMzVjdqcWJMSUJPR2xuSEozclNoZG1rZVBTaWg3OFQ1Qzdxb0wyQ2RKazc2dG1aZXBUTXlvbDZqLS1KOVI5M3BGc3NQZkZRbnFpRjIwWmh2ZHlVNlpxZVo2dWNmMjQ5eW02QmtzUT09 -STRIPE_WEBHOOK_SECRET = INT_ENC:Z0FBQUFBQnB5dkd5VUszOWllM1E1YXlsdldIdENlUTd4bWhycVNBZVZzSWxlMjd4NEJwRnFVbnRaNTlGOWUyLVdxRUxySEtGRDdfbEVHM1dFTU93SHZtY1RKZkh0NG92M2cwYTQxQjQ0SFhqNXZnd21jbE52WW0wZC1oMlY3OXFFSV9sd2M1TC10N0hZa2Zha3FzX1FhcE14alo2TGFHX3QybHFxOTlQWWFZR3pabkRtOEp1Zm1zOFlrbDF0MFNkUjUyVFI5NUNZaU5TRXF4X29tcEQ2RUR1MTlXcUoxbTl0dz09 +STRIPE_WEBHOOK_SECRET = whsec_2agCQEbDPSOn2C40EJcwoPCqlvaPLF7M STRIPE_API_VERSION = 2026-01-28.clover # AI configuration diff --git a/modules/routes/routeSubscription.py b/modules/routes/routeSubscription.py index 88d0b21c..3193292c 100644 --- a/modules/routes/routeSubscription.py +++ b/modules/routes/routeSubscription.py @@ -291,14 +291,31 @@ def verifyCheckout( logger.error("Failed to retrieve checkout session %s: %s", data.sessionId, e) raise HTTPException(status_code=400, detail="Invalid session ID") - if session.get("status") != "complete" or session.get("payment_status") != "paid": + 
payStatus = session.get("payment_status")
+    if session.get("status") != "complete":
+        return {"status": "pending", "message": "Checkout not yet completed"}
+    # Subscription checkouts with trial / $0 first period use no_payment_required, not paid.
+    if payStatus not in ("paid", "no_payment_required"):
         return {"status": "pending", "message": "Checkout not yet completed"}

     if session.get("mode") != "subscription":
         raise HTTPException(status_code=400, detail="Not a subscription checkout session")

     from modules.routes.routeBilling import _handleSubscriptionCheckoutCompleted
-    _handleSubscriptionCheckoutCompleted(session, f"verify-{data.sessionId}")
+
+    try:
+        _handleSubscriptionCheckoutCompleted(session, f"verify-{data.sessionId}")
+    except Exception as e:
+        logger.exception(
+            "verifyCheckout: handler failed for session %s mandate %s: %s",
+            data.sessionId,
+            mandateId,
+            e,
+        )
+        raise HTTPException(
+            status_code=500,
+            detail="Subscription-Aktivierung nach Checkout fehlgeschlagen. Bitte erneut versuchen oder Support informieren.",
+        ) from e

     return {"status": "activated", "message": "Subscription activated"}

From 350c6994738480665eac0e3d6bfb510a059148f4 Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Tue, 31 Mar 2026 01:12:25 +0200
Subject: [PATCH 20/33] fixed stripe webhook

---
 modules/interfaces/interfaceDbBilling.py | 19 +++++++++++++
 modules/routes/routeSubscription.py      | 34 +++++++++++++++---------
 2 files changed, 41 insertions(+), 12 deletions(-)

diff --git a/modules/interfaces/interfaceDbBilling.py b/modules/interfaces/interfaceDbBilling.py
index 948f8918..d8c052c9 100644
--- a/modules/interfaces/interfaceDbBilling.py
+++ b/modules/interfaces/interfaceDbBilling.py
@@ -994,6 +994,25 @@ class BillingObjects:
         )
         return created

+    def ensureActivationBudget(self, mandateId: str, planKey: str) -> Optional[Dict[str, Any]]:
+        """Idempotent: credit the activation budget only if no SUBSCRIPTION credit exists yet."""
+        poolAccount = self.getMandateAccount(mandateId)
+        if not poolAccount:
+            return self.creditSubscriptionBudget(mandateId, planKey, periodLabel="Erstaktivierung")
+
+        existing = self.db.getRecordset(
+            BillingTransaction,
+            recordFilter={
+                "accountId": poolAccount["id"],
+                "transactionType": TransactionTypeEnum.CREDIT.value,
+                "referenceType": ReferenceTypeEnum.SUBSCRIPTION.value,
+            },
+        )
+        if existing:
+            return None
+
+        return self.creditSubscriptionBudget(mandateId, planKey, periodLabel="Erstaktivierung")
+
     # =========================================================================
     # Workflow Cost Query
     # =========================================================================

diff --git a/modules/routes/routeSubscription.py b/modules/routes/routeSubscription.py
index 3193292c..9f1f0bf8 100644
--- a/modules/routes/routeSubscription.py
+++ b/modules/routes/routeSubscription.py
@@ -273,10 +273,8 @@ def verifyCheckout(
 ):
     """Verify a Stripe Checkout Session and activate the subscription if paid.

-    This is the synchronous counterpart to the checkout.session.completed webhook.
-    It's called by the frontend immediately after returning from Stripe to handle
-    environments where webhooks may be delayed or unavailable (e.g. localhost dev).
-    The logic is idempotent — if the webhook already processed the session, this is a no-op.
+    Idempotent: if the webhook already processed the session, returns success.
+    Called by the frontend immediately after returning from Stripe. 
""" mandateId = _resolveMandateId(context) if not mandateId: @@ -294,7 +292,6 @@ def verifyCheckout( payStatus = session.get("payment_status") if session.get("status") != "complete": return {"status": "pending", "message": "Checkout not yet completed"} - # Subscription checkouts with trial / $0 first period use no_payment_required, not paid. if payStatus not in ("paid", "no_payment_required"): return {"status": "pending", "message": "Checkout not yet completed"} @@ -306,18 +303,31 @@ def verifyCheckout( try: _handleSubscriptionCheckoutCompleted(session, f"verify-{data.sessionId}") except Exception as e: - logger.exception( - "verifyCheckout: handler failed for session %s mandate %s: %s", + logger.warning( + "verifyCheckout: handler raised for session %s mandate %s: %s", data.sessionId, mandateId, e, ) - raise HTTPException( - status_code=500, - detail="Subscription-Aktivierung nach Checkout fehlgeschlagen. Bitte erneut versuchen oder Support informieren.", - ) from e - return {"status": "activated", "message": "Subscription activated"} + from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import ( + getService as getSubscriptionService, + ) + from modules.datamodels.datamodelSubscription import OPERATIVE_STATUSES + + subService = getSubscriptionService(context.user, mandateId) + operative = subService.getOperativeSubscription(mandateId) + if operative and operative.get("status") in [s.value for s in OPERATIVE_STATUSES]: + planKey = operative.get("planKey", "") + if planKey: + try: + from modules.interfaces.interfaceDbBilling import _getRootInterface as _getBillingRoot + _getBillingRoot().ensureActivationBudget(mandateId, planKey) + except Exception as ex: + logger.warning("verifyCheckout: ensureActivationBudget failed: %s", ex) + return {"status": "activated", "message": "Subscription activated"} + + return {"status": "pending", "message": "Subscription activation pending — webhook may still be processing."} # ============================================================================= From ef39d01e1679d4378ac461cf87d5807df4bf240b Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Tue, 31 Mar 2026 01:51:08 +0200 Subject: [PATCH 21/33] fixed database issue subscriptions --- modules/auth/csrf.py | 15 ++++++++------ modules/interfaces/interfaceDbApp.py | 25 ++++++++++++---------- modules/routes/routeDataMandates.py | 31 +++++++++++++++++++++++++++- modules/routes/routeStore.py | 11 ++++++---- 4 files changed, 60 insertions(+), 22 deletions(-) diff --git a/modules/auth/csrf.py b/modules/auth/csrf.py index 7cc0c07c..bac4b0c3 100644 --- a/modules/auth/csrf.py +++ b/modules/auth/csrf.py @@ -88,12 +88,15 @@ class CSRFMiddleware(BaseHTTPMiddleware): content={"detail": "Invalid CSRF token format"} ) - # Additional CSRF validation could be added here: - # - Check token against session - # - Validate token expiration - # - Verify token origin - - return await call_next(request) + try: + return await call_next(request) + except Exception as exc: + logger.error("Unhandled exception in %s %s: %s", request.method, request.url.path, exc) + from fastapi.responses import JSONResponse + return JSONResponse( + status_code=500, + content={"detail": "Internal server error"}, + ) def _is_valid_csrf_token(self, token: str) -> bool: """ diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py index d980eb56..2ac768fd 100644 --- a/modules/interfaces/interfaceDbApp.py +++ b/modules/interfaces/interfaceDbApp.py @@ -1728,8 +1728,10 @@ class AppObjects: 
self.db.recordDelete(UserMandate, um.get("id")) logger.info(f"Cascade: deleted {len(memberships)} UserMandates for mandate {mandateId}") - # 3. Cancel Stripe subscriptions + delete MandateSubscription records - subs = self.db.getRecordset(MandateSubscription, recordFilter={"mandateId": mandateId}) + # 3. Cancel Stripe subscriptions + delete MandateSubscription records (poweron_billing) + from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot + subInterface = _getSubRoot() + subs = subInterface.listForMandate(mandateId) for sub in subs: subId = sub.get("id") stripeSubId = sub.get("stripeSubscriptionId") @@ -1741,20 +1743,21 @@ class AppObjects: logger.info(f"Cancelled Stripe subscription {stripeSubId} for mandate {mandateId}") except Exception as e: logger.warning(f"Failed to cancel Stripe sub {stripeSubId}: {e}") - self.db.recordDelete(MandateSubscription, subId) + subInterface.db.recordDelete(MandateSubscription, subId) logger.info(f"Cascade: deleted {len(subs)} subscriptions for mandate {mandateId}") - # 3b. Delete Billing data - billingTxs = self.db.getRecordset(BillingTransaction, recordFilter={"mandateId": mandateId}) if hasattr(BillingTransaction, '__table_name__') else [] - billingAccounts = self.db.getRecordset(BillingAccount, recordFilter={"mandateId": mandateId}) + # 3b. Delete Billing data (poweron_billing) + from modules.interfaces.interfaceDbBilling import _getRootInterface as _getBillingRoot + billingDb = _getBillingRoot().db + billingAccounts = billingDb.getRecordset(BillingAccount, recordFilter={"mandateId": mandateId}) for acc in billingAccounts: - accTxs = self.db.getRecordset(BillingTransaction, recordFilter={"accountId": acc.get("id")}) + accTxs = billingDb.getRecordset(BillingTransaction, recordFilter={"accountId": acc.get("id")}) for tx in accTxs: - self.db.recordDelete(BillingTransaction, tx.get("id")) - self.db.recordDelete(BillingAccount, acc.get("id")) - billingSettings = self.db.getRecordset(BillingSettings, recordFilter={"mandateId": mandateId}) + billingDb.recordDelete(BillingTransaction, tx.get("id")) + billingDb.recordDelete(BillingAccount, acc.get("id")) + billingSettings = billingDb.getRecordset(BillingSettings, recordFilter={"mandateId": mandateId}) for bs in billingSettings: - self.db.recordDelete(BillingSettings, bs.get("id")) + billingDb.recordDelete(BillingSettings, bs.get("id")) if billingAccounts or billingSettings: logger.info(f"Cascade: deleted billing data for mandate {mandateId}") diff --git a/modules/routes/routeDataMandates.py b/modules/routes/routeDataMandates.py index 2c2bd31c..e98fd1cc 100644 --- a/modules/routes/routeDataMandates.py +++ b/modules/routes/routeDataMandates.py @@ -318,7 +318,36 @@ def create_mandate( logger.warning( f"Could not create default billing settings for mandate {newMandate.id}: {billingErr}" ) - + + try: + from modules.datamodels.datamodelSubscription import ( + MandateSubscription, SubscriptionStatusEnum, BUILTIN_PLANS, + ) + from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot + from datetime import datetime, timezone, timedelta + + planKey = mandateData.get("planKey", "TRIAL_7D") + plan = BUILTIN_PLANS.get(planKey) + if plan: + now = datetime.now(timezone.utc) + targetStatus = SubscriptionStatusEnum.TRIALING if plan.trialDays else SubscriptionStatusEnum.ACTIVE + sub = MandateSubscription( + mandateId=str(newMandate.id), + planKey=planKey, + status=targetStatus, + recurring=plan.autoRenew and not plan.trialDays, + startedAt=now, + 
currentPeriodStart=now,
+            )
+            if plan.trialDays:
+                sub.trialEndsAt = now + timedelta(days=plan.trialDays)
+                sub.currentPeriodEnd = now + timedelta(days=plan.trialDays)
+            subInterface = _getSubRoot()
+            subInterface.createSubscription(sub)
+            logger.info(f"Created {targetStatus.value} subscription ({planKey}) for mandate {newMandate.id}")
+    except Exception as subErr:
+        logger.error(f"Failed to create subscription for mandate {newMandate.id}: {subErr}")
+
     logger.info(f"Mandate {newMandate.id} created by SysAdmin {currentUser.id}")
     return newMandate

diff --git a/modules/routes/routeStore.py b/modules/routes/routeStore.py
index 0d6e68ec..4af0f6b7 100644
--- a/modules/routes/routeStore.py
+++ b/modules/routes/routeStore.py
@@ -187,9 +187,12 @@ def getSubscriptionInfo(
             "budgetAiCHF": None,
         }

-    from modules.datamodels.datamodelSubscription import MandateSubscription, BUILTIN_PLANS
-    subs = db.getRecordset(MandateSubscription, recordFilter={"mandateId": mandateId})
-    if not subs:
+    from modules.datamodels.datamodelSubscription import BUILTIN_PLANS
+    from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot
+
+    subInterface = _getSubRoot()
+    allSubs = subInterface.listForMandate(mandateId)
+    if not allSubs:
         return {
             "plan": None,
             "maxDataVolumeMB": None,
@@ -197,7 +200,7 @@ def getSubscriptionInfo(
             "budgetAiCHF": None,
         }

-    sub = subs[0]
+    sub = allSubs[0]
     plan = BUILTIN_PLANS.get(sub.get("planKey"))

     currentInstances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId})

From b142c0fa6cf5027039b487f4e40c515cbcc4f905 Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Tue, 31 Mar 2026 02:05:16 +0200
Subject: [PATCH 22/33] INT problem: Unhandled exception: get
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Root cause: the stripe-python library on INT has no .get() method on Stripe objects. When session.get("payment_status") is called, Python resolves "get" via __getattr__ as a field lookup → AttributeError("get"). Confirmation: routeBilling.py already contained a hasattr(session, "get") check (line 998), so someone had run into this problem before. 
Fix: all Stripe objects are converted with dict() immediately after the API call
---
 modules/routes/routeBilling.py             | 18 +++++++++++-------
 modules/routes/routeSubscription.py        |  3 ++-
 .../mainServiceSubscription.py             | 10 ++++++----
 .../serviceSubscription/stripeBootstrap.py |  2 +-
 4 files changed, 20 insertions(+), 13 deletions(-)

diff --git a/modules/routes/routeBilling.py b/modules/routes/routeBilling.py
index 0f612d45..d899ad2a 100644
--- a/modules/routes/routeBilling.py
+++ b/modules/routes/routeBilling.py
@@ -994,9 +994,10 @@ def _handleSubscriptionCheckoutCompleted(session, eventId: str) -> None:
     from modules.security.rootAccess import getRootUser
     from datetime import datetime, timezone

-    metadata = {}
-    if hasattr(session, "get"):
-        metadata = session.get("metadata") or {}
+    if not isinstance(session, dict):
+        session = dict(session)
+
+    metadata = session.get("metadata") or {}
     subscriptionRecordId = metadata.get("subscriptionRecordId")
     mandateId = metadata.get("mandateId")
     planKey = metadata.get("planKey", "")
@@ -1009,7 +1010,7 @@
             try:
                 from modules.shared.stripeClient import getStripeClient
                 stripe = getStripeClient()
-                subObj = stripe.Subscription.retrieve(stripeSub)
+                subObj = dict(stripe.Subscription.retrieve(stripeSub))
                 metadata = subObj.get("metadata") or {}
                 subscriptionRecordId = metadata.get("subscriptionRecordId")
                 mandateId = metadata.get("mandateId")
@@ -1041,7 +1042,7 @@
         try:
             from modules.shared.stripeClient import getStripeClient
             stripe = getStripeClient()
-            stripeSub = stripe.Subscription.retrieve(stripeSubId, expand=["items"])
+            stripeSub = dict(stripe.Subscription.retrieve(stripeSubId, expand=["items"]))

             if stripeSub.get("current_period_start"):
                 stripeData["currentPeriodStart"] = datetime.fromtimestamp(
@@ -1054,8 +1055,11 @@
             from modules.serviceCenter.services.serviceSubscription.stripeBootstrap import getStripePricesForPlan
             priceMapping = getStripePricesForPlan(planKey)

-            for item in stripeSub.get("items", {}).get("data", []):
-                priceId = item.get("price", {}).get("id", "")
+            items = stripeSub.get("items") or {}
+            if not isinstance(items, dict):
+                items = dict(items)
+            for item in items.get("data", []):
+                priceId = (item.get("price") or {}).get("id", "")
                 if priceMapping and priceId == priceMapping.stripePriceIdUsers:
                     stripeData["stripeItemIdUsers"] = item["id"]
                 elif priceMapping and priceId == priceMapping.stripePriceIdInstances:

diff --git a/modules/routes/routeSubscription.py b/modules/routes/routeSubscription.py
index 9f1f0bf8..99bdb4e6 100644
--- a/modules/routes/routeSubscription.py
+++ b/modules/routes/routeSubscription.py
@@ -284,7 +284,8 @@ def verifyCheckout(
     try:
         from modules.shared.stripeClient import getStripeClient
         stripe = getStripeClient()
-        session = stripe.checkout.Session.retrieve(data.sessionId)
+        rawSession = stripe.checkout.Session.retrieve(data.sessionId)
+        session = dict(rawSession)
     except Exception as e:
         logger.error("Failed to retrieve checkout session %s: %s", data.sessionId, e)
         raise HTTPException(status_code=400, detail="Invalid session ID")

diff --git a/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py b/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py
index 55bff123..5d2249a0 100644
--- a/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py
+++ 
b/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py @@ -425,7 +425,7 @@ class SubscriptionService: try: from modules.shared.stripeClient import getStripeClient stripe = getStripeClient() - stripeSub = stripe.Subscription.modify(stripeSubId, cancel_at_period_end=True) + stripeSub = dict(stripe.Subscription.modify(stripeSubId, cancel_at_period_end=True)) pUrl = (stripeSub.get("metadata") or {}).get("platformUrl", "") except Exception as e: logger.error("Failed to set cancel_at_period_end for %s: %s", stripeSubId, e) @@ -488,7 +488,7 @@ class SubscriptionService: try: from modules.shared.stripeClient import getStripeClient stripe = getStripeClient() - stripeSub = stripe.Subscription.retrieve(stripeSubId) + stripeSub = dict(stripe.Subscription.retrieve(stripeSubId)) pUrl = (stripeSub.get("metadata") or {}).get("platformUrl", "") stripe.Subscription.cancel(stripeSubId) except Exception as e: @@ -673,7 +673,8 @@ def _buildInvoiceSummaryHtml( stripe = getStripeClient() invoices = stripe.Invoice.list(subscription=stripeSubId, limit=1) if invoices.data: - hostedUrl = invoices.data[0].get("hosted_invoice_url", "") + inv = dict(invoices.data[0]) if not isinstance(invoices.data[0], dict) else invoices.data[0] + hostedUrl = inv.get("hosted_invoice_url", "") if hostedUrl: invoiceLink = ( f'

' @@ -714,7 +715,8 @@ def _buildCancelSummaryHtml(subRecord: Dict[str, Any], platformUrl: str = "") -> stripe = getStripeClient() invoices = stripe.Invoice.list(subscription=stripeSubId, limit=1) if invoices.data: - hostedUrl = invoices.data[0].get("hosted_invoice_url", "") + inv = dict(invoices.data[0]) if not isinstance(invoices.data[0], dict) else invoices.data[0] + hostedUrl = inv.get("hosted_invoice_url", "") if hostedUrl: parts.append( f'

'
diff --git a/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py b/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py
index edb2df1f..1e44217b 100644
--- a/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py
+++ b/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py
@@ -108,7 +108,7 @@ def _findExistingStripePrice(stripe, productId: str, unitAmount: int, interval:

 def _getStripePriceAmount(stripe, priceId: str) -> Optional[int]:
     """Retrieve the unit_amount (in Rappen) of an existing Stripe Price."""
     try:
-        price = stripe.Price.retrieve(priceId)
+        price = dict(stripe.Price.retrieve(priceId))
         return price.get("unit_amount") if price else None
     except Exception:
         return None

From bc370ef4754fe2f3cec471875d613c3e640f58b1 Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Tue, 31 Mar 2026 02:14:33 +0200
Subject: [PATCH 23/33] All 7 places in the code that convert Stripe objects
 to dicts now use stripeToDict(). This works regardless of the Stripe library
 version on DEV and INT.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 modules/routes/routeBilling.py             | 12 ++++++++----
 modules/routes/routeSubscription.py        |  4 ++--
 .../mainServiceSubscription.py             | 12 ++++++++----
 .../serviceSubscription/stripeBootstrap.py |  3 ++-
 modules/shared/stripeClient.py             | 17 ++++++++++++++++-
 5 files changed, 36 insertions(+), 12 deletions(-)

diff --git a/modules/routes/routeBilling.py b/modules/routes/routeBilling.py
index d899ad2a..5029e485 100644
--- a/modules/routes/routeBilling.py
+++ b/modules/routes/routeBilling.py
@@ -875,7 +875,8 @@ def confirmCheckoutSession(
     if not session:
         raise HTTPException(status_code=404, detail="Stripe Checkout Session not found")

-    session_dict = session.to_dict_recursive() if hasattr(session, "to_dict_recursive") else dict(session)
+    from modules.shared.stripeClient import stripeToDict
+    session_dict = stripeToDict(session)
     metadata = session_dict.get("metadata") or {}
     mandate_id = metadata.get("mandateId")
     user_id = metadata.get("userId") or None
@@ -995,7 +996,8 @@ def _handleSubscriptionCheckoutCompleted(session, eventId: str) -> None:
     from datetime import datetime, timezone

     if not isinstance(session, dict):
-        session = dict(session)
+        from modules.shared.stripeClient import stripeToDict
+        session = stripeToDict(session)

     metadata = session.get("metadata") or {}
     subscriptionRecordId = metadata.get("subscriptionRecordId")
@@ -1010,7 +1012,8 @@
             try:
                 from modules.shared.stripeClient import getStripeClient
                 stripe = getStripeClient()
-                subObj = dict(stripe.Subscription.retrieve(stripeSub))
+                from modules.shared.stripeClient import stripeToDict
+                subObj = stripeToDict(stripe.Subscription.retrieve(stripeSub))
                 metadata = subObj.get("metadata") or {}
                 subscriptionRecordId = metadata.get("subscriptionRecordId")
                 mandateId = metadata.get("mandateId")
@@ -1042,7 +1045,8 @@
         try:
             from modules.shared.stripeClient import getStripeClient
             stripe = getStripeClient()
-            stripeSub = dict(stripe.Subscription.retrieve(stripeSubId, expand=["items"]))
+            from modules.shared.stripeClient import stripeToDict
+            stripeSub = stripeToDict(stripe.Subscription.retrieve(stripeSubId, expand=["items"]))

             if stripeSub.get("current_period_start"): 
stripeData["currentPeriodStart"] = datetime.fromtimestamp( diff --git a/modules/routes/routeSubscription.py b/modules/routes/routeSubscription.py index 99bdb4e6..97a7f23b 100644 --- a/modules/routes/routeSubscription.py +++ b/modules/routes/routeSubscription.py @@ -282,10 +282,10 @@ def verifyCheckout( _assertMandateAdmin(context, mandateId) try: - from modules.shared.stripeClient import getStripeClient + from modules.shared.stripeClient import getStripeClient, stripeToDict stripe = getStripeClient() rawSession = stripe.checkout.Session.retrieve(data.sessionId) - session = dict(rawSession) + session = stripeToDict(rawSession) except Exception as e: logger.error("Failed to retrieve checkout session %s: %s", data.sessionId, e) raise HTTPException(status_code=400, detail="Invalid session ID") diff --git a/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py b/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py index 5d2249a0..9535a2da 100644 --- a/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py +++ b/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py @@ -425,7 +425,8 @@ class SubscriptionService: try: from modules.shared.stripeClient import getStripeClient stripe = getStripeClient() - stripeSub = dict(stripe.Subscription.modify(stripeSubId, cancel_at_period_end=True)) + from modules.shared.stripeClient import stripeToDict + stripeSub = stripeToDict(stripe.Subscription.modify(stripeSubId, cancel_at_period_end=True)) pUrl = (stripeSub.get("metadata") or {}).get("platformUrl", "") except Exception as e: logger.error("Failed to set cancel_at_period_end for %s: %s", stripeSubId, e) @@ -488,7 +489,8 @@ class SubscriptionService: try: from modules.shared.stripeClient import getStripeClient stripe = getStripeClient() - stripeSub = dict(stripe.Subscription.retrieve(stripeSubId)) + from modules.shared.stripeClient import stripeToDict + stripeSub = stripeToDict(stripe.Subscription.retrieve(stripeSubId)) pUrl = (stripeSub.get("metadata") or {}).get("platformUrl", "") stripe.Subscription.cancel(stripeSubId) except Exception as e: @@ -673,7 +675,8 @@ def _buildInvoiceSummaryHtml( stripe = getStripeClient() invoices = stripe.Invoice.list(subscription=stripeSubId, limit=1) if invoices.data: - inv = dict(invoices.data[0]) if not isinstance(invoices.data[0], dict) else invoices.data[0] + from modules.shared.stripeClient import stripeToDict + inv = stripeToDict(invoices.data[0]) hostedUrl = inv.get("hosted_invoice_url", "") if hostedUrl: invoiceLink = ( @@ -715,7 +718,8 @@ def _buildCancelSummaryHtml(subRecord: Dict[str, Any], platformUrl: str = "") -> stripe = getStripeClient() invoices = stripe.Invoice.list(subscription=stripeSubId, limit=1) if invoices.data: - inv = dict(invoices.data[0]) if not isinstance(invoices.data[0], dict) else invoices.data[0] + from modules.shared.stripeClient import stripeToDict + inv = stripeToDict(invoices.data[0]) hostedUrl = inv.get("hosted_invoice_url", "") if hostedUrl: parts.append( diff --git a/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py b/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py index 1e44217b..14e9424a 100644 --- a/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py +++ b/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py @@ -108,7 +108,8 @@ def _findExistingStripePrice(stripe, productId: str, unitAmount: int, interval: def _getStripePriceAmount(stripe, priceId: str) -> 
Optional[int]: """Retrieve the unit_amount (in Rappen) of an existing Stripe Price.""" try: - price = dict(stripe.Price.retrieve(priceId)) + from modules.shared.stripeClient import stripeToDict + price = stripeToDict(stripe.Price.retrieve(priceId)) return price.get("unit_amount") if price else None except Exception: return None diff --git a/modules/shared/stripeClient.py b/modules/shared/stripeClient.py index 9c7b4c67..3f7dd3a7 100644 --- a/modules/shared/stripeClient.py +++ b/modules/shared/stripeClient.py @@ -8,13 +8,28 @@ API key, API version, and fallback handling across billing and subscription flow """ import logging -from typing import Optional +import json +from typing import Any, Dict, Optional logger = logging.getLogger(__name__) _stripeInitialized = False +def stripeToDict(obj) -> Dict[str, Any]: + """Convert a Stripe object to a plain dict, compatible with all stripe-python versions.""" + if isinstance(obj, dict): + return obj + if hasattr(obj, "to_dict_recursive"): + return obj.to_dict_recursive() + if hasattr(obj, "to_dict"): + return obj.to_dict() + try: + return json.loads(str(obj)) + except (json.JSONDecodeError, TypeError): + return dict(obj) + + def getStripeClient(): """ Initialize and return the configured Stripe SDK module. From 695c652a56b683bf7f04c385a2e937d273ec7f25 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Tue, 31 Mar 2026 13:31:25 +0200 Subject: [PATCH 24/33] mandate admin fixes --- modules/interfaces/interfaceDbApp.py | 83 +++++++++++++++++++++--- modules/interfaces/interfaceDbBilling.py | 2 +- modules/routes/routeAdminFeatures.py | 2 + modules/routes/routeDataMandates.py | 40 +++++++----- modules/routes/routeInvitations.py | 9 ++- modules/routes/routeStore.py | 62 +++++++++++++++--- modules/routes/routeSystem.py | 2 + 7 files changed, 162 insertions(+), 38 deletions(-) diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py index 2ac768fd..27ec5fcf 100644 --- a/modules/interfaces/interfaceDbApp.py +++ b/modules/interfaces/interfaceDbApp.py @@ -1074,19 +1074,66 @@ class AppObjects: return False def _deleteUserReferencedData(self, userId: str) -> None: - """Deletes all data associated with a user.""" + """Deletes all data associated with a user (full cascade).""" try: - # Delete user auth events + from modules.datamodels.datamodelRbac import FeatureAccessRole, UserMandateRole + from modules.datamodels.datamodelNotification import UserNotification + from modules.datamodels.datamodelInvitation import Invitation + + # 1. FeatureAccess + FeatureAccessRole + accesses = self.db.getRecordset(FeatureAccess, recordFilter={"userId": userId}) + for acc in accesses: + accId = acc.get("id") + if not accId: + continue + roles = self.db.getRecordset(FeatureAccessRole, recordFilter={"featureAccessId": accId}) + for role in roles: + self.db.recordDelete(FeatureAccessRole, role.get("id")) + self.db.recordDelete(FeatureAccess, accId) + if accesses: + logger.info(f"User cascade: deleted {len(accesses)} FeatureAccess records for user {userId}") + + # 2. 
UserMandate + UserMandateRole + memberships = self.db.getRecordset(UserMandate, recordFilter={"userId": userId}) + for um in memberships: + umId = um.get("id") + if not umId: + continue + umRoles = self.db.getRecordset(UserMandateRole, recordFilter={"userMandateId": umId}) + for umr in umRoles: + self.db.recordDelete(UserMandateRole, umr.get("id")) + self.db.recordDelete(UserMandate, umId) + if memberships: + logger.info(f"User cascade: deleted {len(memberships)} UserMandate records for user {userId}") + + # 3. UserNotifications + notifications = self.db.getRecordset(UserNotification, recordFilter={"userId": userId}) + for notif in notifications: + self.db.recordDelete(UserNotification, notif.get("id")) + if notifications: + logger.info(f"User cascade: deleted {len(notifications)} notifications for user {userId}") + + # 4. Invitations (by email) + user = self.getUser(userId) + userEmail = getattr(user, "email", None) if user else None + if userEmail: + invitations = self.db.getRecordset(Invitation, recordFilter={"email": userEmail}) + for inv in invitations: + self.db.recordDelete(Invitation, inv.get("id")) + if invitations: + logger.info(f"User cascade: deleted {len(invitations)} invitations for {userEmail}") + + # 5. AuthEvents events = self.db.getRecordset(AuthEvent, recordFilter={"userId": userId}) for event in events: self.db.recordDelete(AuthEvent, event["id"]) - # Delete user tokens + # 6. Tokens tokens = self.db.getRecordset(Token, recordFilter={"userId": userId}) for token in tokens: self.db.recordDelete(Token, token["id"]) - # Delete user connections + # 7. UserConnections connections = self.db.getRecordset( UserConnection, recordFilter={"userId": userId} ) @@ -1448,14 +1495,23 @@ class AppObjects: self.createUserMandate(userId, mandateId, roleIds=[adminRoleId], skipCapacityCheck=True) + from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot + from datetime import datetime, timezone, timedelta + + now = datetime.now(timezone.utc) + targetStatus = SubscriptionStatusEnum.TRIALING if plan.trialDays else SubscriptionStatusEnum.ACTIVE subscription = MandateSubscription( mandateId=mandateId, planKey=planKey, - status=SubscriptionStatusEnum.PENDING, + status=targetStatus, + startedAt=now.isoformat(), + currentPeriodStart=now.isoformat(), ) if plan.trialDays: - pass # trialEndsAt set on ACTIVE/TRIALING transition - from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot + trialEnd = now + timedelta(days=plan.trialDays) + subscription.trialEndsAt = trialEnd.isoformat() + subscription.currentPeriodEnd = trialEnd.isoformat() + subInterface = _getSubRoot() subInterface.createSubscription(subscription) @@ -1584,8 +1640,9 @@ class AppObjects: if not mandate: raise ValueError(f"Mandate {mandateId} not found") - # Strip immutable/protected fields from update data - _protectedFields = {"id", "isSystem"} + _protectedFields = {"id"} + if not getattr(self.currentUser, "isSysAdmin", False): + _protectedFields.add("isSystem") _sanitizedData = {k: v for k, v in updateData.items() if k not in _protectedFields} # Update mandate data using model @@ -1761,6 +1818,14 @@ class AppObjects: if billingAccounts or billingSettings: logger.info(f"Cascade: deleted billing data for mandate {mandateId}") + # 3c. 
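# (Sketch, not part of the diff: what the updateMandate guard above does to an
#  illustrative payload --
#
#      updateData = {"label": "Acme", "id": "m-999", "isSystem": True}
#      SysAdmin caller       -> {"label": "Acme", "isSystem": True}   # id always stripped
#      mandate-admin caller  -> {"label": "Acme"}                     # isSystem stripped too
#
#  so id stays immutable for everyone, while isSystem becomes editable for
#  SysAdmins only.)
# 3c.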
Delete Invitations for this mandate + from modules.datamodels.datamodelInvitation import Invitation + invitations = self.db.getRecordset(Invitation, recordFilter={"mandateId": mandateId}) + for inv in invitations: + self.db.recordDelete(Invitation, inv.get("id")) + if invitations: + logger.info(f"Cascade: deleted {len(invitations)} Invitations for mandate {mandateId}") + # 4. Delete mandate-level Roles from modules.datamodels.datamodelRbac import Role, AccessRule roles = self.db.getRecordset(Role, recordFilter={"mandateId": mandateId}) diff --git a/modules/interfaces/interfaceDbBilling.py b/modules/interfaces/interfaceDbBilling.py index d8c052c9..1ea1786a 100644 --- a/modules/interfaces/interfaceDbBilling.py +++ b/modules/interfaces/interfaceDbBilling.py @@ -1145,7 +1145,7 @@ class BillingObjects: continue mandate = rootInterface.getMandate(mandateId) - if not mandate: + if not mandate or not getattr(mandate, "enabled", True): continue mandateName = getattr(mandate, 'label', None) or getattr(mandate, 'name', None) or (mandate.get("label") or mandate.get("name", "") if isinstance(mandate, dict) else "") diff --git a/modules/routes/routeAdminFeatures.py b/modules/routes/routeAdminFeatures.py index e69df7b9..9d05daf6 100644 --- a/modules/routes/routeAdminFeatures.py +++ b/modules/routes/routeAdminFeatures.py @@ -159,6 +159,8 @@ def get_my_feature_instances( mandateId = str(instance.mandateId) if mandateId not in mandatesMap: mandate = rootInterface.getMandate(mandateId) + if mandate and not getattr(mandate, "enabled", True): + continue if mandate: mandatesMap[mandateId] = { "id": mandateId, diff --git a/modules/routes/routeDataMandates.py b/modules/routes/routeDataMandates.py index e98fd1cc..1615a03a 100644 --- a/modules/routes/routeDataMandates.py +++ b/modules/routes/routeDataMandates.py @@ -125,11 +125,11 @@ def get_mandates( # SysAdmin: all mandates result = appInterface.getAllMandates(pagination=paginationParams) else: - # MandateAdmin: only their mandates + # MandateAdmin: only their enabled mandates allMandates = [] for mandateId in adminMandateIds: mandate = appInterface.getMandate(mandateId) - if mandate: + if mandate and getattr(mandate, "enabled", True): mandateDict = mandate if isinstance(mandate, dict) else mandate.model_dump() if hasattr(mandate, 'model_dump') else vars(mandate) allMandates.append(mandateDict) result = allMandates @@ -411,41 +411,47 @@ def update_mandate( def delete_mandate( request: Request, mandateId: str = Path(..., description="ID of the mandate to delete"), + force: bool = Query(False, description="Hard-delete with full cascade (irreversible)"), currentUser: User = Depends(requireSysAdminRole) ) -> Dict[str, Any]: """ Delete a mandate. + Default: soft-delete (sets enabled=False, 30-day retention). + With ?force=true: hard-delete with full cascade (irreversible). + Requires X-Confirm-Name header matching the mandate name for hard-delete. MULTI-TENANT: SysAdmin-only. 
""" try: appInterface = interfaceDbApp.getRootInterface() - - # Check if mandate exists + existingMandate = appInterface.getMandate(mandateId) if not existingMandate: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=f"Mandate {mandateId} not found" ) - - # MULTI-TENANT: Delete all UserMandate entries for this mandate first - userMandates = appInterface.getUserMandatesByMandate(mandateId) - for um in userMandates: - appInterface.deleteUserMandate(str(um.userId), mandateId) - logger.info(f"Deleted {len(userMandates)} UserMandate entries for mandate {mandateId}") - - # Delete mandate + + if force: + confirmName = request.headers.get("X-Confirm-Name", "") + mandateName = getattr(existingMandate, "name", "") or "" + if confirmName != mandateName: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Hard-delete requires X-Confirm-Name header matching the mandate name" + ) + try: - appInterface.deleteMandate(mandateId) + appInterface.deleteMandate(mandateId, force=force) except ValueError as e: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=str(e) ) - - logger.info(f"Mandate {mandateId} deleted by SysAdmin {currentUser.id}") - - return {"message": f"Mandate {mandateId} deleted successfully"} + + mode = "hard-deleted" if force else "soft-deleted" + logger.info(f"Mandate {mandateId} {mode} by SysAdmin {currentUser.id}") + + return {"message": f"Mandate {mandateId} {mode} successfully"} except HTTPException: raise except Exception as e: diff --git a/modules/routes/routeInvitations.py b/modules/routes/routeInvitations.py index ccefcc87..8e3be0ba 100644 --- a/modules/routes/routeInvitations.py +++ b/modules/routes/routeInvitations.py @@ -678,8 +678,15 @@ def validate_invitation( roleLabels = [] targetUsername = invitation.targetUsername - # Get mandate name mandate = rootInterface.getMandate(str(mandateId)) if mandateId else None + if mandate and not getattr(mandate, "enabled", True): + return InvitationValidation( + valid=False, + reason="Mandate is disabled", + mandateId=None, + featureInstanceId=None, + roleIds=[] + ) if mandate: mandateName = mandate.label or mandate.name diff --git a/modules/routes/routeStore.py b/modules/routes/routeStore.py index 4af0f6b7..ab50087c 100644 --- a/modules/routes/routeStore.py +++ b/modules/routes/routeStore.py @@ -87,6 +87,35 @@ def _isUserAdminInMandate(db, userId: str, mandateId: str) -> bool: return False +def _autoActivatePending(subInterface, pendingSub: Dict[str, Any]) -> None: + """Auto-activate a PENDING subscription to its target operative status.""" + from modules.datamodels.datamodelSubscription import SubscriptionStatusEnum, BUILTIN_PLANS + from datetime import datetime, timezone, timedelta + + subId = pendingSub.get("id") + planKey = pendingSub.get("planKey", "") + plan = BUILTIN_PLANS.get(planKey) + now = datetime.now(timezone.utc) + targetStatus = SubscriptionStatusEnum.TRIALING if plan and plan.trialDays else SubscriptionStatusEnum.ACTIVE + + additionalData = {"currentPeriodStart": now.isoformat()} + if plan and plan.trialDays: + trialEnd = now + timedelta(days=plan.trialDays) + additionalData["trialEndsAt"] = trialEnd.isoformat() + additionalData["currentPeriodEnd"] = trialEnd.isoformat() + + try: + subInterface.transitionStatus( + subId, + expectedFromStatus=SubscriptionStatusEnum.PENDING, + toStatus=targetStatus, + additionalData=additionalData, + ) + logger.info("Auto-activated PENDING subscription %s -> %s for mandate", subId, targetStatus.value) + except Exception as e: + 
logger.warning("Failed to auto-activate PENDING subscription %s: %s", subId, e) + + def _getUserAdminMandateIds(db, userId: str) -> List[str]: """Get all mandate IDs where user is admin.""" userMandates = db.getRecordset(UserMandate, recordFilter={"userId": userId, "enabled": True}) @@ -150,6 +179,8 @@ def listUserMandates( records = db.getRecordset(Mandate, recordFilter={"id": mid}) if records: m = records[0] + if not m.get("enabled", True): + continue result.append({ "id": mid, "name": m.get("name", ""), @@ -200,13 +231,15 @@ def getSubscriptionInfo( "budgetAiCHF": None, } - sub = allSubs[0] + operative = subInterface.getOperativeForMandate(mandateId) + sub = operative or allSubs[0] plan = BUILTIN_PLANS.get(sub.get("planKey")) currentInstances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) return { "plan": sub.get("planKey"), "status": sub.get("status"), + "operative": operative is not None, "maxDataVolumeMB": plan.maxDataVolumeMB if plan else None, "maxFeatureInstances": plan.maxFeatureInstances if plan else None, "budgetAiCHF": plan.budgetAiCHF if plan else None, @@ -241,7 +274,7 @@ def listStoreFeatures( for um in userMandates: mid = um.get("mandateId") mRecord = db.getRecordset(Mandate, recordFilter={"id": mid}) - if mRecord and not mRecord[0].get("isSystem"): + if mRecord and not mRecord[0].get("isSystem") and mRecord[0].get("enabled", True): userMandateIds.append(mid) storeFeatures = _getStoreFeatures(catalogService) @@ -302,7 +335,22 @@ def activateStoreFeature( subInterface = _getSubRoot() operative = subInterface.getOperativeForMandate(mandateId) + if not operative: + allSubs = subInterface.listForMandate(mandateId) + pendingSubs = [s for s in allSubs if s.get("status") == SubscriptionStatusEnum.PENDING.value] + if pendingSubs: + _autoActivatePending(subInterface, pendingSubs[0]) + operative = subInterface.getOperativeForMandate(mandateId) + + if not operative: + allSubs = subInterface.listForMandate(mandateId) + statuses = [s.get("status") for s in allSubs] if allSubs else [] + logger.warning( + "Store activate 402: no operative subscription for mandate %s. " + "Found %d subscription(s) with statuses: %s", + mandateId, len(allSubs), statuses, + ) raise HTTPException( status_code=status.HTTP_402_PAYMENT_REQUIRED, detail="Kein aktives Abonnement. Bitte zuerst ein Abo abschliessen.", @@ -310,14 +358,8 @@ def activateStoreFeature( planKey = operative.get("planKey", "") plan = BUILTIN_PLANS.get(planKey) - isBillable = plan is not None and (plan.pricePerFeatureInstanceCHF or 0) > 0 - - if isBillable: - if not operative.get("stripeSubscriptionId") or not operative.get("stripeItemIdInstances"): - raise HTTPException( - status_code=status.HTTP_402_PAYMENT_REQUIRED, - detail="Stripe-Abonnement ist nicht vollständig eingerichtet — Aktivierung nicht möglich.", - ) + hasStripeIds = bool(operative.get("stripeSubscriptionId") and operative.get("stripeItemIdInstances")) + isBillable = hasStripeIds and plan is not None and (plan.pricePerFeatureInstanceCHF or 0) > 0 # ── 2. 
Capacity check ─────────────────────────────────────────── if plan and plan.maxFeatureInstances is not None: diff --git a/modules/routes/routeSystem.py b/modules/routes/routeSystem.py index 5a08202c..f287d908 100644 --- a/modules/routes/routeSystem.py +++ b/modules/routes/routeSystem.py @@ -168,6 +168,8 @@ def _buildDynamicBlock( mandateId = str(instance.mandateId) if mandateId not in mandatesMap: mandate = rootInterface.getMandate(mandateId) + if not mandate or not getattr(mandate, "enabled", True): + continue mandateName = (mandate.label or mandate.name) if mandate else mandateId mandatesMap[mandateId] = { "id": mandateId, From c6e7438dfa5758cda193b56973f35e33ee1c6992 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Tue, 31 Mar 2026 20:56:35 +0200 Subject: [PATCH 25/33] teams bot adapt vars --- modules/features/teamsbot/service.py | 45 ++++++++++------------------ 1 file changed, 16 insertions(+), 29 deletions(-) diff --git a/modules/features/teamsbot/service.py b/modules/features/teamsbot/service.py index 773cc1c9..9e59f653 100644 --- a/modules/features/teamsbot/service.py +++ b/modules/features/teamsbot/service.py @@ -17,6 +17,8 @@ from fastapi import WebSocket from modules.datamodels.datamodelUam import User from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum from modules.shared.timeUtils import getUtcTimestamp, getIsoTimestamp +from modules.serviceCenter import getService as _getServiceCenterService +from modules.serviceCenter.context import ServiceCenterContext from .datamodelTeamsbot import ( TeamsbotSessionStatus, @@ -35,18 +37,18 @@ logger = logging.getLogger(__name__) # ========================================================================= -# Minimal Service Context (for AI billing in bridge callbacks) +# AI Service Factory (for billing-aware AI calls) # ========================================================================= -class _ServiceContext: - """Minimal context providing user/mandate info for AiService billing. - Used by bridge callbacks where a full Services instance is not available.""" - - def __init__(self, user, mandateId, featureInstanceId=None): - self.user = user - self.mandateId = mandateId - self.featureInstanceId = featureInstanceId - self.featureCode = "teamsbot" +def _createAiService(user, mandateId, featureInstanceId=None): + """Create a properly wired AiService via the service center.""" + ctx = ServiceCenterContext( + user=user, + mandate_id=mandateId, + feature_instance_id=featureInstanceId, + feature_code="teamsbot", + ) + return _getServiceCenterService("ai", ctx) # ========================================================================= @@ -1062,11 +1064,7 @@ class TeamsbotService: # Call SPEECH_TEAMS try: - from modules.serviceCenter.services.serviceAi.mainServiceAi import AiService - - # Create minimal service context for AI billing - serviceContext = _ServiceContext(self.currentUser, self.mandateId, self.instanceId) - aiService = AiService(serviceCenter=serviceContext) + aiService = _createAiService(self.currentUser, self.mandateId, self.instanceId) await aiService.ensureAiObjectsInitialized() request = AiCallRequest( @@ -1684,11 +1682,7 @@ class TeamsbotService: """Summarize a long user-provided session context to its essential points. 
This reduces token usage in every subsequent AI call.""" try: - from modules.serviceCenter.services.serviceAi.mainServiceAi import AiService - from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum - - serviceContext = _ServiceContext(self.currentUser, self.mandateId, self.instanceId) - aiService = AiService(serviceCenter=serviceContext) + aiService = _createAiService(self.currentUser, self.mandateId, self.instanceId) await aiService.ensureAiObjectsInitialized() request = AiCallRequest( @@ -1738,11 +1732,7 @@ class TeamsbotService: lines.append(f"[{speaker}]: {text}") textToSummarize = "\n".join(lines) - from modules.serviceCenter.services.serviceAi.mainServiceAi import AiService - from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum - - serviceContext = _ServiceContext(self.currentUser, self.mandateId, self.instanceId) - aiService = AiService(serviceCenter=serviceContext) + aiService = _createAiService(self.currentUser, self.mandateId, self.instanceId) await aiService.ensureAiObjectsInitialized() request = AiCallRequest( @@ -1783,10 +1773,7 @@ class TeamsbotService: for t in transcripts ) - from modules.serviceCenter.services.serviceAi.mainServiceAi import AiService - - serviceContext = _ServiceContext(self.currentUser, self.mandateId, self.instanceId) - aiService = AiService(serviceCenter=serviceContext) + aiService = _createAiService(self.currentUser, self.mandateId, self.instanceId) await aiService.ensureAiObjectsInitialized() request = AiCallRequest( From 413dcd9b6c28967f06e9d4ed4356a96af0b52fd2 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Tue, 31 Mar 2026 21:59:26 +0200 Subject: [PATCH 26/33] fix hard delete cascade: wrong import for FeatureAccessRole Made-with: Cursor --- modules/interfaces/interfaceDbApp.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py index 27ec5fcf..01863b41 100644 --- a/modules/interfaces/interfaceDbApp.py +++ b/modules/interfaces/interfaceDbApp.py @@ -1076,7 +1076,6 @@ class AppObjects: def _deleteUserReferencedData(self, userId: str) -> None: """Deletes all data associated with a user (full cascade).""" try: - from modules.datamodels.datamodelRbac import FeatureAccessRole, UserMandateRole from modules.datamodels.datamodelNotification import UserNotification from modules.datamodels.datamodelInvitation import Invitation @@ -1699,7 +1698,6 @@ class AppObjects: from modules.datamodels.datamodelKnowledge import FileContentIndex, ContentChunk from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource from modules.datamodels.datamodelBilling import BillingSettings, BillingAccount, BillingTransaction - from modules.datamodels.datamodelRbac import FeatureAccessRole, UserMandateRole from modules.features.neutralization.datamodelFeatureNeutralizer import DataNeutralizerAttributes instances = self.db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId}) From 5a40b54524c3ea4d1b9725a16ea8165037b8ce75 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Tue, 31 Mar 2026 23:40:59 +0200 Subject: [PATCH 27/33] fixed data source --- .../datamodels/datamodelFeatureDataSource.py | 6 +- modules/features/commcoach/mainCommcoach.py | 21 ++- modules/features/teamsbot/mainTeamsbot.py | 21 ++- modules/features/trustee/mainTrustee.py | 24 ++- .../workspace/routeFeatureWorkspace.py | 149 +++++++++++++++++- .../services/serviceAgent/featureDataAgent.py | 15 +- 
.../serviceAgent/featureDataProvider.py | 70 ++++++-- .../services/serviceAgent/mainServiceAgent.py | 6 + 8 files changed, 282 insertions(+), 30 deletions(-) diff --git a/modules/datamodels/datamodelFeatureDataSource.py b/modules/datamodels/datamodelFeatureDataSource.py index 80ceb03c..02de0a67 100644 --- a/modules/datamodels/datamodelFeatureDataSource.py +++ b/modules/datamodels/datamodelFeatureDataSource.py @@ -6,7 +6,7 @@ A FeatureDataSource links a FeatureInstance table (DATA_OBJECT) to a workspace so the agent can query structured feature data (e.g. TrusteePosition rows). """ -from typing import Optional +from typing import Dict, Optional from pydantic import BaseModel, Field from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels @@ -39,6 +39,10 @@ class FeatureDataSource(PowerOnModel): description="Whether this data source should be neutralized before AI processing", json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False} ) + recordFilter: Optional[Dict[str, str]] = Field( + default=None, + description="Record-level filter applied when querying this table, e.g. {'sessionId': 'abc-123'}", + ) registerModelLabels( diff --git a/modules/features/commcoach/mainCommcoach.py b/modules/features/commcoach/mainCommcoach.py index 9d949e13..d21da056 100644 --- a/modules/features/commcoach/mainCommcoach.py +++ b/modules/features/commcoach/mainCommcoach.py @@ -36,12 +36,22 @@ DATA_OBJECTS = [ { "objectKey": "data.feature.commcoach.CoachingContext", "label": {"en": "Coaching Context", "de": "Coaching-Kontext", "fr": "Contexte coaching"}, - "meta": {"table": "CoachingContext", "fields": ["id", "title", "category", "status"]} + "meta": { + "table": "CoachingContext", + "fields": ["id", "title", "category", "status"], + "isParent": True, + "displayFields": ["title", "category", "status"], + } }, { "objectKey": "data.feature.commcoach.CoachingSession", "label": {"en": "Coaching Session", "de": "Coaching-Session", "fr": "Session coaching"}, - "meta": {"table": "CoachingSession", "fields": ["id", "contextId", "status", "summary"]} + "meta": { + "table": "CoachingSession", + "fields": ["id", "contextId", "status", "summary"], + "parentTable": "CoachingContext", + "parentKey": "contextId", + } }, { "objectKey": "data.feature.commcoach.CoachingMessage", @@ -51,7 +61,12 @@ DATA_OBJECTS = [ { "objectKey": "data.feature.commcoach.CoachingTask", "label": {"en": "Coaching Task", "de": "Coaching-Aufgabe", "fr": "Tache coaching"}, - "meta": {"table": "CoachingTask", "fields": ["id", "contextId", "title", "status"]} + "meta": { + "table": "CoachingTask", + "fields": ["id", "contextId", "title", "status"], + "parentTable": "CoachingContext", + "parentKey": "contextId", + } }, { "objectKey": "data.feature.commcoach.CoachingScore", diff --git a/modules/features/teamsbot/mainTeamsbot.py b/modules/features/teamsbot/mainTeamsbot.py index afdce822..ea6d3b01 100644 --- a/modules/features/teamsbot/mainTeamsbot.py +++ b/modules/features/teamsbot/mainTeamsbot.py @@ -39,17 +39,32 @@ DATA_OBJECTS = [ { "objectKey": "data.feature.teamsbot.TeamsbotSession", "label": {"en": "Session", "de": "Sitzung", "fr": "Session"}, - "meta": {"table": "TeamsbotSession", "fields": ["id", "meetingLink", "botName", "status", "startedAt", "endedAt"]} + "meta": { + "table": "TeamsbotSession", + "fields": ["id", "meetingLink", "botName", "status", "startedAt", "endedAt"], + "isParent": True, + "displayFields": ["botName", "status", 
"startedAt"], + } }, { "objectKey": "data.feature.teamsbot.TeamsbotTranscript", "label": {"en": "Transcript", "de": "Transkript", "fr": "Transcription"}, - "meta": {"table": "TeamsbotTranscript", "fields": ["id", "sessionId", "speaker", "text", "timestamp"]} + "meta": { + "table": "TeamsbotTranscript", + "fields": ["id", "sessionId", "speaker", "text", "timestamp"], + "parentTable": "TeamsbotSession", + "parentKey": "sessionId", + } }, { "objectKey": "data.feature.teamsbot.TeamsbotBotResponse", "label": {"en": "Bot Response", "de": "Bot-Antwort", "fr": "Réponse du bot"}, - "meta": {"table": "TeamsbotBotResponse", "fields": ["id", "sessionId", "responseText", "detectedIntent"]} + "meta": { + "table": "TeamsbotBotResponse", + "fields": ["id", "sessionId", "responseText", "detectedIntent"], + "parentTable": "TeamsbotSession", + "parentKey": "sessionId", + } }, { "objectKey": "data.feature.teamsbot.*", diff --git a/modules/features/trustee/mainTrustee.py b/modules/features/trustee/mainTrustee.py index 45824b1b..2fd82bc5 100644 --- a/modules/features/trustee/mainTrustee.py +++ b/modules/features/trustee/mainTrustee.py @@ -58,10 +58,25 @@ UI_OBJECTS = [ # DATA Objects for RBAC catalog (tables/entities) # Used for AccessRules on data-level permissions DATA_OBJECTS = [ + { + "objectKey": "data.feature.trustee.TrusteeOrganisation", + "label": {"en": "Organisation", "de": "Organisation", "fr": "Organisation"}, + "meta": { + "table": "TrusteeOrganisation", + "fields": ["id", "label", "enabled"], + "isParent": True, + "displayFields": ["label"], + } + }, { "objectKey": "data.feature.trustee.TrusteePosition", "label": {"en": "Position", "de": "Position", "fr": "Position"}, - "meta": {"table": "TrusteePosition", "fields": ["id", "label", "description", "organisationId"]} + "meta": { + "table": "TrusteePosition", + "fields": ["id", "label", "description", "organisationId"], + "parentTable": "TrusteeOrganisation", + "parentKey": "organisationId", + } }, { "objectKey": "data.feature.trustee.TrusteeDocument", @@ -71,7 +86,12 @@ DATA_OBJECTS = [ { "objectKey": "data.feature.trustee.TrusteeAccountingConfig", "label": {"en": "Accounting Config", "de": "Buchhaltungs-Konfiguration", "fr": "Config. 
comptable"}, - "meta": {"table": "TrusteeAccountingConfig", "fields": ["id", "connectorType", "displayLabel", "encryptedConfig", "isActive"]} + "meta": { + "table": "TrusteeAccountingConfig", + "fields": ["id", "connectorType", "displayLabel", "encryptedConfig", "isActive"], + "parentTable": "TrusteeOrganisation", + "parentKey": "organisationId", + } }, { "objectKey": "data.feature.trustee.TrusteeAccountingSync", diff --git a/modules/features/workspace/routeFeatureWorkspace.py b/modules/features/workspace/routeFeatureWorkspace.py index 7feef4db..ae0154dc 100644 --- a/modules/features/workspace/routeFeatureWorkspace.py +++ b/modules/features/workspace/routeFeatureWorkspace.py @@ -270,12 +270,19 @@ def _buildFeatureDataSourceContext(featureDataSourceIds: List[str]) -> str: tableFields = obj.get("meta", {}).get("fields", []) break + recordFilter = fds.get("recordFilter") + filterLine = "" + if recordFilter and isinstance(recordFilter, dict): + filterParts = [f"{k} = {v}" for k, v in recordFilter.items()] + filterLine = f"\n recordFilter: {', '.join(filterParts)} (data is scoped to this record)" + parts.append( f"- featureInstanceId: {fiId}\n" f" feature: {featureCode}\n" f" instance: \"{instanceLabel}\"\n" f" table: {tableName} ({label})\n" f" fields: {', '.join(tableFields) if tableFields else 'all'}" + f"{filterLine}" ) except Exception as e: logger.warning(f"Error loading FeatureDataSource {fdsId}: {e}") @@ -1336,8 +1343,8 @@ async def listFeatureConnections( instanceId: str = Path(...), context: RequestContext = Depends(getRequestContext), ): - """List feature instances the user has access to across ALL mandates.""" - _validateInstanceAccess(instanceId, context) + """List feature instances the user has access to, scoped to the workspace mandate.""" + wsMandateId, _ = _validateInstanceAccess(instanceId, context) from modules.interfaces.interfaceDbApp import getRootInterface from modules.security.rbacCatalog import getCatalogService from modules.datamodels.datamodelUam import Mandate @@ -1352,8 +1359,14 @@ async def listFeatureConnections( if not userMandates: return JSONResponse({"featureConnectionsByMandate": []}) + allowedMandateIds = {um.mandateId for um in userMandates} + if wsMandateId and wsMandateId in allowedMandateIds: + allowedMandateIds = {wsMandateId} + mandateLabels: dict = {} for um in userMandates: + if um.mandateId not in allowedMandateIds: + continue try: rows = rootIf.db.getRecordset(Mandate, recordFilter={"id": um.mandateId}) if rows: @@ -1365,6 +1378,8 @@ async def listFeatureConnections( byMandate: dict = {} seenIds: set = set() for um in userMandates: + if um.mandateId not in allowedMandateIds: + continue allInstances = rootIf.getFeatureInstancesByMandate(um.mandateId) for inst in allInstances: if inst.id in seenIds: @@ -1418,7 +1433,7 @@ async def listFeatureConnectionTables( context: RequestContext = Depends(getRequestContext), ): """List data tables (DATA_OBJECTS) for a feature instance, filtered by RBAC.""" - _validateInstanceAccess(instanceId, context) + wsMandateId, _ = _validateInstanceAccess(instanceId, context) from modules.interfaces.interfaceDbApp import getRootInterface from modules.security.rbacCatalog import getCatalogService @@ -1428,6 +1443,8 @@ async def listFeatureConnectionTables( raise HTTPException(status_code=404, detail="Feature instance not found") mandateId = str(inst.mandateId) if inst.mandateId else None + if wsMandateId and mandateId and mandateId != wsMandateId: + raise HTTPException(status_code=403, detail="Feature instance does not 
belong to workspace mandate") catalog = getCatalogService() try: @@ -1448,16 +1465,132 @@ async def listFeatureConnectionTables( tables = [] for obj in accessible: meta = obj.get("meta", {}) - tables.append({ + node = { "objectKey": obj.get("objectKey", ""), "tableName": meta.get("table", ""), "label": obj.get("label", {}), "fields": meta.get("fields", []), - }) + } + if meta.get("isParent"): + node["isParent"] = True + node["displayFields"] = meta.get("displayFields", []) + if meta.get("parentTable"): + node["parentTable"] = meta["parentTable"] + node["parentKey"] = meta.get("parentKey", "") + tables.append(node) return JSONResponse({"tables": tables}) +@router.get("/{instanceId}/feature-connections/{fiId}/parent-objects/{tableName}") +@limiter.limit("120/minute") +async def listParentObjects( + request: Request, + instanceId: str = Path(...), + fiId: str = Path(..., description="Feature instance ID"), + tableName: str = Path(..., description="Parent table name from DATA_OBJECTS"), + context: RequestContext = Depends(getRequestContext), +): + """List records from a parent table so the user can pick a specific record to scope data.""" + wsMandateId, _ = _validateInstanceAccess(instanceId, context) + from modules.interfaces.interfaceDbApp import getRootInterface + from modules.security.rbacCatalog import getCatalogService + + rootIf = getRootInterface() + inst = rootIf.getFeatureInstance(fiId) + if not inst: + raise HTTPException(status_code=404, detail="Feature instance not found") + + featureCode = inst.featureCode + mandateId = str(inst.mandateId) if inst.mandateId else "" + if wsMandateId and mandateId and mandateId != wsMandateId: + raise HTTPException(status_code=403, detail="Feature instance does not belong to workspace mandate") + catalog = getCatalogService() + + parentObj = None + for obj in catalog.getDataObjects(featureCode): + meta = obj.get("meta", {}) + if meta.get("table") == tableName and meta.get("isParent"): + parentObj = obj + break + if not parentObj: + raise HTTPException(status_code=400, detail=f"Table '{tableName}' is not a registered parent table") + + displayFields = parentObj["meta"].get("displayFields", []) + selectCols = ', '.join(f'"{f}"' for f in (["id"] + displayFields)) if displayFields else "*" + + from modules.connectors.connectorDbPostgre import DatabaseConnector + from modules.shared.configuration import APP_CONFIG + featureDbName = f"poweron_{featureCode.lower()}" + featureDbConn = None + try: + featureDbConn = DatabaseConnector( + dbHost=APP_CONFIG.get("DB_HOST", "localhost"), + dbDatabase=featureDbName, + dbUser=APP_CONFIG.get("DB_USER"), + dbPassword=APP_CONFIG.get("DB_PASSWORD_SECRET"), + dbPort=int(APP_CONFIG.get("DB_PORT", 5432)), + userId=str(context.user.id), + ) + conn = featureDbConn.connection + with conn.cursor() as cur: + cur.execute( + "SELECT column_name FROM information_schema.columns " + "WHERE table_schema = 'public' AND LOWER(table_name) = LOWER(%s) " + "AND column_name IN ('featureInstanceId', 'instanceId')", + [tableName], + ) + instanceCols = [row["column_name"] for row in cur.fetchall()] + instanceCol = "featureInstanceId" if "featureInstanceId" in instanceCols else "instanceId" + + cur.execute( + "SELECT column_name FROM information_schema.columns " + "WHERE table_schema = 'public' AND LOWER(table_name) = LOWER(%s) " + "AND column_name = 'userId'", + [tableName], + ) + hasUserId = cur.rowcount > 0 + + sql = ( + f'SELECT {selectCols} FROM "{tableName}" ' + f'WHERE "{instanceCol}" = %s' + ) + params = [fiId] + if mandateId: + sql 
+= ' AND "mandateId" = %s' + params.append(mandateId) + if hasUserId: + sql += ' AND "userId" = %s' + params.append(str(context.user.id)) + sql += ' ORDER BY "id" DESC LIMIT 100' + cur.execute(sql, params) + rows = [] + for row in cur.fetchall(): + r = dict(row) + for k, v in r.items(): + if hasattr(v, "isoformat"): + r[k] = v.isoformat() + elif isinstance(v, (bytes, bytearray)): + r[k] = f"<{len(v)} bytes>" + displayParts = [str(r.get(f, "")) for f in displayFields if r.get(f) is not None] + rows.append({ + "id": r.get("id", ""), + "displayLabel": " | ".join(displayParts) if displayParts else r.get("id", ""), + "fields": {f: r.get(f) for f in displayFields}, + }) + except Exception as e: + logger.error(f"listParentObjects({tableName}) failed: {e}", exc_info=True) + raise HTTPException(status_code=500, detail=f"Failed to list parent objects: {e}") + finally: + if featureDbConn: + try: + featureDbConn.close() + except Exception: + pass + + return JSONResponse({"parentObjects": rows}) + + class CreateFeatureDataSourceRequest(BaseModel): """Request body for adding a feature table as data source.""" featureInstanceId: str = Field(description="Feature instance ID") @@ -1465,6 +1598,7 @@ tableName: str = Field(description="Table name from DATA_OBJECTS") objectKey: str = Field(description="RBAC object key") label: str = Field(description="User-visible label") + recordFilter: Optional[dict] = Field(default=None, description="Record-level filter for scoping") @router.post("/{instanceId}/feature-datasources") @@ -1476,13 +1610,15 @@ async def createFeatureDataSource( context: RequestContext = Depends(getRequestContext), ): """Create a FeatureDataSource for this workspace instance.""" - _validateInstanceAccess(instanceId, context) + wsMandateId, _ = _validateInstanceAccess(instanceId, context) from modules.interfaces.interfaceDbApp import getRootInterface from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource rootIf = getRootInterface() inst = rootIf.getFeatureInstance(body.featureInstanceId) mandateId = str(inst.mandateId) if inst else (str(context.mandateId) if context.mandateId else "") + if wsMandateId and mandateId and mandateId != wsMandateId: + raise HTTPException(status_code=403, detail="Feature instance does not belong to workspace mandate") fds = FeatureDataSource( featureInstanceId=body.featureInstanceId, @@ -1493,6 +1629,7 @@ mandateId=mandateId, userId=str(context.user.id), workspaceInstanceId=instanceId, + recordFilter=body.recordFilter, ) created = rootIf.db.recordCreate(FeatureDataSource, fds.model_dump()) return JSONResponse(created if isinstance(created, dict) else fds.model_dump()) diff --git a/modules/serviceCenter/services/serviceAgent/featureDataAgent.py b/modules/serviceCenter/services/serviceAgent/featureDataAgent.py index e36745df..8ef0bfcc 100644 --- a/modules/serviceCenter/services/serviceAgent/featureDataAgent.py +++ b/modules/serviceCenter/services/serviceAgent/featureDataAgent.py @@ -38,6 +38,7 @@ async def runFeatureDataAgent( aiCallFn: Callable[[AiCallRequest], Awaitable[AiCallResponse]], dbConnector, instanceLabel: str = "", + tableFilters: Optional[Dict[str, Dict[str, str]]] = None, ) -> str: """Run the feature data sub-agent and return the textual result. @@ -51,13 +52,14 @@ aiCallFn: AI call function (with billing). dbConnector: DatabaseConnector for queries. instanceLabel: Human-readable instance name for context. 
+ tableFilters: Per-table record filters from FeatureDataSource.recordFilter. Returns: Plain-text answer produced by the sub-agent. """ provider = FeatureDataProvider(dbConnector) - registry = _buildSubAgentTools(provider, featureInstanceId, mandateId) + registry = _buildSubAgentTools(provider, featureInstanceId, mandateId, tableFilters or {}) for tbl in selectedTables: meta = tbl.get("meta", {}) @@ -103,9 +105,18 @@ def _buildSubAgentTools( provider: FeatureDataProvider, featureInstanceId: str, mandateId: str, + tableFilters: Dict[str, Dict[str, str]] = None, ) -> ToolRegistry: """Register browseTable and queryTable as sub-agent tools.""" registry = ToolRegistry() + _tableFilters = tableFilters or {} + + def _recordFilterToList(tableName: str) -> Optional[List[Dict[str, Any]]]: + """Convert a recordFilter dict to a list of {field, op, value} filter dicts.""" + rf = _tableFilters.get(tableName) + if not rf: + return None + return [{"field": k, "op": "=", "value": v} for k, v in rf.items()] async def _browseTable(args: Dict[str, Any], context: Dict[str, Any]): tableName = args.get("tableName", "") @@ -121,6 +132,7 @@ def _buildSubAgentTools( fields=fields, limit=min(limit, 200), offset=offset, + extraFilters=_recordFilterToList(tableName), ) return ToolResult( toolCallId="", toolName="browseTable", @@ -147,6 +159,7 @@ def _buildSubAgentTools( orderBy=orderBy, limit=min(limit, 200), offset=offset, + extraFilters=_recordFilterToList(tableName), ) return ToolResult( toolCallId="", toolName="queryTable", diff --git a/modules/serviceCenter/services/serviceAgent/featureDataProvider.py b/modules/serviceCenter/services/serviceAgent/featureDataProvider.py index 40bf0c6b..25a0ff95 100644 --- a/modules/serviceCenter/services/serviceAgent/featureDataProvider.py +++ b/modules/serviceCenter/services/serviceAgent/featureDataProvider.py @@ -69,28 +69,36 @@ class FeatureDataProvider: fields: List[str] = None, limit: int = 50, offset: int = 0, + extraFilters: Optional[List[Dict[str, Any]]] = None, ) -> Dict[str, Any]: """List rows from a feature table with pagination. Returns ``{"rows": [...], "total": N, "limit": L, "offset": O}``. 
""" _validateTableName(tableName) - scopeFilter = _buildScopeFilter(tableName, featureInstanceId, mandateId) + conn = self._db.connection + scopeFilter = _buildScopeFilter(tableName, featureInstanceId, mandateId, dbConnection=conn) + extraWhere, extraParams = _buildFilterClauses(extraFilters) + + fullWhere = scopeFilter["where"] + allParams = list(scopeFilter["params"]) + if extraWhere: + fullWhere += " AND " + extraWhere + allParams.extend(extraParams) try: - conn = self._db.connection with conn.cursor() as cur: - countSql = f'SELECT COUNT(*) FROM "{tableName}" WHERE {scopeFilter["where"]}' - cur.execute(countSql, scopeFilter["params"]) + countSql = f'SELECT COUNT(*) FROM "{tableName}" WHERE {fullWhere}' + cur.execute(countSql, allParams) total = cur.fetchone()["count"] if cur.rowcount else 0 selectCols = ", ".join(f'"{f}"' for f in fields) if fields else "*" dataSql = ( f'SELECT {selectCols} FROM "{tableName}" ' - f'WHERE {scopeFilter["where"]} ' + f'WHERE {fullWhere} ' f'ORDER BY "id" LIMIT %s OFFSET %s' ) - cur.execute(dataSql, scopeFilter["params"] + [limit, offset]) + cur.execute(dataSql, allParams + [limit, offset]) rows = [_serializeRow(dict(r)) for r in cur.fetchall()] return {"rows": rows, "total": total, "limit": limit, "offset": offset} @@ -108,14 +116,19 @@ class FeatureDataProvider: orderBy: str = None, limit: int = 50, offset: int = 0, + extraFilters: Optional[List[Dict[str, Any]]] = None, ) -> Dict[str, Any]: """Query a feature table with optional filters. ``filters`` is a list of ``{"field": "x", "op": "=", "value": "y"}``. + ``extraFilters`` are mandatory record-level scoping filters injected by the pipeline. """ _validateTableName(tableName) - scopeFilter = _buildScopeFilter(tableName, featureInstanceId, mandateId) - extraWhere, extraParams = _buildFilterClauses(filters) + conn = self._db.connection + scopeFilter = _buildScopeFilter(tableName, featureInstanceId, mandateId, dbConnection=conn) + + combinedFilters = list(filters or []) + list(extraFilters or []) + extraWhere, extraParams = _buildFilterClauses(combinedFilters if combinedFilters else None) fullWhere = scopeFilter["where"] allParams = list(scopeFilter["params"]) @@ -124,7 +137,6 @@ class FeatureDataProvider: allParams.extend(extraParams) try: - conn = self._db.connection with conn.cursor() as cur: countSql = f'SELECT COUNT(*) FROM "{tableName}" WHERE {fullWhere}' cur.execute(countSql, allParams) @@ -149,6 +161,34 @@ class FeatureDataProvider: # helpers # ------------------------------------------------------------------ +_instanceColCache: Dict[str, str] = {} + + +def _resolveInstanceColumn(tableName: str, dbConnection=None) -> str: + """Detect whether the table uses ``instanceId`` or ``featureInstanceId``.""" + if tableName in _instanceColCache: + return _instanceColCache[tableName] + if dbConnection: + try: + with dbConnection.cursor() as cur: + cur.execute( + "SELECT column_name FROM information_schema.columns " + "WHERE table_schema = 'public' AND LOWER(table_name) = LOWER(%s) " + "AND column_name IN ('featureInstanceId', 'instanceId')", + [tableName], + ) + cols = [row["column_name"] for row in cur.fetchall()] + if "featureInstanceId" in cols: + _instanceColCache[tableName] = "featureInstanceId" + return "featureInstanceId" + if "instanceId" in cols: + _instanceColCache[tableName] = "instanceId" + return "instanceId" + except Exception: + pass + return "instanceId" + + def _validateTableName(tableName: str): if not tableName or not _isValidIdentifier(tableName): raise ValueError(f"Invalid table name: 
{tableName}") @@ -159,17 +199,19 @@ def _isValidIdentifier(name: str) -> bool: return name.isidentifier() -def _buildScopeFilter(tableName: str, featureInstanceId: str, mandateId: str) -> Dict[str, Any]: +def _buildScopeFilter(tableName: str, featureInstanceId: str, mandateId: str, dbConnection=None) -> Dict[str, Any]: """Build the mandatory WHERE clause that scopes rows to the feature instance. - Feature tables usually have either ``featureInstanceId`` or a combination - of ``mandateId`` + an org/context FK. We try ``featureInstanceId`` first, - then fall back to ``mandateId``. + Feature tables use either ``instanceId`` (commcoach, teamsbot) or + ``featureInstanceId`` (trustee) as the FK. We detect the actual column + from ``information_schema`` when a DB connection is provided. """ + instanceCol = _resolveInstanceColumn(tableName, dbConnection) + conditions = [] params = [] - conditions.append('"featureInstanceId" = %s') + conditions.append(f'"{instanceCol}" = %s') params.append(featureInstanceId) if mandateId: diff --git a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py index 9a702aa0..08950ea3 100644 --- a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py +++ b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py @@ -3192,11 +3192,16 @@ def _registerCoreTools(registry: ToolRegistry, services): from modules.security.rbacCatalog import getCatalogService catalog = getCatalogService() + tableFilters = {} if not featureDataSources: selectedTables = catalog.getDataObjects(featureCode) else: allObjs = {o["meta"]["table"]: o for o in catalog.getDataObjects(featureCode) if "meta" in o and "table" in o.get("meta", {})} selectedTables = [allObjs[ds["tableName"]] for ds in featureDataSources if ds.get("tableName") in allObjs] + for ds in featureDataSources: + rf = ds.get("recordFilter") + if rf and isinstance(rf, dict) and ds.get("tableName"): + tableFilters[ds["tableName"]] = rf if not selectedTables: return ToolResult( @@ -3239,6 +3244,7 @@ def _registerCoreTools(registry: ToolRegistry, services): aiCallFn=_subAgentAiCall, dbConnector=featureDbConn, instanceLabel=instanceLabel, + tableFilters=tableFilters, ) finally: try: From 0a5fa20cb8fdc8b318cbe073d4aac97a23e73970 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Wed, 1 Apr 2026 21:59:28 +0200 Subject: [PATCH 28/33] fixed voice feat commcoach --- modules/datamodels/datamodelAi.py | 1 + .../features/commcoach/datamodelCommcoach.py | 4 + .../commcoach/routeFeatureCommcoach.py | 15 +- .../features/commcoach/serviceCommcoach.py | 615 +++++++++++++++--- .../features/commcoach/serviceCommcoachAi.py | 63 +- .../commcoach/serviceCommcoachIndexer.py | 223 +++++++ .../commcoach/serviceCommcoachScheduler.py | 56 +- modules/interfaces/interfaceAiObjects.py | 18 +- modules/routes/routeVoiceGoogle.py | 4 +- .../services/serviceAgent/agentLoop.py | 15 +- .../services/serviceAgent/mainServiceAgent.py | 104 +-- .../services/serviceAgent/toolRegistry.py | 14 +- 12 files changed, 932 insertions(+), 200 deletions(-) create mode 100644 modules/features/commcoach/serviceCommcoachIndexer.py diff --git a/modules/datamodels/datamodelAi.py b/modules/datamodels/datamodelAi.py index 96e05185..662eded2 100644 --- a/modules/datamodels/datamodelAi.py +++ b/modules/datamodels/datamodelAi.py @@ -172,6 +172,7 @@ class AiCallRequest(BaseModel): contentParts: Optional[List['ContentPart']] = None # Content parts for model-aware chunking messages: Optional[List[Dict[str, 
Any]]] = Field(default=None, description="OpenAI-style messages for multi-turn agent conversations") tools: Optional[List[Dict[str, Any]]] = Field(default=None, description="Tool definitions for native function calling") + toolChoice: Optional[Any] = Field(default=None, description="Tool choice: 'auto', 'none', or specific tool (passed through to model call)") requireNeutralization: Optional[bool] = Field(default=None, description="Per-request neutralization override: True=force, False=skip, None=use config") diff --git a/modules/features/commcoach/datamodelCommcoach.py b/modules/features/commcoach/datamodelCommcoach.py index 635ba19a..82be6044 100644 --- a/modules/features/commcoach/datamodelCommcoach.py +++ b/modules/features/commcoach/datamodelCommcoach.py @@ -228,6 +228,10 @@ class UpdateContextRequest(BaseModel): class SendMessageRequest(BaseModel): content: str = Field(description="User message text") contentType: Optional[CoachingMessageContentType] = CoachingMessageContentType.TEXT + fileIds: Optional[List[str]] = Field(default=None, description="Attached file IDs for agent context") + dataSourceIds: Optional[List[str]] = Field(default=None, description="Personal data source IDs") + featureDataSourceIds: Optional[List[str]] = Field(default=None, description="Feature data source IDs") + allowedProviders: Optional[List[str]] = Field(default=None, description="Allowed AI providers") class CreateTaskRequest(BaseModel): diff --git a/modules/features/commcoach/routeFeatureCommcoach.py b/modules/features/commcoach/routeFeatureCommcoach.py index ccb4d342..8ffd3eca 100644 --- a/modules/features/commcoach/routeFeatureCommcoach.py +++ b/modules/features/commcoach/routeFeatureCommcoach.py @@ -334,9 +334,8 @@ async def startSession( try: from modules.interfaces.interfaceVoiceObjects import getVoiceInterface voiceInterface = getVoiceInterface(context.user, mandateId) - from .serviceCommcoach import _getUserVoicePrefs + from .serviceCommcoach import _getUserVoicePrefs, _stripMarkdownForTts, _buildTtsConfigErrorMessage language, voiceName = _getUserVoicePrefs(userId, mandateId) - from .serviceCommcoach import _stripMarkdownForTts ttsResult = await voiceInterface.textToSpeech( text=_stripMarkdownForTts(greetingText), languageCode=language, @@ -349,8 +348,12 @@ async def startSession( audioBytes if isinstance(audioBytes, bytes) else audioBytes.encode() ).decode() yield f"data: {json.dumps({'type': 'ttsAudio', 'data': {'audio': audioB64, 'format': 'mp3'}})}\n\n" + else: + errorDetail = ttsResult.get("error", "Text-to-Speech failed") + yield f"data: {json.dumps({'type': 'error', 'data': {'message': _buildTtsConfigErrorMessage(language, voiceName, errorDetail), 'detail': errorDetail, 'ttsLanguage': language, 'ttsVoice': voiceName}})}\n\n" except Exception as e: logger.warning(f"TTS failed for resumed session: {e}") + yield f"data: {json.dumps({'type': 'error', 'data': {'message': 'Die konfigurierte Stimme für diese Sprache ist ungültig oder nicht verfügbar. 
Bitte passe sie unter Einstellungen > Stimme & Sprache an.', 'detail': str(e)}})}\n\n" yield f"data: {json.dumps({'type': 'complete', 'data': {}, 'timestamp': getIsoTimestamp()})}\n\n" return StreamingResponse( @@ -511,7 +514,13 @@ async def sendMessageStream( _activeProcessTasks.pop(sessionId, None) task = asyncio.create_task( - service.processMessage(sessionId, contextId, body.content, interface) + service.processMessage( + sessionId, contextId, body.content, interface, + fileIds=body.fileIds, + dataSourceIds=body.dataSourceIds, + featureDataSourceIds=body.featureDataSourceIds, + allowedProviders=body.allowedProviders, + ) ) task.add_done_callback(_onTaskDone) _activeProcessTasks[sessionId] = task diff --git a/modules/features/commcoach/serviceCommcoach.py b/modules/features/commcoach/serviceCommcoach.py index 5e5aa810..332a4a01 100644 --- a/modules/features/commcoach/serviceCommcoach.py +++ b/modules/features/commcoach/serviceCommcoach.py @@ -6,6 +6,7 @@ Manages the coaching pipeline: message processing, AI calls, scoring, task extra """ import re +import html import logging import json import asyncio @@ -43,25 +44,117 @@ from .serviceCommcoachContextRetrieval import ( logger = logging.getLogger(__name__) +def _selectConfiguredVoice( + language: str, + voiceMap: Any, + legacyVoice: Optional[str] = None, + legacyLanguage: Optional[str] = None, +) -> Optional[str]: + """Resolve the configured TTS voice for a language from ttsVoiceMap, then legacy ttsVoice.""" + normalizedLanguage = str(language or "").strip() + normalizedLower = normalizedLanguage.lower() + baseLanguage = normalizedLower.split("-", 1)[0] if normalizedLower else "" + + if isinstance(voiceMap, dict) and voiceMap: + direct = voiceMap.get(normalizedLanguage) + if isinstance(direct, str) and direct.strip(): + return direct.strip() + + directBase = voiceMap.get(baseLanguage) + if isinstance(directBase, str) and directBase.strip(): + return directBase.strip() + + for mapKey, mapValue in voiceMap.items(): + if not isinstance(mapValue, str) or not mapValue.strip(): + continue + keyNorm = str(mapKey or "").strip().lower() + if keyNorm == normalizedLower or keyNorm == baseLanguage or (baseLanguage and keyNorm.startswith(baseLanguage + "-")): + return mapValue.strip() + + if legacyVoice and str(legacyVoice).strip(): + legacyLangNorm = str(legacyLanguage or "").strip().lower() + if not legacyLangNorm or legacyLangNorm == normalizedLower: + return str(legacyVoice).strip() + + return None + + +def _buildTtsConfigErrorMessage(language: str, voiceName: Optional[str], rawError: str = "") -> str: + if voiceName: + return ( + f'Die konfigurierte Stimme "{voiceName}" für {language} ist ungültig oder nicht verfügbar. ' + 'Bitte passe sie unter Einstellungen > Stimme & Sprache an.' + ) + return ( + f'Für die Sprache {language} ist keine gültige TTS-Stimme konfiguriert. ' + 'Bitte prüfe die Einstellungen unter Stimme & Sprache.' + ) + + def _getUserVoicePrefs(userId: str, mandateId: Optional[str] = None) -> tuple: """Load voice language and voiceName from central UserVoicePreferences. 
Returns (language, voiceName) tuple.""" try: from modules.datamodels.datamodelUam import UserVoicePreferences - from modules.security.rootAccess import getRootInterface + from modules.interfaces.interfaceDbApp import getRootInterface rootIf = getRootInterface() prefs = rootIf.db.getRecordset( UserVoicePreferences, - recordFilter={"userId": userId, "mandateId": mandateId} + recordFilter={"userId": userId} ) - if not prefs and mandateId: - prefs = rootIf.db.getRecordset( - UserVoicePreferences, - recordFilter={"userId": userId} - ) if prefs: - p = prefs[0] if isinstance(prefs[0], dict) else prefs[0].model_dump() - return (p.get("ttsLanguage") or p.get("sttLanguage") or "de-DE", p.get("ttsVoice")) + allPrefs = [ + pref if isinstance(pref, dict) else pref.model_dump() + for pref in prefs + ] + scopedPref = next( + ( + pref for pref in allPrefs + if str(pref.get("mandateId") or "").strip() == str(mandateId or "").strip() + ), + None, + ) + globalPref = next( + ( + pref for pref in allPrefs + if not str(pref.get("mandateId") or "").strip() + ), + None, + ) + + language = ( + (globalPref or {}).get("ttsLanguage") + or (globalPref or {}).get("sttLanguage") + or (scopedPref or {}).get("ttsLanguage") + or (scopedPref or {}).get("sttLanguage") + or "de-DE" + ) + + scopedVoiceFromMap = _selectConfiguredVoice( + language=language, + voiceMap=(scopedPref or {}).get("ttsVoiceMap"), + ) + globalVoice = _selectConfiguredVoice( + language=language, + voiceMap=(globalPref or {}).get("ttsVoiceMap"), + legacyVoice=(globalPref or {}).get("ttsVoice"), + legacyLanguage=(globalPref or {}).get("ttsLanguage"), + ) + scopedLegacyVoice = _selectConfiguredVoice( + language=language, + voiceMap=None, + legacyVoice=(scopedPref or {}).get("ttsVoice"), + legacyLanguage=(scopedPref or {}).get("ttsLanguage"), + ) + anyPref = allPrefs[0] + fallbackVoice = _selectConfiguredVoice( + language=language, + voiceMap=(anyPref or {}).get("ttsVoiceMap"), + legacyVoice=(anyPref or {}).get("ttsVoice"), + legacyLanguage=(anyPref or {}).get("ttsLanguage"), + ) + voiceName = scopedVoiceFromMap or globalVoice or scopedLegacyVoice or fallbackVoice + return (language, voiceName) except Exception as e: logger.warning(f"Failed to load UserVoicePreferences for user={userId}: {e}") return ("de-DE", None) @@ -111,26 +204,91 @@ def cleanupSessionEvents(sessionId: str): CHUNK_WORD_SIZE = 4 CHUNK_DELAY_SECONDS = 0.05 -def _wrapEmailHtml(contentHtml: str) -> str: - """Wrap AI-generated HTML content in a styled email shell.""" - return f""" - - - -

-  <body> -    <h1>Coaching-Session Zusammenfassung</h1> -    <p>PowerOn CommCoach</p> -    <div>{contentHtml}</div> -    <p>Diese Zusammenfassung wurde automatisch erstellt.</p> -  </body> -</html> -"""
+ +def _normalizeEmailBulletList(values: Any, maxItems: int = 4) -> List[str]: + items: List[str] = [] + if not isinstance(values, list): + return items + for value in values: + text = str(value or "").strip() + if text: + items.append(text) + if len(items) >= maxItems: + break + return items
+ + +def _buildSummaryEmailBlock( + emailData: Optional[Dict[str, Any]], + summary: str, + contextTitle: str, +) -> str: + """Render a stable, mail-client-friendly CommCoach summary block.""" + payload = emailData or {} + headline = str(payload.get("headline") or contextTitle or "Coaching-Session").strip() + intro = str(payload.get("intro") or "").strip() + coreTopic = str(payload.get("coreTopic") or "").strip() + insights = _normalizeEmailBulletList(payload.get("insights")) + nextSteps = _normalizeEmailBulletList(payload.get("nextSteps")) + progress = _normalizeEmailBulletList(payload.get("progress"))
+ + if not (intro or coreTopic or insights or nextSteps or progress): + escapedSummary = html.escape(summary or "").replace("\n", "<br>") + return ( + '<div>' + f'<h2>{html.escape(headline)}</h2>' + f'<p>{escapedSummary}</p>' + '</div>' + )
+ + def _renderSection(title: str, bodyHtml: str) -> str: + if not bodyHtml: + return "" + return ( + '<tr><td>' + f'<h3>{html.escape(title)}</h3>' + f'<div>{bodyHtml}</div>' + '</td></tr>' + )
+ + def _renderList(values: List[str]) -> str: + if not values: + return "" + rows = "".join( + '<tr>' + '<td>•</td>' + f'<td>{html.escape(item)}</td>' + '</tr>' + for item in values + ) + return f'<table>{rows}</table>'
+ + introHtml = f'<p>{html.escape(intro)}</p>' if intro else "" + coreTopicHtml = f'<p>{html.escape(coreTopic)}</p>' if coreTopic else ""
+ + sectionsHtml = "".join([ + _renderSection("Kernbotschaft", introHtml), + _renderSection("Kernthema", coreTopicHtml), + _renderSection("Erkenntnisse", _renderList(insights)), + _renderSection("Nächste Schritte", _renderList(nextSteps)), + _renderSection("Fortschritt", _renderList(progress)), + ])
+ + return ( + '<table>' + '<tr><td>' + f'<h2>{html.escape(headline)}</h2>' + f'<p>Thema: {html.escape(contextTitle)}</p>' + '</td></tr>' + f'{sectionsHtml}' + '</table>' + )
 DOC_INTENT_MAX_DOCS = 3 DOC_CONTENT_MAX_CHARS = 3000 @@ -160,7 +318,7 @@ def _stripPendingUserMessages(messages: List[Dict[str, Any]]) -> List[Dict[str, def _parseAiJsonResponse(rawText: str) -> Dict[str, Any]: - """Parse the structured JSON response from AI. Strips optional markdown code fences.""" + """Parse optional structured AI output; otherwise treat free text as normal response.""" text = rawText.strip() if text.startswith("```"): lines = text.split("\n") @@ -169,10 +327,14 @@ lines = lines[:-1] text = "\n".join(lines) try: - return json.loads(text) + parsed = json.loads(text) + if isinstance(parsed, dict): + if parsed.get("text") and not parsed.get("speech"): + parsed["speech"] = parsed.get("text") + return parsed + return {"text": rawText.strip(), "speech": rawText.strip(), "documents": []} except json.JSONDecodeError: - logger.warning(f"AI JSON parse failed, using raw text: {text[:200]}") - return {"text": rawText.strip(), "speech": "", "documents": []} + return {"text": rawText.strip(), "speech": rawText.strip(), "documents": []} async def _generateAndEmitTts(sessionId: str, speechText: str, currentUser, mandateId: str, @@ -197,8 +359,20 @@ audioBytes if isinstance(audioBytes, bytes) else audioBytes.encode() ).decode() await emitSessionEvent(sessionId, "ttsAudio", {"audio": audioB64, "format": "mp3"}) + return + errorDetail = ttsResult.get("error", "Text-to-Speech failed") + await emitSessionEvent(sessionId, "error", { + "message": _buildTtsConfigErrorMessage(language, voiceName, errorDetail), + "detail": errorDetail, + "ttsLanguage": language, + "ttsVoice": voiceName, + }) except Exception as e: logger.warning(f"TTS failed for session {sessionId}: {e}") + await emitSessionEvent(sessionId, "error", { + "message": _buildTtsConfigErrorMessage("de-DE", None, str(e)), + "detail": str(e), + }) def _resolveFileNameAndMime(title: str) -> tuple: @@ -400,6 +574,151 @@ def _getDocumentSummaries(contextId: str, userId: str, interface, return None +def _createCommcoachRagFn( + userId: str, + featureInstanceId: str, + mandateId: str, + context: Dict[str, Any], + tasks: List[Dict[str, Any]], + currentUser=None, +): + """Create a CommCoach-specific RAG function combining KnowledgeService RAG with live coaching DB context.""" + + async def _buildRagContext( + currentPrompt: str, workflowId: str, userId: str, + featureInstanceId: str, mandateId: str, **kwargs + ) -> str: + parts = [] + + # 1. Standard KnowledgeService RAG (finds indexed session chunks + files) + try: + from modules.serviceCenter import getService + from modules.serviceCenter.context import ServiceCenterContext + serviceContext = ServiceCenterContext( + user=currentUser, + mandate_id=mandateId, + feature_instance_id=featureInstanceId, + ) + knowledgeService = getService("knowledge", serviceContext) + ragContext = await knowledgeService.buildAgentContext( + currentPrompt=currentPrompt, + workflowId=workflowId, + userId=userId, + featureInstanceId=featureInstanceId, + mandateId=mandateId, + ) + if ragContext: + parts.append(ragContext) + except Exception as e: + logger.debug(f"CommCoach RAG knowledge context failed: {e}")
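# (Sketch, not part of the diff: shape of the combined string _buildRagContext
#  returns, with made-up values --
#
#      <chunks from knowledgeService.buildAgentContext(...)>
#
#      --- Coaching-Kontext (Live) ---
#      Aktuelle Ziele:
#      - Klarer kommunizieren
#
#      Offene Aufgaben:
#      - Feedbackgespräch vorbereiten
#
#  The section headers come from the live-context block right below.)
+ + # 2.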
Live coaching DB context (current goals, tasks, rolling overview) + liveContext = [] + goals = _parseJsonField(context.get("goals")) if context else None + if goals: + goalTexts = [g.get("text", g) if isinstance(g, dict) else str(g) for g in goals if g] + if goalTexts: + liveContext.append("Aktuelle Ziele:\n" + "\n".join(f"- {g}" for g in goalTexts)) + + openTasks = [t for t in (tasks or []) if t.get("status") in ("open", "inProgress")] + if openTasks: + taskLines = [f"- {t.get('title', '')}" for t in openTasks[:5]] + liveContext.append("Offene Aufgaben:\n" + "\n".join(taskLines)) + + rollingOverview = context.get("rollingOverview") if context else None + if rollingOverview: + liveContext.append(f"Gesamtüberblick bisheriger Sessions:\n{rollingOverview[:500]}") + + insights = _parseJsonField(context.get("insights")) if context else None + if insights: + insightTexts = [i.get("text", i) if isinstance(i, dict) else str(i) for i in insights[-5:] if i] + if insightTexts: + liveContext.append("Bisherige Erkenntnisse:\n" + "\n".join(f"- {t}" for t in insightTexts)) + + if liveContext: + parts.append("--- Coaching-Kontext (Live) ---\n" + "\n\n".join(liveContext)) + + return "\n\n".join(parts) if parts else "" + + return _buildRagContext + + +def _parseJsonField(value, fallback=None): + if not value: + return fallback + if isinstance(value, (list, dict)): + return value + try: + return json.loads(value) + except (json.JSONDecodeError, TypeError): + return fallback + + +_RESEARCH_KEYWORDS = re.compile( + r"\b(such|recherchier|schau nach|im web|finde heraus|google|online|nachschlagen|" + r"search|look up|find out|browse)\b", + re.IGNORECASE, +) + + +def _shouldActivateTools( + fileIds: Optional[List[str]], + dataSourceIds: Optional[List[str]], + featureDataSourceIds: Optional[List[str]], + userMessage: str, +) -> bool: + """Decide whether the agent should have tools activated for this turn.""" + if fileIds: + return True + if dataSourceIds: + return True + if featureDataSourceIds: + return True + if _RESEARCH_KEYWORDS.search(userMessage or ""): + return True + return False + + +def _buildConversationHistory(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """Convert coaching messages to OpenAI-style conversation history for the agent.""" + history = [] + for msg in messages: + role = msg.get("role", "user") + content = msg.get("content", "") + if role in ("user", "assistant") and content: + history.append({"role": role, "content": content}) + return history + + +_TTS_WORD_LIMIT = 200 + + +async def _prepareSpeechText(fullText: str, callAiFn) -> str: + """Prepare text for TTS. Short responses used directly; long ones get summarized.""" + cleaned = _stripMarkdownForTts(fullText) + wordCount = len(cleaned.split()) + if wordCount <= _TTS_WORD_LIMIT: + return cleaned + try: + prompt = f"""Fasse den folgenden Text in 3-4 natürlichen, gesprochenen Sätzen zusammen. +Der Text soll vorgelesen werden – schreibe daher natürlich und flüssig, keine Aufzählungen. +Behalte die wichtigsten Punkte und den Ton bei. + +Text: +{cleaned[:3000]} + +Antworte NUR mit der gekürzten Sprachversion.""" + response = await callAiFn( + "Du kürzt Texte für Sprachausgabe. 
Antworte kurz und natürlich.", + prompt, + ) + if response and response.errorCount == 0 and response.content: + return response.content.strip() + except Exception as e: + logger.warning(f"Speech summary generation failed: {e}") + return cleaned[:1500] + + class CommcoachService: """Coaching orchestrator: processes messages, calls AI, extracts tasks and scores.""" @@ -409,14 +728,20 @@ class CommcoachService: self.instanceId = instanceId self.userId = str(currentUser.id) - async def processMessage(self, sessionId: str, contextId: str, userContent: str, interface) -> Dict[str, Any]: + async def processMessage( + self, sessionId: str, contextId: str, userContent: str, interface, + fileIds: Optional[List[str]] = None, + dataSourceIds: Optional[List[str]] = None, + featureDataSourceIds: Optional[List[str]] = None, + allowedProviders: Optional[List[str]] = None, + ) -> Dict[str, Any]: """ - Process a user message through the coaching pipeline: + Process a user message through the agent-based coaching pipeline: 1. Store user message - 2. Build context with history - 3. Call AI for coaching response - 4. Store assistant message - 5. Emit SSE events + 2. Build coaching system prompt + session history + 3. Run AgentService with CommCoach RAG and optional tools + 4. Map agent events to CommCoach SSE events + 5. Post-processing: store message, TTS, tasks, scores """ from . import interfaceFeatureCommcoach as interfaceDb @@ -474,88 +799,62 @@ class CommcoachService: logger.warning(f"History compression failed for session {sessionId}: {e}") previousMessages = messages[-20:] - # Combine all pending user messages (after last assistant message) as the user prompt combinedUserPrompt = _buildCombinedUserPrompt(previousMessages) if not combinedUserPrompt: combinedUserPrompt = userContent - # Strip pending user messages from previousMessages to avoid redundancy in system prompt contextMessages = _stripPendingUserMessages(previousMessages) - tasks = interface.getTasks(contextId, self.userId) await emitSessionEvent(sessionId, "status", {"label": "Kontext wird geladen..."}) - retrievalResult = await self._buildRetrievalContext( - contextId, sessionId, combinedUserPrompt, context, interface - ) - persona = _resolvePersona(session, interface) - documentSummaries = _getDocumentSummaries( - contextId, self.userId, interface, mandateId=self.mandateId, instanceId=self.instanceId - ) - - # Document intent detection (pre-AI-call) - referencedDocumentContents = None - allDocs = _getPlatformFileList(self.mandateId, self.instanceId) if documentSummaries else [] - if allDocs: - await emitSessionEvent(sessionId, "status", {"label": "Dokumente werden geprueft..."}) - docIntent = await _resolveDocumentIntent(combinedUserPrompt, allDocs, self._callAi) - if not docIntent.get("noDocumentAction"): - docIdsToLoad = list(set((docIntent.get("read") or []) + (docIntent.get("update") or []))) - if docIdsToLoad: - referencedDocumentContents = _loadDocumentContents( - docIdsToLoad, interface, mandateId=self.mandateId, instanceId=self.instanceId - ) systemPrompt = aiPrompts.buildCoachingSystemPrompt( context, contextMessages, tasks, - previousSessionSummaries=retrievalResult.get("previousSessionSummaries"), earlierSummary=earlierSummary, - rollingOverview=retrievalResult.get("rollingOverview"), - retrievedSession=retrievalResult.get("retrievedSession"), - retrievedByTopic=retrievalResult.get("retrievedByTopic"), persona=persona, - documentSummaries=documentSummaries, - referencedDocumentContents=referencedDocumentContents, ) - if 
retrievalResult.get("intent") == RetrievalIntent.SUMMARIZE_ALL: - systemPrompt += "\n\nWICHTIG: Der Benutzer möchte eine Gesamtzusammenfassung. Erstelle eine umfassende Zusammenfassung aller genannten Sessions und der aktuellen Session." + # Build conversation history for the agent + conversationHistory = _buildConversationHistory(contextMessages) + + # Dynamic tool activation + useTools = _shouldActivateTools(fileIds, dataSourceIds, featureDataSourceIds, combinedUserPrompt) - # Call AI await emitSessionEvent(sessionId, "status", {"label": "Coach formuliert Antwort..."}) try: - aiResponse = await self._callAi(systemPrompt, combinedUserPrompt) + agentResponse = await self._runAgent( + sessionId=sessionId, + prompt=combinedUserPrompt, + systemPrompt=systemPrompt, + conversationHistory=conversationHistory, + context=context, + tasks=tasks, + fileIds=fileIds, + useTools=useTools, + allowedProviders=allowedProviders, + ) except asyncio.CancelledError: logger.info(f"processMessage cancelled for session {sessionId} (new message arrived)") return createdUserMsg except Exception as e: - logger.error(f"AI call failed for session {sessionId}: {e}") + logger.error(f"Agent call failed for session {sessionId}: {e}") await emitSessionEvent(sessionId, "error", {"message": f"AI error: {str(e)}"}) return createdUserMsg - responseRaw = aiResponse.content.strip() if aiResponse and aiResponse.errorCount == 0 else "" + textContent = agentResponse or "" - if not responseRaw: - parsed = {"text": "Entschuldigung, ich konnte gerade nicht antworten. Bitte versuche es erneut.", "speech": "", "documents": []} - else: - parsed = _parseAiJsonResponse(responseRaw) - - textContent = parsed.get("text", "") - speechContent = parsed.get("speech", "") - documents = parsed.get("documents", []) + if not textContent: + textContent = "Entschuldigung, ich konnte gerade nicht antworten. Bitte versuche es erneut." if asyncio.current_task() and asyncio.current_task().cancelled(): logger.info(f"processMessage cancelled before storing response for session {sessionId}") return createdUserMsg - for doc in documents: - await _saveOrUpdateDocument(doc, contextId, self.userId, self.mandateId, self.instanceId, interface, sessionId, user=self.currentUser) - assistantMsg = CoachingMessage( sessionId=sessionId, contextId=contextId, @@ -571,8 +870,11 @@ class CommcoachService: await emitSessionEvent(sessionId, "status", {"label": "Antwort wird verarbeitet..."}) + # TTS: use free-text directly; for long responses, generate speech summary + speechText = await _prepareSpeechText(textContent, self._callAi) + ttsTask = asyncio.create_task( - _generateAndEmitTts(sessionId, speechContent, self.currentUser, self.mandateId, self.instanceId, interface) + _generateAndEmitTts(sessionId, speechText, self.currentUser, self.mandateId, self.instanceId, interface) ) await _emitChunkedResponse(sessionId, createdAssistantMsg, textContent) await ttsTask @@ -580,6 +882,75 @@ class CommcoachService: await emitSessionEvent(sessionId, "complete", {}) return createdAssistantMsg + async def _runAgent( + self, + sessionId: str, + prompt: str, + systemPrompt: str, + conversationHistory: List[Dict[str, Any]], + context: Dict[str, Any], + tasks: List[Dict[str, Any]], + fileIds: Optional[List[str]] = None, + useTools: bool = False, + allowedProviders: Optional[List[str]] = None, + ) -> str: + """Run the AgentService for a coaching message. 
Returns the final text response.""" + from modules.serviceCenter import getService + from modules.serviceCenter.context import ServiceCenterContext + from modules.serviceCenter.services.serviceAgent.datamodelAgent import AgentConfig, AgentEventTypeEnum + + serviceContext = ServiceCenterContext( + user=self.currentUser, + mandate_id=self.mandateId, + feature_instance_id=self.instanceId, + ) + agentService = getService("agent", serviceContext) + + config = AgentConfig( + toolSet="commcoach" if useTools else "none", + maxRounds=3 if useTools else 1, + temperature=0.4, + ) + + buildRagContextFn = _createCommcoachRagFn( + userId=self.userId, + featureInstanceId=self.instanceId, + mandateId=self.mandateId, + context=context, + tasks=tasks, + currentUser=self.currentUser, + ) + + finalText = "" + async for event in agentService.runAgent( + prompt=prompt, + fileIds=fileIds, + config=config, + toolSet=config.toolSet, + workflowId=f"commcoach:{sessionId}", + conversationHistory=conversationHistory, + buildRagContextFn=buildRagContextFn, + systemPromptOverride=systemPrompt, + ): + if event.type == AgentEventTypeEnum.CHUNK: + chunk = event.content or "" + finalText += chunk + elif event.type == AgentEventTypeEnum.MESSAGE: + finalText += event.content or "" + elif event.type == AgentEventTypeEnum.FINAL: + if not finalText: + finalText = event.content or "" + elif event.type == AgentEventTypeEnum.TOOL_CALL: + await emitSessionEvent(sessionId, "toolCall", event.data or {}) + elif event.type == AgentEventTypeEnum.TOOL_RESULT: + await emitSessionEvent(sessionId, "toolResult", event.data or {}) + elif event.type == AgentEventTypeEnum.AGENT_PROGRESS: + await emitSessionEvent(sessionId, "agentProgress", event.data or {}) + elif event.type == AgentEventTypeEnum.ERROR: + await emitSessionEvent(sessionId, "error", {"message": event.content or "Agent error"}) + + return finalText.strip() + async def processSessionOpening(self, sessionId: str, contextId: str, interface) -> Dict[str, Any]: """ Generate and stream the opening greeting for a new session. @@ -742,9 +1113,9 @@ class CommcoachService: }) return session - # Generate summary (AI returns JSON with summary + emailHtml) + # Generate summary (AI returns JSON with summary + structured email payload) summary = None - emailHtml = None + emailData = None try: summaryPrompt = aiPrompts.buildSummaryPrompt(messages, context.get("title", "Coaching")) summaryResponse = await self._callAi("Du bist ein präziser Zusammenfasser. 
Antworte NUR als JSON.", summaryPrompt) @@ -752,7 +1123,10 @@ class CommcoachService: parsed = aiPrompts.parseJsonResponse(summaryResponse.content.strip(), None) if isinstance(parsed, dict): summary = parsed.get("summary") or parsed.get("text") - emailHtml = parsed.get("emailHtml") + if isinstance(parsed.get("email"), dict): + emailData = parsed.get("email") + elif isinstance(parsed.get("emailData"), dict): + emailData = parsed.get("emailData") else: summary = summaryResponse.content.strip() except Exception as e: @@ -843,6 +1217,40 @@ class CommcoachService: except Exception as e: logger.warning(f"Insight generation failed: {e}") + # Index session data for RAG-based long-term memory + try: + from .serviceCommcoachIndexer import indexSessionData + from modules.serviceCenter import getService + from modules.serviceCenter.context import ServiceCenterContext + + serviceContext = ServiceCenterContext( + user=self.currentUser, + mandate_id=self.mandateId, + feature_instance_id=self.instanceId, + ) + knowledgeService = getService("knowledge", serviceContext) + parsedGoals = aiPrompts._parseJsonField(context.get("goals") if context else None, []) + parsedInsights = aiPrompts._parseJsonField(context.get("insights") if context else None, []) + allTasks = interface.getTasks(contextId, self.userId) + + await indexSessionData( + sessionId=sessionId, + contextId=contextId, + userId=self.userId, + featureInstanceId=self.instanceId, + mandateId=self.mandateId, + messages=messages, + summary=summary, + keyTopics=keyTopics, + goals=parsedGoals, + insights=parsedInsights, + tasks=allTasks, + contextTitle=context.get("title", "Coaching") if context else "Coaching", + knowledgeService=knowledgeService, + ) + except Exception as e: + logger.warning(f"Coaching session indexing failed (non-blocking): {e}") + # Calculate duration startedAt = session.get("startedAt", "") durationSeconds = 0 @@ -898,7 +1306,7 @@ class CommcoachService: # Send email summary if summary: contextTitle = context.get("title", "Coaching") if context else "Coaching" - await self._sendSessionEmail(session, summary, emailHtml, contextTitle, interface) + await self._sendSessionEmail(session, summary, emailData, contextTitle, interface) await emitSessionEvent(sessionId, "sessionState", { "status": "completed", @@ -949,8 +1357,15 @@ class CommcoachService: except Exception as e: logger.warning(f"Failed to update streak: {e}") - async def _sendSessionEmail(self, session: Dict[str, Any], summary: str, emailHtml: str, contextTitle: str, interface): - """Send session summary via email if enabled. Uses AI-generated HTML directly.""" + async def _sendSessionEmail( + self, + session: Dict[str, Any], + summary: str, + emailData: Optional[Dict[str, Any]], + contextTitle: str, + interface, + ): + """Send session summary via email with the standard PowerOn layout.""" try: profile = interface.getProfile(self.userId, self.instanceId) if profile and not profile.get("emailSummaryEnabled", True): @@ -958,6 +1373,7 @@ class CommcoachService: from modules.interfaces.interfaceMessaging import getInterface as getMessagingInterface from modules.interfaces.interfaceDbApp import getRootInterface + from modules.shared.notifyMandateAdmins import _renderHtmlEmail, _resolveMandateName rootInterface = getRootInterface() user = rootInterface.getUser(self.userId) @@ -966,9 +1382,18 @@ class CommcoachService: messaging = getMessagingInterface() subject = f"Coaching-Session Zusammenfassung: {contextTitle}" - - contentHtml = emailHtml if emailHtml else f"
<p style="color: #374151; line-height: 1.65; font-size: 15px;">{summary}</p>
" - htmlMessage = _wrapEmailHtml(contentHtml) + mandateName = _resolveMandateName(self.mandateId) + contentHtml = _buildSummaryEmailBlock(emailData, summary, contextTitle) + htmlMessage = _renderHtmlEmail( + "Coaching-Session Zusammenfassung", + [ + f'Thema: {contextTitle}', + "Hier ist die kompakte Zusammenfassung deiner abgeschlossenen Session.", + ], + mandateName, + footerNote="Diese Zusammenfassung wurde automatisch aus deiner Coaching-Session erstellt.", + rawHtmlBlock=contentHtml, + ) messaging.send("email", user.email, subject, htmlMessage) interface.updateSession(session.get("id"), {"emailSent": True}) diff --git a/modules/features/commcoach/serviceCommcoachAi.py b/modules/features/commcoach/serviceCommcoachAi.py index 97deb373..8b916005 100644 --- a/modules/features/commcoach/serviceCommcoachAi.py +++ b/modules/features/commcoach/serviceCommcoachAi.py @@ -168,29 +168,18 @@ Handlungsprinzip: - Wenn der Benutzer dich bittet, etwas zu erstellen (Dokument, Präsentation, Checkliste, Plan), dann TU ES SOFORT. Frage NICHT nochmals nach Bestätigung. - Verwende alle verfügbaren Informationen aus dem Chat-Verlauf, den Dokumenten und dem Kontext. - Wenn der Benutzer sagt "erstelle", "mach", "schreib", dann liefere das fertige Ergebnis — keine Aufzählung von Punkten, die du "gleich umsetzen wirst". +- Dir wird automatisch relevanter Kontext aus früheren Sessions bereitgestellt (Relevant Knowledge). Nutze diesen für Kontinuität und Bezugnahme auf frühere Gespräche. Antwortformat: -Du antwortest IMMER als reines JSON-Objekt mit exakt diesen Feldern: -{"text": "...", "speech": "...", "documents": []} +- Antworte direkt als Freitext (KEIN JSON). Markdown-Formatierung ist erlaubt. +- Halte Antworten gesprächig und kurz (2-6 Sätze im Normalfall), wie in einem echten Coaching-Gespräch. +- Bei komplexen Themen oder wenn der Benutzer Details anfragt, darf die Antwort ausführlicher sein. +- Dein Text wird sowohl angezeigt als auch vorgelesen – schreibe daher natürlich und gut sprechbar. -"text": Dein schriftlicher Chat-Text. Details, Struktur, Übungen, Beispiele. Markdown-Formatierung erlaubt. -"speech": Dein gesprochener Kommentar. Natürlich, wie ein Gespräch. Fasse zusammen, kommentiere, motiviere, stelle Fragen. Lies NICHT den Text vor, ergänze ihn mündlich. 2-4 Sätze, reiner Redetext ohne Formatierung. -"documents": Dokumente die der Benutzer aufbewahren kann. Erstelle ein Dokument wenn: der Benutzer explizit darum bittet, du strukturierte Inhalte lieferst, oder Material zum Aufbewahren sinnvoll ist. Wenn keine: leeres Array []. - -Dokument-Format: -{"title": "Dateiname_mit_Extension.html", "content": "...vollstaendiger Inhalt..."} -- Der Title IST der Dateiname inkl. Extension (.html, .md, .txt etc.) -- Fuer HTML-Dokumente: Erstelle VOLLSTAENDIGES, professionell gestyltes HTML mit inline CSS. Kein Markdown, sondern fertiges HTML mit Farben, Layout, Typografie. -- Fuer andere Dokumente: Verwende Markdown. -- WICHTIG: Der Content muss VOLLSTAENDIG und AUSFUEHRLICH sein. Keine Platzhalter, keine "hier kommt..."-Abschnitte. Schreibe echte, detaillierte Inhalte basierend auf allen verfuegbaren Informationen aus dem Chat und den Dokumenten. -- Laengenbeschraenkung fuer Dokumente: KEINE. Schreibe so viel wie noetig fuer ein vollstaendiges Ergebnis. - -Kanalverteilung: -- Fakten, Listen, Übungen -> text -- Empathie, Einordnung, Nachfragen -> speech -- Erstellte Dateien, Materialien zum Aufbewahren -> documents - -WICHTIG: Antworte NUR mit dem JSON-Objekt. 
Kein Text vor oder nach dem JSON.""" +Tool-Nutzung: +- Du hast Zugriff auf Tools (Dateien lesen, Web-Suche, Datenquellen abfragen) wenn der Benutzer Dateien/Quellen angehängt hat oder Recherche benötigt. +- Nutze Tools NUR wenn nötig. Für normales Coaching-Gespräch: antworte direkt ohne Tools. +- Wenn du ein Tool nutzt, erkläre kurz was du tust.""" if contextDescription: prompt += f"\n\nKontext-Beschreibung: {contextDescription}" @@ -279,7 +268,7 @@ Fuer ein NEUES Dokument: {"title": "...", "content": "...Inhalt..."}""" def buildSummaryPrompt(messages: List[Dict[str, Any]], contextTitle: str) -> str: - """Build a prompt to generate a session summary as JSON with plain text and styled HTML email.""" + """Build a prompt to generate a session summary plus structured email content.""" conversation = "" for msg in messages: role = "Benutzer" if msg.get("role") == "user" else "Coach" @@ -287,27 +276,33 @@ def buildSummaryPrompt(messages: List[Dict[str, Any]], contextTitle: str) -> str return f"""Erstelle eine Zusammenfassung dieser Coaching-Session zum Thema "{contextTitle}". -Antworte AUSSCHLIESSLICH als JSON mit zwei Feldern: +Antworte AUSSCHLIESSLICH als JSON im folgenden Format: {{ - "summary": "Kompakte Zusammenfassung als Plaintext (fuer Anzeige in der App). Struktur: 1. Kernthema, 2. Erkenntnisse, 3. Naechste Schritte, 4. Fortschritt.", - "emailHtml": "
<div>...</div>"
-}}
-
-Fuer "emailHtml": Erstelle ein professionell formatiertes HTML-Fragment (KEIN vollstaendiges HTML-Dokument, nur der Inhalt-Block).
-Verwende inline CSS fuer schoene Darstellung in E-Mail-Clients:
-- Verwende <h3> fuer Abschnitte (color: #1e40af; margin: 20px 0 8px; font-size: 16px)
-- Verwende <ul> / <li> • fuer Stichpunkte (margin: 4px 0; line-height: 1.6)
-- Verwende <strong> fuer Hervorhebungen
-- Verwende <p> fuer Fliesstext (color: #374151; line-height: 1.65; font-size: 15px)
-- Verwende <hr>
    als Trenner - -Fuer "summary": Kompakter Plaintext ohne HTML/Markdown. Abschnitte mit Zeilenumbruechen trennen. +Regeln: +- KEIN HTML erzeugen. +- "summary" ist reiner Plaintext ohne Markdown. +- "headline" kurz und professionell. +- "intro" in natuerlichem Business-Deutsch. +- "insights", "nextSteps" und "progress" jeweils als kurze Stichpunkte. +- Maximal 4 Eintraege pro Liste. +- Wenn eine Liste leer ist, gib [] zurueck. Gespräch: {conversation} -Antworte auf Deutsch, sachlich und kompakt. NUR JSON, keine Erklaerungen.""" +Antworte auf Deutsch, sachlich, klar und kompakt. NUR JSON, keine Erklaerungen.""" def buildScoringPrompt(messages: List[Dict[str, Any]], contextCategory: str) -> str: diff --git a/modules/features/commcoach/serviceCommcoachIndexer.py b/modules/features/commcoach/serviceCommcoachIndexer.py new file mode 100644 index 00000000..b43764a1 --- /dev/null +++ b/modules/features/commcoach/serviceCommcoachIndexer.py @@ -0,0 +1,223 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +CommCoach Session Indexer. +Indexes coaching session data into the knowledge store (pgvector) for RAG-based long-term memory. +Called after session completion to ensure semantic searchability across 20+ sessions. +""" + +import logging +import uuid +import json +from typing import List, Dict, Any, Optional + +logger = logging.getLogger(__name__) + +_COACHING_FILE_PREFIX = "coaching-session:" + + +async def indexSessionData( + sessionId: str, + contextId: str, + userId: str, + featureInstanceId: str, + mandateId: str, + messages: List[Dict[str, Any]], + summary: Optional[str], + keyTopics: Optional[str], + goals: Optional[List[Any]], + insights: Optional[List[Any]], + tasks: Optional[List[Dict[str, Any]]], + contextTitle: str = "", + knowledgeService=None, +): + """Index a completed coaching session into the knowledge store. + + Creates ContentChunks with embeddings for: + - Each User+Assistant message pair (maximum detail depth) + - Session summary + - Key topics (individually, for precise retrieval) + - Current goals + - New insights + - Tasks (open + done) + """ + if not knowledgeService: + logger.warning("No knowledge service available for coaching indexer") + return + + syntheticFileId = f"{_COACHING_FILE_PREFIX}{sessionId}" + + chunks = [] + + # 1. Message pairs (User + Assistant) as individual chunks + messagePairs = _extractMessagePairs(messages) + for idx, pair in enumerate(messagePairs): + chunks.append({ + "contentObjectId": f"{sessionId}:msg-pair:{idx}", + "data": pair["text"], + "contextRef": { + "containerPath": f"session:{sessionId}", + "location": f"message-pair-{idx}", + "type": "coaching-message-pair", + "contextId": contextId, + "sessionId": sessionId, + "contextTitle": contextTitle, + }, + }) + + # 2. Session summary + if summary: + chunks.append({ + "contentObjectId": f"{sessionId}:summary", + "data": f"Session-Zusammenfassung ({contextTitle}): {summary}", + "contextRef": { + "containerPath": f"session:{sessionId}", + "location": "summary", + "type": "coaching-session-summary", + "contextId": contextId, + "sessionId": sessionId, + "contextTitle": contextTitle, + }, + }) + + # 3. 
Key topics (each as separate chunk for precise retrieval) + parsedTopics = _parseJsonSafe(keyTopics, []) + for tidx, topic in enumerate(parsedTopics): + topicStr = str(topic).strip() + if topicStr: + chunks.append({ + "contentObjectId": f"{sessionId}:topic:{tidx}", + "data": f"Coaching-Thema ({contextTitle}): {topicStr}", + "contextRef": { + "containerPath": f"session:{sessionId}", + "location": f"topic-{tidx}", + "type": "coaching-key-topic", + "contextId": contextId, + "sessionId": sessionId, + "contextTitle": contextTitle, + }, + }) + + # 4. Goals + if goals: + goalTexts = [g.get("text", g) if isinstance(g, dict) else str(g) for g in goals if g] + if goalTexts: + goalsStr = "\n".join(f"- {g}" for g in goalTexts) + chunks.append({ + "contentObjectId": f"{sessionId}:goals", + "data": f"Coaching-Ziele ({contextTitle}):\n{goalsStr}", + "contextRef": { + "containerPath": f"session:{sessionId}", + "location": "goals", + "type": "coaching-goals", + "contextId": contextId, + "sessionId": sessionId, + "contextTitle": contextTitle, + }, + }) + + # 5. Insights + if insights: + insightTexts = [i.get("text", i) if isinstance(i, dict) else str(i) for i in insights if i] + if insightTexts: + insightsStr = "\n".join(f"- {t}" for t in insightTexts) + chunks.append({ + "contentObjectId": f"{sessionId}:insights", + "data": f"Coaching-Erkenntnisse ({contextTitle}):\n{insightsStr}", + "contextRef": { + "containerPath": f"session:{sessionId}", + "location": "insights", + "type": "coaching-insights", + "contextId": contextId, + "sessionId": sessionId, + "contextTitle": contextTitle, + }, + }) + + # 6. Tasks + if tasks: + taskLines = [] + for t in tasks: + status = t.get("status", "open") + title = t.get("title", "") + if title: + taskLines.append(f"- [{status}] {title}") + if taskLines: + tasksStr = "\n".join(taskLines) + chunks.append({ + "contentObjectId": f"{sessionId}:tasks", + "data": f"Coaching-Aufgaben ({contextTitle}):\n{tasksStr}", + "contextRef": { + "containerPath": f"session:{sessionId}", + "location": "tasks", + "type": "coaching-tasks", + "contextId": contextId, + "sessionId": sessionId, + "contextTitle": contextTitle, + }, + }) + + if not chunks: + logger.info(f"No chunks to index for session {sessionId}") + return + + logger.info(f"Indexing {len(chunks)} chunks for coaching session {sessionId}") + + try: + contentObjects = [ + { + "contentObjectId": c["contentObjectId"], + "contentType": "text", + "data": c["data"], + "contextRef": c["contextRef"], + } + for c in chunks + ] + + await knowledgeService.indexFile( + fileId=syntheticFileId, + fileName=f"coaching-session-{sessionId[:8]}", + mimeType="application/x-coaching-session", + userId=userId, + featureInstanceId=featureInstanceId, + mandateId=mandateId, + contentObjects=contentObjects, + ) + logger.info(f"Successfully indexed coaching session {sessionId} ({len(chunks)} chunks)") + except Exception as e: + logger.error(f"Failed to index coaching session {sessionId}: {e}", exc_info=True) + + +def _extractMessagePairs(messages: List[Dict[str, Any]]) -> List[Dict[str, str]]: + """Extract User+Assistant pairs from message list.""" + pairs = [] + i = 0 + while i < len(messages): + msg = messages[i] + if msg.get("role") == "user": + userText = (msg.get("content") or "").strip() + assistantText = "" + if i + 1 < len(messages) and messages[i + 1].get("role") == "assistant": + assistantText = (messages[i + 1].get("content") or "").strip() + i += 2 + else: + i += 1 + if userText: + text = f"Benutzer: {userText}" + if assistantText: + text += f"\nCoach: 
{assistantText}" + pairs.append({"text": text}) + else: + i += 1 + return pairs + + +def _parseJsonSafe(value, fallback): + if not value: + return fallback + if isinstance(value, (list, dict)): + return value + try: + return json.loads(value) + except (json.JSONDecodeError, TypeError): + return fallback diff --git a/modules/features/commcoach/serviceCommcoachScheduler.py b/modules/features/commcoach/serviceCommcoachScheduler.py index 3db548cf..dcbc1e86 100644 --- a/modules/features/commcoach/serviceCommcoachScheduler.py +++ b/modules/features/commcoach/serviceCommcoachScheduler.py @@ -6,11 +6,44 @@ Handles daily reminders and scheduled email summaries. """ import logging +import html from typing import Dict, Any, List logger = logging.getLogger(__name__) +def _buildReminderHtmlBlock(contextTitles: List[str], streakDays: int) -> str: + rows = "".join( + '' + '•' + f'{html.escape(title)}' + '' + for title in contextTitles[:3] + ) + topicsBlock = ( + '' + '
<tr><td>'
+        '<h3 style="color: #1e40af; margin: 20px 0 8px; font-size: 16px;">Aktive Coaching-Themen</h3>'
+        f'<table role="presentation" width="100%" cellpadding="0" cellspacing="0">{rows}</table>'
+        '</td></tr>'
+        '</table>'
+    )
+    streakBlock = (
+        '<table role="presentation" width="100%" cellpadding="0" cellspacing="0">'
+        '<tr><td>'
+        '<h3 style="color: #1e40af; margin: 20px 0 8px; font-size: 16px;">Dein Rhythmus</h3>'
+        f'<p style="color: #374151; line-height: 1.6; font-size: 15px;">Aktueller Streak: '
+        f'<strong>{int(streakDays or 0)} Tage</strong></p>'
+        '</td></tr>'
+        '</table>
    ' + ) + return topicsBlock + streakBlock + + def registerScheduledJobs(eventManagement): """Register CommCoach scheduled jobs with the event management system.""" try: @@ -31,6 +64,7 @@ async def _runDailyReminders(): from modules.connectors.connectorDbPostgre import DatabaseConnector from .datamodelCommcoach import CoachingUserProfile, CoachingContextStatus from modules.interfaces.interfaceMessaging import getInterface as getMessagingInterface + from modules.shared.notifyMandateAdmins import _renderHtmlEmail, _resolveMandateName dbHost = APP_CONFIG.get("DB_HOST", "_no_config_default_data") db = DatabaseConnector( @@ -71,15 +105,21 @@ async def _runDailyReminders(): contextTitles = [c.get("title", "Unbenannt") for c in contexts[:3]] contextList = ", ".join(contextTitles) - subject = "Dein taegliches Coaching wartet" - message = f""" -
-        <h2>Zeit fuer dein Coaching</h2>
-
-        <p>Du hast aktive Coaching-Themen: {contextList}</p>
-
-        <p>Nimm dir 10 Minuten fuer eine kurze Session. Konsistenz ist der Schluessel zu Fortschritt.</p>
-
-        <p>Dein aktueller Streak: {profile.get('streakDays', 0)} Tage</p>
    - """ + subject = "Dein tägliches Coaching wartet" + mandateName = _resolveMandateName(profile.get("mandateId")) + htmlMessage = _renderHtmlEmail( + "Zeit für dein tägliches Coaching", + [ + f"Du hast aktuell {len(contexts)} aktive Coaching-Themen.", + "Schon 10 Minuten reichen oft, um einen Gedanken zu klären, eine nächste Aktion festzulegen oder ein Gespräch vorzubereiten.", + f"Im Fokus: {contextList}", + ], + mandateName, + footerNote="Diese Erinnerung wurde automatisch auf Basis deiner CommCoach-Einstellungen versendet.", + rawHtmlBlock=_buildReminderHtmlBlock(contextTitles, int(profile.get("streakDays", 0) or 0)), + ) - messaging.send("email", user.email, subject, message) + messaging.send("email", user.email, subject, htmlMessage) sentCount += 1 except Exception as e: logger.warning(f"Failed to send reminder to user {profile.get('userId')}: {e}") diff --git a/modules/interfaces/interfaceAiObjects.py b/modules/interfaces/interfaceAiObjects.py index f0aedc87..a859ffa7 100644 --- a/modules/interfaces/interfaceAiObjects.py +++ b/modules/interfaces/interfaceAiObjects.py @@ -134,7 +134,7 @@ class AiObjects: logger.info(f"Attempting AI call with model: {model.name} (attempt {attempt + 1}/{len(failoverModelList)})") if request.messages: - response = await self._callWithMessages(model, request.messages, options, request.tools) + response = await self._callWithMessages(model, request.messages, options, request.tools, toolChoice=request.toolChoice) else: response = await self._callWithModel(model, prompt, context, options) @@ -149,7 +149,7 @@ class AiObjects: await asyncio.sleep(retryAfter + 0.5) try: if request.messages: - response = await self._callWithMessages(model, request.messages, options, request.tools) + response = await self._callWithMessages(model, request.messages, options, request.tools, toolChoice=request.toolChoice) else: response = await self._callWithModel(model, prompt, context, options) logger.info(f"AI call successful with {model.name} after rate-limit retry") @@ -288,7 +288,8 @@ class AiObjects: async def _callWithMessages(self, model: AiModel, messages: List[Dict[str, Any]], options: AiCallOptions = None, - tools: List[Dict[str, Any]] = None) -> AiCallResponse: + tools: List[Dict[str, Any]] = None, + toolChoice: Any = None) -> AiCallResponse: """Call a model with pre-built messages (agent mode). 
Supports tools for native function calling.""" import json as _json @@ -302,7 +303,8 @@ class AiObjects: messages=messages, model=model, options=options or {}, - tools=tools + tools=tools, + toolChoice=toolChoice, ) modelResponse = await model.functionCall(modelCall) @@ -379,7 +381,7 @@ class AiObjects: for attempt, model in enumerate(failoverModelList): try: logger.info(f"Streaming AI call with model: {model.name} (attempt {attempt + 1})") - async for chunk in self._callWithMessagesStream(model, request.messages, options, request.tools): + async for chunk in self._callWithMessagesStream(model, request.messages, options, request.tools, toolChoice=request.toolChoice): yield chunk return @@ -390,7 +392,7 @@ class AiObjects: logger.info(f"Rate limit on {model.name}, waiting {retryAfter:.1f}s before retry") await asyncio.sleep(retryAfter + 0.5) try: - async for chunk in self._callWithMessagesStream(model, request.messages, options, request.tools): + async for chunk in self._callWithMessagesStream(model, request.messages, options, request.tools, toolChoice=request.toolChoice): yield chunk return except Exception as retryErr: @@ -421,6 +423,7 @@ class AiObjects: async def _callWithMessagesStream( self, model: AiModel, messages: List[Dict[str, Any]], options: AiCallOptions = None, tools: List[Dict[str, Any]] = None, + toolChoice: Any = None, ) -> AsyncGenerator[Union[str, AiCallResponse], None]: """Stream a model call. Yields str deltas, then final AiCallResponse with billing.""" from modules.datamodels.datamodelAi import AiModelCall, AiModelResponse @@ -429,7 +432,7 @@ class AiObjects: startTime = time.time() if not model.functionCallStream: - response = await self._callWithMessages(model, messages, options, tools) + response = await self._callWithMessages(model, messages, options, tools, toolChoice=toolChoice) if response.content: yield response.content yield response @@ -438,6 +441,7 @@ class AiObjects: modelCall = AiModelCall( messages=messages, model=model, options=options or {}, tools=tools, + toolChoice=toolChoice, ) finalModelResponse = None diff --git a/modules/routes/routeVoiceGoogle.py b/modules/routes/routeVoiceGoogle.py index 1c796361..309e59bb 100644 --- a/modules/routes/routeVoiceGoogle.py +++ b/modules/routes/routeVoiceGoogle.py @@ -444,7 +444,7 @@ async def health_check(currentUser: User = Depends(getCurrentUser)): async def get_voice_settings(currentUser: User = Depends(getCurrentUser)): """Get voice settings for the current user (reads from UserVoicePreferences).""" from modules.datamodels.datamodelUam import UserVoicePreferences - from modules.security.rootAccess import getRootInterface + from modules.interfaces.interfaceDbApp import getRootInterface rootInterface = getRootInterface() userId = str(currentUser.id) @@ -464,7 +464,7 @@ async def save_voice_settings( ): """Save voice settings for the current user (writes to UserVoicePreferences).""" from modules.datamodels.datamodelUam import UserVoicePreferences, _normalizeTtsVoiceMap - from modules.security.rootAccess import getRootInterface + from modules.interfaces.interfaceDbApp import getRootInterface rootInterface = getRootInterface() userId = str(currentUser.id) diff --git a/modules/serviceCenter/services/serviceAgent/agentLoop.py b/modules/serviceCenter/services/serviceAgent/agentLoop.py index c196d237..fa76141d 100644 --- a/modules/serviceCenter/services/serviceAgent/agentLoop.py +++ b/modules/serviceCenter/services/serviceAgent/agentLoop.py @@ -48,6 +48,7 @@ async def runAgentLoop( conversationHistory: List[Dict[str, 
Any]] = None, persistRoundMemoryFn: Callable[..., Awaitable[None]] = None, getExternalMemoryKeysFn: Callable[[], List[str]] = None, + systemPromptOverride: str = None, ) -> AsyncGenerator[AgentEvent, None]: """Run the agent loop. Yields AgentEvent for each step (SSE-ready). @@ -74,16 +75,20 @@ async def runAgentLoop( featureInstanceId=featureInstanceId ) - tools = toolRegistry.getTools() - toolDefinitions = toolRegistry.formatToolsForFunctionCalling() + activeToolSet = config.toolSet if config else None + tools = toolRegistry.getTools(toolSet=activeToolSet) + toolDefinitions = toolRegistry.formatToolsForFunctionCalling(toolSet=activeToolSet) # Text-based tool descriptions are ONLY used as fallback when native function # calling is unavailable. Including both creates conflicting instructions # (text ```tool_call format vs native tool_use blocks) and can cause the model # to respond with plain text instead of actual tool calls. - toolsText = "" if toolDefinitions else toolRegistry.formatToolsForPrompt() + toolsText = "" if toolDefinitions else toolRegistry.formatToolsForPrompt(toolSet=activeToolSet) - systemPrompt = buildSystemPrompt(tools, toolsText, userLanguage=userLanguage) + if systemPromptOverride: + systemPrompt = systemPromptOverride + else: + systemPrompt = buildSystemPrompt(tools, toolsText, userLanguage=userLanguage) conversation = ConversationManager(systemPrompt) if conversationHistory: conversation.loadHistory(conversationHistory) @@ -168,7 +173,7 @@ async def runAgentLoop( temperature=config.temperature ), messages=conversation.messages, - tools=toolDefinitions + tools=toolDefinitions if toolDefinitions else None, ) try: diff --git a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py index 08950ea3..b370b827 100644 --- a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py +++ b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py @@ -132,6 +132,8 @@ class AgentService: additionalTools: List[Dict[str, Any]] = None, userLanguage: str = "", conversationHistory: List[Dict[str, Any]] = None, + buildRagContextFn: Callable = None, + systemPromptOverride: str = None, ) -> AsyncGenerator[AgentEvent, None]: """Run an agent with the given prompt and tools. 
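Minimal consumption sketch (assumes an already-initialized AgentService; prompt and
        workflowId values are illustrative):

            async for event in agentService.runAgent(
                prompt="Summarize my last coaching session.",
                toolSet="none",
                workflowId="demo:1",
            ):
                if event.type == AgentEventTypeEnum.FINAL:
                    print(event.content)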
@@ -144,6 +146,8 @@ class AgentService: additionalTools: Extra tool definitions to register dynamically userLanguage: ISO 639-1 language code; falls back to user.language from profile conversationHistory: Prior messages for follow-up context + buildRagContextFn: Optional custom RAG context builder (overrides default) + systemPromptOverride: Optional system prompt override (replaces generated prompt) Yields: AgentEvent for each step (SSE-ready) @@ -163,7 +167,8 @@ class AgentService: aiCallFn = self._createAiCallFn() aiCallStreamFn = self._createAiCallStreamFn() getWorkflowCostFn = self._createGetWorkflowCostFn(workflowId) - buildRagContextFn = self._createBuildRagContextFn() + if buildRagContextFn is None: + buildRagContextFn = self._createBuildRagContextFn() persistRoundMemoryFn = self._createPersistRoundMemoryFn(workflowId) getExternalMemoryKeysFn = self._createGetExternalMemoryKeysFn(workflowId) @@ -183,6 +188,7 @@ class AgentService: conversationHistory=conversationHistory, persistRoundMemoryFn=persistRoundMemoryFn, getExternalMemoryKeysFn=getExternalMemoryKeysFn, + systemPromptOverride=systemPromptOverride, ): if event.type == AgentEventTypeEnum.AGENT_SUMMARY: await self._persistTrace(workflowId, event.data or {}) @@ -2610,54 +2616,54 @@ def _registerCoreTools(registry: ToolRegistry, services): if not voiceName: try: from modules.datamodels.datamodelUam import UserVoicePreferences - from modules.security.rootAccess import getRootInterface + from modules.interfaces.interfaceDbApp import getRootInterface userId = context.get("userId", "") if userId: rootIf = getRootInterface() prefRecords = rootIf.db.getRecordset( UserVoicePreferences, - recordFilter={"userId": userId, "mandateId": mandateId} + recordFilter={"userId": userId} ) - if not prefRecords and mandateId: - prefRecords = rootIf.db.getRecordset( - UserVoicePreferences, - recordFilter={"userId": userId} - ) if prefRecords: - vs = prefRecords[0] if isinstance(prefRecords[0], dict) else prefRecords[0].model_dump() if hasattr(prefRecords[0], "model_dump") else prefRecords[0] - voiceMap = vs.get("ttsVoiceMap", {}) or {} - if isinstance(voiceMap, dict) and voiceMap: - selectedKey = None - selectedVoiceEntry = None - baseLanguage = language.split("-")[0].lower() if isinstance(language, str) and language else "" + allPrefs = [ + r if isinstance(r, dict) else r.model_dump() if hasattr(r, "model_dump") else r + for r in prefRecords + ] + _mid = str(mandateId or "").strip() + scopedPref = next((p for p in allPrefs if str(p.get("mandateId") or "").strip() == _mid), None) + globalPref = next((p for p in allPrefs if not str(p.get("mandateId") or "").strip()), None) - if isinstance(language, str) and language in voiceMap: - selectedKey = language - selectedVoiceEntry = voiceMap[language] + def _resolveVoiceFromMap(prefDict, lang): + vm = (prefDict or {}).get("ttsVoiceMap", {}) or {} + if not isinstance(vm, dict) or not vm: + return None + baseLang = lang.split("-")[0].lower() if isinstance(lang, str) and lang else "" + langNorm = str(lang or "").strip() + if langNorm in vm: + entry = vm[langNorm] + return entry.get("voiceName") if isinstance(entry, dict) else entry + if baseLang and baseLang in vm: + entry = vm[baseLang] + return entry.get("voiceName") if isinstance(entry, dict) else entry + if baseLang: + for mk, mv in vm.items(): + mkn = str(mk).lower() + if mkn == baseLang or mkn.startswith(f"{baseLang}-"): + return mv.get("voiceName") if isinstance(mv, dict) else mv + return None - if selectedVoiceEntry is None and baseLanguage and 
baseLanguage in voiceMap: - selectedKey = baseLanguage - selectedVoiceEntry = voiceMap[baseLanguage] - - if selectedVoiceEntry is None and baseLanguage: - for mapKey, mapValue in voiceMap.items(): - mapKeyNorm = str(mapKey).lower() - if mapKeyNorm == baseLanguage or mapKeyNorm.startswith(f"{baseLanguage}-"): - selectedKey = str(mapKey) - selectedVoiceEntry = mapValue - break - - if selectedVoiceEntry is not None: - voiceName = ( - selectedVoiceEntry.get("voiceName") - if isinstance(selectedVoiceEntry, dict) - else selectedVoiceEntry - ) - logger.info( - f"textToSpeech: using configured voice '{voiceName}' for requested language '{language}' (matched key '{selectedKey}')" - ) - if not voiceName and vs.get("ttsVoice") and vs.get("ttsLanguage") == language: - voiceName = vs["ttsVoice"] + voiceName = ( + _resolveVoiceFromMap(scopedPref, language) + or _resolveVoiceFromMap(globalPref, language) + or _resolveVoiceFromMap(allPrefs[0], language) + ) + if not voiceName: + for candidate in [globalPref, scopedPref, allPrefs[0]]: + if candidate and candidate.get("ttsVoice") and candidate.get("ttsLanguage") == language: + voiceName = candidate["ttsVoice"] + break + if voiceName: + logger.info(f"textToSpeech: using configured voice '{voiceName}' for language '{language}'") except Exception as prefErr: logger.debug(f"textToSpeech: could not load voice preferences: {prefErr}") @@ -3416,3 +3422,21 @@ def _registerCoreTools(registry: ToolRegistry, services): }, readOnly=True, ) + + # Tag core-only tools so restricted toolSets (e.g. "commcoach") exclude them. + # Tools NOT in this set remain toolSet=None → available to ALL sets. + _CORE_ONLY_TOOLS = { + "listFiles", "listFolders", "tagFile", "moveFile", "createFolder", + "writeFile", "deleteFile", "renameFile", "translateText", + "deleteFolder", "renameFolder", "moveFolder", "copyFile", "replaceInFile", + "listConnections", "uploadToExternal", "sendMail", "downloadFromDataSource", + "browseContainer", "readContentObjects", "extractContainerItem", + "summarizeContent", "describeImage", "renderDocument", + "textToSpeech", "generateImage", "createChart", + "speechToText", "detectLanguage", "neutralizeData", "executeCode", + "listWorkflowHistory", "readWorkflowMessages", + } + for _toolName in _CORE_ONLY_TOOLS: + _td = registry.getTool(_toolName) + if _td: + _td.toolSet = "core" diff --git a/modules/serviceCenter/services/serviceAgent/toolRegistry.py b/modules/serviceCenter/services/serviceAgent/toolRegistry.py index d241bb93..b4b5cd86 100644 --- a/modules/serviceCenter/services/serviceAgent/toolRegistry.py +++ b/modules/serviceCenter/services/serviceAgent/toolRegistry.py @@ -125,20 +125,22 @@ class ToolRegistry: durationMs=durationMs ) - def formatToolsForPrompt(self) -> str: - """Format all tools as text for system prompt (text-based fallback).""" + def formatToolsForPrompt(self, toolSet: str = None) -> str: + """Format tools as text for system prompt (text-based fallback).""" + tools = self.getTools(toolSet=toolSet) if toolSet else list(self._tools.values()) parts = [] - for tool in self._tools.values(): + for tool in tools: paramStr = ", ".join( f"{k}: {v}" for k, v in tool.parameters.items() ) if tool.parameters else "none" parts.append(f"- **{tool.name}**: {tool.description}\n Parameters: {{{paramStr}}}") return "\n".join(parts) - def formatToolsForFunctionCalling(self) -> List[Dict[str, Any]]: - """Format all tools as OpenAI-compatible function definitions for native function calling.""" + def formatToolsForFunctionCalling(self, toolSet: str = None) -> 
List[Dict[str, Any]]: + """Format tools as OpenAI-compatible function definitions for native function calling.""" + tools = self.getTools(toolSet=toolSet) if toolSet else list(self._tools.values()) functions = [] - for tool in self._tools.values(): + for tool in tools: functions.append({ "type": "function", "function": { From 563018b5e1ace8364600fe8f72fdb789c539a82b Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Wed, 1 Apr 2026 22:16:08 +0200 Subject: [PATCH 29/33] fixed tool execute subprocess --- .../services/serviceAgent/sandboxExecutor.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/modules/serviceCenter/services/serviceAgent/sandboxExecutor.py b/modules/serviceCenter/services/serviceAgent/sandboxExecutor.py index 1882d7eb..15362e65 100644 --- a/modules/serviceCenter/services/serviceAgent/sandboxExecutor.py +++ b/modules/serviceCenter/services/serviceAgent/sandboxExecutor.py @@ -3,7 +3,6 @@ """Sandboxed code execution for the AI agent executeCode tool.""" import logging -import signal import sys import io import traceback @@ -72,15 +71,12 @@ async def executePython(code: str) -> Dict[str, Any]: sys.stdout = capturedOutput sys.stderr = capturedOutput - if sys.platform != "win32": - signal.signal(signal.SIGALRM, lambda *_: (_ for _ in ()).throw(TimeoutError("Execution timed out"))) - signal.alarm(_MAX_EXECUTION_TIME_S) + # Do not use signal.SIGALRM here: _run executes inside a thread-pool worker + # (asyncio.run_in_executor). signal.signal only works on the main thread. + # Wall-clock limit is enforced by asyncio.wait_for around run_in_executor. exec(compile(code, "", "exec"), restrictedGlobals) - if sys.platform != "win32": - signal.alarm(0) - output = capturedOutput.getvalue() if len(output) > _MAX_OUTPUT_CHARS: output = output[:_MAX_OUTPUT_CHARS] + f"\n... 
(truncated at {_MAX_OUTPUT_CHARS} chars)" @@ -94,14 +90,12 @@ async def executePython(code: str) -> Dict[str, Any]: finally: sys.stdout = oldStdout sys.stderr = oldStderr - if sys.platform != "win32": - signal.alarm(0) loop = asyncio.get_event_loop() try: result = await asyncio.wait_for( loop.run_in_executor(None, _run), - timeout=_MAX_EXECUTION_TIME_S + 5, + timeout=float(_MAX_EXECUTION_TIME_S) + 5.0, ) return result except asyncio.TimeoutError: From 93f28f57df1910efe6bf92819995eed1c68b26a4 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Thu, 2 Apr 2026 11:58:34 +0200 Subject: [PATCH 30/33] fix sequenst subscription and mandate --- modules/interfaces/interfaceDbApp.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py index 01863b41..5e346a86 100644 --- a/modules/interfaces/interfaceDbApp.py +++ b/modules/interfaces/interfaceDbApp.py @@ -1492,9 +1492,8 @@ class AppObjects: if not adminRoleId: raise ValueError(f"No admin role found for mandate {mandateId} — cannot assign user without role") - self.createUserMandate(userId, mandateId, roleIds=[adminRoleId], skipCapacityCheck=True) - from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot + from modules.interfaces.interfaceDbBilling import _getRootInterface as _getBillingRoot from datetime import datetime, timezone, timedelta now = datetime.now(timezone.utc) @@ -1514,6 +1513,20 @@ class AppObjects: subInterface = _getSubRoot() subInterface.createSubscription(subscription) + try: + billingRoot = _getBillingRoot() + billingRoot.getOrCreateSettings(mandateId) + billingRoot.ensureActivationBudget(mandateId, planKey) + except Exception as billingEx: + logger.error( + "Initial billing setup failed for mandate %s (plan=%s): %s", + mandateId, + planKey, + billingEx, + ) + + self.createUserMandate(userId, mandateId, roleIds=[adminRoleId], skipCapacityCheck=True) + featureInterface = getFeatureInterface(self.db) mainModules = loadFeatureMainModules() createdInstances = [] @@ -1552,6 +1565,8 @@ class AppObjects: except Exception as e: logger.error(f"Error auto-creating instance for '{featureName}': {e}") + self._syncSubscriptionQuantity(mandateId) + logger.info(f"Provisioned mandate {mandateId} (plan={planKey}) for user {userId}, instances={createdInstances}") return { "mandateId": mandateId, From 268c4b8e1e3112e032d0635bc71f1235b1f29d09 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Thu, 2 Apr 2026 13:09:04 +0200 Subject: [PATCH 31/33] prices --- modules/datamodels/datamodelSubscription.py | 8 +- .../serviceSubscription/stripeBootstrap.py | 118 +++++++++++++++--- 2 files changed, 107 insertions(+), 19 deletions(-) diff --git a/modules/datamodels/datamodelSubscription.py b/modules/datamodels/datamodelSubscription.py index 8fcf10f2..227ba5eb 100644 --- a/modules/datamodels/datamodelSubscription.py +++ b/modules/datamodels/datamodelSubscription.py @@ -217,8 +217,8 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = { "de": "Nutzungsbasierte Abrechnung pro aktivem User und Feature-Instanz, monatlich. Inkl. 10 CHF AI-Budget.", }, billingPeriod=BillingPeriodEnum.MONTHLY, - pricePerUserCHF=90.0, - pricePerFeatureInstanceCHF=150.0, + pricePerUserCHF=19.0, + pricePerFeatureInstanceCHF=29.0, maxDataVolumeMB=1024, budgetAiCHF=10.0, ), @@ -231,8 +231,8 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = { "de": "Nutzungsbasierte Abrechnung pro aktivem User und Feature-Instanz, jährlich. Inkl. 
120 CHF AI-Budget.", }, billingPeriod=BillingPeriodEnum.YEARLY, - pricePerUserCHF=1080.0, - pricePerFeatureInstanceCHF=1800.0, + pricePerUserCHF=228.0, + pricePerFeatureInstanceCHF=348.0, maxDataVolumeMB=1024, budgetAiCHF=120.0, ), diff --git a/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py b/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py index 14e9424a..869ab52f 100644 --- a/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py +++ b/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py @@ -9,6 +9,12 @@ so that invoice line items show clear, descriptive names: - "Feature-Instanzen" Idempotent — safe to call on every startup. + +Source of truth for unit amounts is BUILTIN_PLANS (CHF). On each run, persisted +Stripe Price IDs are reconciled: if Stripe's unit_amount differs from the +catalog, a new Price is created, the old one is archived, and poweron_billing +StripePlanPrice is updated. Other stale active Prices on the same Product +(same recurring interval) are archived so only the catalog-matching Price stays active. """ import logging @@ -93,12 +99,25 @@ def _createStripeProduct(stripe, name: str, description: str, planKey: str, line return product.id -def _findExistingStripePrice(stripe, productId: str, unitAmount: int, interval: str) -> Optional[str]: +def _recurringMatches(recurring: Dict, interval: str, intervalCount: int) -> bool: + if not recurring: + return False + if recurring.get("interval") != interval: + return False + ic = recurring.get("interval_count") + if ic is None: + ic = 1 + return int(ic) == int(intervalCount) + + +def _findExistingStripePrice( + stripe, productId: str, unitAmount: int, interval: str, intervalCount: int = 1, +) -> Optional[str]: try: prices = stripe.Price.list(product=productId, active=True, limit=50) for p in prices.data: recurring = p.get("recurring") or {} - if p.get("unit_amount") == unitAmount and recurring.get("interval") == interval: + if p.get("unit_amount") == unitAmount and _recurringMatches(recurring, interval, intervalCount): return p.id except Exception: pass @@ -115,24 +134,43 @@ def _getStripePriceAmount(stripe, priceId: str) -> Optional[int]: return None -def _reconcilePrice(stripe, productId: str, oldPriceId: str, expectedCHF: float, interval: str, nickname: str) -> str: +def _reconcilePrice( + stripe, + productId: str, + oldPriceId: str, + expectedCHF: float, + interval: str, + nickname: str, + intervalCount: int = 1, +) -> str: """If the stored Stripe Price has a different amount, create a new one and deactivate the old.""" - expectedCents = int(expectedCHF * 100) - actualCents = _getStripePriceAmount(stripe, oldPriceId) + from modules.shared.stripeClient import stripeToDict - if actualCents == expectedCents: + expectedCents = int(round(expectedCHF * 100)) + actualCents = _getStripePriceAmount(stripe, oldPriceId) + matchesRecurring = False + try: + raw = stripe.Price.retrieve(oldPriceId) + pd = stripeToDict(raw) + matchesRecurring = _recurringMatches(pd.get("recurring") or {}, interval, intervalCount) + except Exception: + pass + + if actualCents == expectedCents and matchesRecurring: return oldPriceId logger.warning( - "Price drift detected for %s: Stripe has %s Rappen, catalog expects %s Rappen. Rotating price.", + "Price drift or recurring mismatch for %s: Stripe amount=%s Rappen (expected %s). 
Rotating price.", oldPriceId, actualCents, expectedCents, ) - existingMatch = _findExistingStripePrice(stripe, productId, expectedCents, interval) + existingMatch = _findExistingStripePrice(stripe, productId, expectedCents, interval, intervalCount) if existingMatch: newPriceId = existingMatch else: - newPriceId = _createStripePrice(stripe, productId, expectedCHF, interval, nickname) + newPriceId = _createStripePrice( + stripe, productId, expectedCHF, interval, nickname, intervalCount, + ) try: stripe.Price.modify(oldPriceId, active=False) @@ -143,18 +181,45 @@ def _reconcilePrice(stripe, productId: str, oldPriceId: str, expectedCHF: float, return newPriceId -def _createStripePrice(stripe, productId: str, unitAmountCHF: float, interval: str, nickname: str) -> str: +def _createStripePrice( + stripe, productId: str, unitAmountCHF: float, interval: str, nickname: str, intervalCount: int = 1, +) -> str: price = stripe.Price.create( product=productId, - unit_amount=int(unitAmountCHF * 100), + unit_amount=int(round(unitAmountCHF * 100)), currency="chf", - recurring={"interval": interval}, + recurring={"interval": interval, "interval_count": intervalCount}, nickname=nickname, ) logger.info("Created Stripe Price %s (%s, %s CHF/%s)", price.id, nickname, unitAmountCHF, interval) return price.id +def _archiveOtherRecurringPrices( + stripe, productId: Optional[str], keepPriceId: Optional[str], interval: str, intervalCount: int = 1, +) -> None: + """Archive every other active recurring price on the product (same interval pattern).""" + if not productId or not keepPriceId: + return + try: + prices = stripe.Price.list(product=productId, active=True, limit=100) + for p in prices.data: + if p.id == keepPriceId: + continue + recurring = p.get("recurring") or {} + if not recurring: + continue + if not _recurringMatches(recurring, interval, intervalCount): + continue + try: + stripe.Price.modify(p.id, active=False) + logger.info("Archived stale Stripe Price %s on product %s", p.id, productId) + except Exception as ex: + logger.warning("Could not archive price %s: %s", p.id, ex) + except Exception as e: + logger.warning("Stale price archive pass failed for product %s: %s", productId, e) + + def _validateStripeIdsExist(stripe, mapping: StripePlanPrice) -> bool: """Quick check whether at least the stored product IDs still exist in Stripe. 
Returns False when running against a different Stripe account or after DB copy.""" @@ -195,6 +260,7 @@ def bootstrapStripePrices() -> None: continue interval = stripePeriod["interval"] + intervalCount = int(stripePeriod.get("interval_count") or 1) if planKey in existing: mapping = existing[planKey] @@ -206,6 +272,7 @@ def bootstrapStripePrices() -> None: reconciledUsers = _reconcilePrice( stripe, mapping.stripeProductIdUsers, mapping.stripePriceIdUsers, plan.pricePerUserCHF, interval, f"{planKey} — Benutzer-Lizenz", + intervalCount, ) if reconciledUsers != mapping.stripePriceIdUsers: changed = True @@ -213,16 +280,27 @@ def bootstrapStripePrices() -> None: reconciledInstances = _reconcilePrice( stripe, mapping.stripeProductIdInstances, mapping.stripePriceIdInstances, plan.pricePerFeatureInstanceCHF, interval, f"{planKey} — Feature-Instanz", + intervalCount, ) if reconciledInstances != mapping.stripePriceIdInstances: changed = True + _archiveOtherRecurringPrices( + stripe, mapping.stripeProductIdUsers, reconciledUsers, interval, intervalCount, + ) + _archiveOtherRecurringPrices( + stripe, mapping.stripeProductIdInstances, reconciledInstances, interval, intervalCount, + ) + if changed: db.recordModify(StripePlanPrice, mapping.id, { "stripePriceIdUsers": reconciledUsers, "stripePriceIdInstances": reconciledInstances, }) - logger.info("Reconciled Stripe prices for plan %s: users=%s, instances=%s", planKey, reconciledUsers, reconciledInstances) + logger.info( + "Reconciled Stripe prices for plan %s to catalog (CHF): users=%s, instances=%s", + planKey, reconciledUsers, reconciledInstances, + ) else: logger.debug("Stripe prices up-to-date for plan %s", planKey) continue @@ -245,11 +323,16 @@ def bootstrapStripePrices() -> None: stripe, "Benutzer-Lizenzen", f"Benutzer-Lizenzen für {plan.title.get('de', planKey)}", planKey, "users", ) - priceIdUsers = _findExistingStripePrice(stripe, productIdUsers, int(plan.pricePerUserCHF * 100), interval) + userCents = int(round(plan.pricePerUserCHF * 100)) + priceIdUsers = _findExistingStripePrice( + stripe, productIdUsers, userCents, interval, intervalCount, + ) if not priceIdUsers: priceIdUsers = _createStripePrice( stripe, productIdUsers, plan.pricePerUserCHF, interval, f"{planKey} — Benutzer-Lizenz", + intervalCount, ) + _archiveOtherRecurringPrices(stripe, productIdUsers, priceIdUsers, interval, intervalCount) if plan.pricePerFeatureInstanceCHF > 0: productIdInstances = _findStripeProduct(stripe, planKey, "instances") @@ -258,14 +341,19 @@ def bootstrapStripePrices() -> None: stripe, "Feature-Instanzen", f"Feature-Instanzen für {plan.title.get('de', planKey)}", planKey, "instances", ) + instCents = int(round(plan.pricePerFeatureInstanceCHF * 100)) priceIdInstances = _findExistingStripePrice( - stripe, productIdInstances, int(plan.pricePerFeatureInstanceCHF * 100), interval, + stripe, productIdInstances, instCents, interval, intervalCount, ) if not priceIdInstances: priceIdInstances = _createStripePrice( stripe, productIdInstances, plan.pricePerFeatureInstanceCHF, interval, f"{planKey} — Feature-Instanz", + intervalCount, ) + _archiveOtherRecurringPrices( + stripe, productIdInstances, priceIdInstances, interval, intervalCount, + ) persistData = { "stripeProductId": "", From ecbdd1ea74c9ef944f3b3b9edebb750217f36185 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Thu, 2 Apr 2026 14:12:49 +0200 Subject: [PATCH 32/33] pricing --- modules/datamodels/datamodelSubscription.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/modules/datamodels/datamodelSubscription.py b/modules/datamodels/datamodelSubscription.py index 227ba5eb..1791e7a9 100644 --- a/modules/datamodels/datamodelSubscription.py +++ b/modules/datamodels/datamodelSubscription.py @@ -217,8 +217,8 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = { "de": "Nutzungsbasierte Abrechnung pro aktivem User und Feature-Instanz, monatlich. Inkl. 10 CHF AI-Budget.", }, billingPeriod=BillingPeriodEnum.MONTHLY, - pricePerUserCHF=19.0, - pricePerFeatureInstanceCHF=29.0, + pricePerUserCHF=79.0, + pricePerFeatureInstanceCHF=119.0, maxDataVolumeMB=1024, budgetAiCHF=10.0, ), @@ -231,8 +231,8 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = { "de": "Nutzungsbasierte Abrechnung pro aktivem User und Feature-Instanz, jährlich. Inkl. 120 CHF AI-Budget.", }, billingPeriod=BillingPeriodEnum.YEARLY, - pricePerUserCHF=228.0, - pricePerFeatureInstanceCHF=348.0, + pricePerUserCHF=948.0, + pricePerFeatureInstanceCHF=1428.0, maxDataVolumeMB=1024, budgetAiCHF=120.0, ), From 50bf59879fded9d7ee7b7bbad6e75f63e7892802 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Thu, 2 Apr 2026 23:53:36 +0200 Subject: [PATCH 33/33] fix: mandate subscription provisioning, capacity errors, invitations API Made-with: Cursor --- modules/interfaces/interfaceDbApp.py | 4 ++- modules/routes/routeDataMandates.py | 6 ++++ modules/routes/routeInvitations.py | 3 +- .../mainServiceSubscription.py | 33 ++++++++++++++++--- 4 files changed, 39 insertions(+), 7 deletions(-) diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py index 5e346a86..d52c23d6 100644 --- a/modules/interfaces/interfaceDbApp.py +++ b/modules/interfaces/interfaceDbApp.py @@ -1990,8 +1990,10 @@ class AppObjects: cleanedRecord = dict(createdRecord) return UserMandate(**cleanedRecord) except Exception as e: + if e.__class__.__name__ == "SubscriptionCapacityException": + raise logger.error(f"Error creating UserMandate: {e}") - raise ValueError(f"Failed to create UserMandate: {e}") + raise ValueError(f"Failed to create UserMandate: {e}") from e def _ensureUserBillingAccount(self, userId: str, mandateId: str) -> None: """ diff --git a/modules/routes/routeDataMandates.py b/modules/routes/routeDataMandates.py index 1615a03a..cb6a3efc 100644 --- a/modules/routes/routeDataMandates.py +++ b/modules/routes/routeDataMandates.py @@ -31,6 +31,7 @@ from modules.datamodels.datamodelMembership import UserMandate, UserMandateRole from modules.datamodels.datamodelRbac import Role from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict from modules.routes.routeNotifications import create_access_change_notification +from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import SubscriptionCapacityException # ============================================================================= @@ -795,6 +796,11 @@ def add_user_to_mandate( except HTTPException: raise + except SubscriptionCapacityException as cap: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail=cap.message, + ) except Exception as e: logger.error(f"Error adding user to mandate: {e}") raise HTTPException( diff --git a/modules/routes/routeInvitations.py b/modules/routes/routeInvitations.py index 8e3be0ba..6e34eb88 100644 --- a/modules/routes/routeInvitations.py +++ b/modules/routes/routeInvitations.py @@ -41,11 +41,10 @@ class InvitationCreate(BaseModel): - Mandate-level: featureInstanceId omitted, roleIds are mandate-level roles (user, viewer, 
admin) - Feature-instance-level: featureInstanceId required, roleIds are instance-level roles - Email is required for new users; targetUsername is optional. At least one of email or targetUsername must be provided. """ targetUsername: Optional[str] = Field(None, description="Username of the user to invite (must match on acceptance)") - email: Optional[str] = Field(None, description="Email address to send invitation link (required for new users)") + email: Optional[str] = Field(None, description="Email address to send invitation link (optional if targetUsername is set)") featureInstanceId: Optional[str] = Field(None, description="Feature instance to grant access to (optional for mandate-level invitations)") roleIds: List[str] = Field(..., description="Role IDs: mandate-level (user, viewer, admin) or instance-level") frontendUrl: str = Field(..., description="Frontend URL for building the invite link (provided by frontend)") diff --git a/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py b/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py index 9535a2da..89e20112 100644 --- a/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py +++ b/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py @@ -786,15 +786,40 @@ class SubscriptionInactiveException(Exception): return out +_SUBSCRIPTION_LIMITS_UI_HINT_DE = ( + " Details zu Ihrem Abonnement, den enthaltenen Limits und Upgrade-Optionen: " + "Menü «Administration» → «Billing» → Registerkarte «Abonnement»." +) + + class SubscriptionCapacityException(Exception): def __init__(self, resourceType: str, currentCount: int, maxAllowed: int, message: Optional[str] = None): self.resourceType = resourceType self.currentCount = currentCount self.maxAllowed = maxAllowed - self.message = message or ( - f"Ihr Plan erlaubt maximal {maxAllowed} {'Benutzer' if resourceType == 'users' else 'Feature-Instanzen'} " - f"(aktuell {currentCount}). Bitte wechseln Sie zu einem grösseren Plan." - ) + if message is not None: + self.message = message + elif resourceType == "users": + self.message = ( + f"Mit dem aktuellen Abonnement sind für diesen Mandanten höchstens {maxAllowed} " + f"Benutzer zulässig (derzeit {currentCount}). " + f"Ohne Planwechsel können keine weiteren Benutzer hinzugefügt werden." + ) + _SUBSCRIPTION_LIMITS_UI_HINT_DE + elif resourceType == "featureInstances": + self.message = ( + f"Es sind höchstens {maxAllowed} aktive Feature-Instanzen erlaubt (derzeit {currentCount}). " + f"Bitte Abonnement erweitern oder eine Instanz entfernen." + ) + _SUBSCRIPTION_LIMITS_UI_HINT_DE + elif resourceType == "dataVolumeMB": + self.message = ( + f"Das im Abonnement enthaltene Datenvolumen ({maxAllowed} MB) reicht nicht " + f"(aktuell ca. {currentCount} MB). Bitte Speicher-Limit oder Plan anpassen." + ) + _SUBSCRIPTION_LIMITS_UI_HINT_DE + else: + self.message = ( + f"Abonnement-Limit überschritten (Ressource «{resourceType}»: " + f"aktuell {currentCount}, erlaubt {maxAllowed})." + ) + _SUBSCRIPTION_LIMITS_UI_HINT_DE super().__init__(self.message) def toClientDict(self) -> Dict[str, Any]:
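
Note on the reconciliation pattern in stripeBootstrap.py: Stripe Prices are immutable in amount, so "updating" a price means creating a replacement, archiving the stale one, and persisting the new ID. The following is a condensed, self-contained sketch of that rotate-and-archive flow under stated assumptions: the helper name rotatePriceIfDrifted is hypothetical, stripe.api_key is assumed to be configured elsewhere, and error handling is trimmed; the patch's actual implementation lives in _reconcilePrice and _archiveOtherRecurringPrices.

import stripe  # assumes stripe.api_key is configured by the caller


def rotatePriceIfDrifted(productId: str, oldPriceId: str, expectedCHF: float,
                         interval: str, intervalCount: int = 1) -> str:
    """Hypothetical condensation of the patch's _reconcilePrice flow."""
    expectedCents = int(round(expectedCHF * 100))
    old = stripe.Price.retrieve(oldPriceId)
    recurring = old.get("recurring") or {}
    sameRecurring = (recurring.get("interval") == interval
                     and int(recurring.get("interval_count") or 1) == intervalCount)
    if old.get("unit_amount") == expectedCents and sameRecurring:
        return oldPriceId  # Stripe already matches the catalog; keep the price

    # Amount or interval drifted: create the replacement first, then archive.
    new = stripe.Price.create(
        product=productId,
        unit_amount=expectedCents,
        currency="chf",
        recurring={"interval": interval, "interval_count": intervalCount},
    )
    stripe.Price.modify(oldPriceId, active=False)

    # Archive every other active recurring Price with the same interval so
    # only the catalog-matching one stays purchasable.
    for p in stripe.Price.list(product=productId, active=True, limit=100).data:
        rec = p.get("recurring") or {}
        if (p.id != new.id and rec.get("interval") == interval
                and int(rec.get("interval_count") or 1) == intervalCount):
            stripe.Price.modify(p.id, active=False)
    return new.id

Archiving (active=False) rather than deleting is not just tidiness: Stripe Prices cannot be deleted through the API, only deactivated, and archived Prices keep existing subscriptions and historical invoices intact.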
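
The quiet change from int(unitAmountCHF * 100) to int(round(unitAmountCHF * 100)) when converting CHF to Rappen fixes a real truncation hazard. The catalog's current values are whole francs, where both forms agree, but fractional amounts can land one Rappen short under plain int(). A minimal demonstration in plain Python; 4.35 CHF is a hypothetical example amount, not a catalog value:

# int() truncates toward zero, and binary floats cannot represent most
# decimal fractions exactly, so the product can fall just below the
# intended integer.
chf = 4.35
print(chf * 100)              # 434.99999999999994 on IEEE-754 doubles
print(int(chf * 100))         # 434 -> one Rappen short
print(int(round(chf * 100)))  # 435 -> correct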
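
On the capacity errors: the patch maps SubscriptionCapacityException to HTTP 403 inside the except chain of routeDataMandates.py (and re-raises it by class name in interfaceDbApp.py, presumably to avoid a circular import). An alternative worth noting is a global FastAPI exception handler, which removes the per-route boilerplate. A sketch under stated assumptions: the app instance shown here is a stand-in for the application's real FastAPI object, and toClientDict() is assumed to return JSON-serializable data, as its signature suggests.

from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse

from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import (
    SubscriptionCapacityException,
)

app = FastAPI()  # stand-in; the real application object is created elsewhere


@app.exception_handler(SubscriptionCapacityException)
async def handleCapacityLimit(request: Request, exc: SubscriptionCapacityException):
    # A capacity limit is a policy refusal, not a server fault, hence 403
    # rather than 500; exc.message already tells the user where to find
    # upgrade options (Administration -> Billing -> Abonnement).
    return JSONResponse(status_code=403, content=exc.toClientDict())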