streamlined bootstrap and initial config

This commit is contained in:
patrick-motsch 2026-02-09 12:49:35 +01:00
parent 887867acd0
commit 1f3746aef5
34 changed files with 1103 additions and 580 deletions

9
app.py
View file

@ -286,6 +286,15 @@ instanceLabel = APP_CONFIG.get("APP_ENV_LABEL")
async def lifespan(app: FastAPI):
logger.info("Application is starting up")
# --- Register RBAC catalog for features (moved here from loadFeatureRouters for single-pass loading) ---
try:
from modules.security.rbacCatalog import getCatalogService
from modules.system.registry import registerAllFeaturesInCatalog
catalogService = getCatalogService()
registerAllFeaturesInCatalog(catalogService)
except Exception as e:
logger.warning(f"Could not register feature RBAC catalog: {e}")
# Get event user for feature lifecycle (system-level user for background operations)
rootInterface = getRootInterface()
eventUser = rootInterface.getUserByUsername("event")

View file

@ -73,12 +73,14 @@ class ModelSelector:
contextSize = len(context.encode("utf-8"))
totalSize = promptSize + contextSize
# Convert bytes to approximate tokens
# Conservative estimate: 1 token ≈ 2 bytes (for safety margin)
# Balanced estimate: 1 token ≈ 3 bytes
# Note: Actual tokenization varies by content type and model
# - English text: ~4 bytes/token
# - Structured data/JSON: ~2-3 bytes/token
# - German/European text: ~3.5 bytes/token
# - Structured data/JSON: ~2.5-3 bytes/token
# - Base64/encoded data: ~1.5-2 bytes/token
bytesPerToken = 2 # Conservative estimate for mixed content
# Using 3 as balanced estimate (previously 2 which overestimated by ~2x)
bytesPerToken = 3 # Balanced estimate for mixed content
promptTokens = promptSize / bytesPerToken
contextTokens = contextSize / bytesPerToken
totalTokens = totalSize / bytesPerToken
@ -98,9 +100,16 @@ class ModelSelector:
logger.debug(f"Models with {options.operationType.value}: {[m.name for m in operationFiltered]}")
# Step 2: Filter by prompt size (MUST be <= 80% of context size)
# AND by maxInputTokensPerRequest (provider rate limit / TPM)
# Note: contextLength is in tokens, so we need to compare tokens with tokens
promptFiltered = []
for model in operationFiltered:
# Check provider rate limit first (maxInputTokensPerRequest)
maxRequestTokens = getattr(model, 'maxInputTokensPerRequest', None)
if maxRequestTokens and maxRequestTokens > 0 and totalTokens > maxRequestTokens:
logger.debug(f"Model {model.name} filtered out: totalTokens={totalTokens:.0f} > maxInputTokensPerRequest={maxRequestTokens} (provider rate limit)")
continue
if model.contextLength == 0:
# No context length limit - always pass
promptFiltered.append(model)

View file

@ -46,7 +46,6 @@ class AiAnthropic(BaseConnectorAi):
return "anthropic"
def getModels(self) -> List[AiModel]:
# return [] # TODO: DEBUG TO TURN ON AFTER TESTING
# Get all available Anthropic models.
return [
AiModel(
@ -57,11 +56,10 @@ class AiAnthropic(BaseConnectorAi):
temperature=0.2,
maxTokens=8192,
contextLength=200000,
costPer1kTokensInput=0.015,
costPer1kTokensOutput=0.075,
costPer1kTokensInput=0.003, # $3/M tokens (updated 2026-02)
costPer1kTokensOutput=0.015, # $15/M tokens (updated 2026-02)
speedRating=6, # Slower due to high-quality processing
qualityRating=10, # Best quality available
# capabilities removed (not used in business logic)
functionCall=self.callAiBasic,
priority=PriorityEnum.QUALITY,
processingMode=ProcessingModeEnum.DETAILED,
@ -72,7 +70,55 @@ class AiAnthropic(BaseConnectorAi):
(OperationTypeEnum.DATA_EXTRACT, 8)
),
version="claude-sonnet-4-5-20250929",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.015 + (bytesReceived / 4 / 1000) * 0.075
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.003 + (bytesReceived / 4 / 1000) * 0.015
),
AiModel(
name="claude-haiku-4-5-20251001",
displayName="Anthropic Claude Haiku 4.5",
connectorType="anthropic",
apiUrl="https://api.anthropic.com/v1/messages",
temperature=0.2,
maxTokens=8192,
contextLength=200000,
costPer1kTokensInput=0.001, # $1/M tokens (updated 2026-02)
costPer1kTokensOutput=0.005, # $5/M tokens (updated 2026-02)
speedRating=9, # Very fast, lightweight model
qualityRating=8, # Good quality, cost-efficient
functionCall=self.callAiBasic,
priority=PriorityEnum.SPEED,
processingMode=ProcessingModeEnum.BASIC,
operationTypes=createOperationTypeRatings(
(OperationTypeEnum.PLAN, 8),
(OperationTypeEnum.DATA_ANALYSE, 8),
(OperationTypeEnum.DATA_GENERATE, 8),
(OperationTypeEnum.DATA_EXTRACT, 7)
),
version="claude-haiku-4-5-20251001",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.001 + (bytesReceived / 4 / 1000) * 0.005
),
AiModel(
name="claude-opus-4-6",
displayName="Anthropic Claude Opus 4.6",
connectorType="anthropic",
apiUrl="https://api.anthropic.com/v1/messages",
temperature=0.2,
maxTokens=8192,
contextLength=200000,
costPer1kTokensInput=0.005, # $5/M tokens (updated 2026-02)
costPer1kTokensOutput=0.025, # $25/M tokens (updated 2026-02)
speedRating=5, # Moderate latency, most capable
qualityRating=10, # Top-tier intelligence
functionCall=self.callAiBasic,
priority=PriorityEnum.QUALITY,
processingMode=ProcessingModeEnum.DETAILED,
operationTypes=createOperationTypeRatings(
(OperationTypeEnum.PLAN, 10),
(OperationTypeEnum.DATA_ANALYSE, 10),
(OperationTypeEnum.DATA_GENERATE, 10),
(OperationTypeEnum.DATA_EXTRACT, 9)
),
version="claude-opus-4-6",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.005 + (bytesReceived / 4 / 1000) * 0.025
),
AiModel(
name="claude-sonnet-4-5-20250929",
@ -82,8 +128,8 @@ class AiAnthropic(BaseConnectorAi):
temperature=0.2,
maxTokens=8192,
contextLength=200000,
costPer1kTokensInput=0.015,
costPer1kTokensOutput=0.075,
costPer1kTokensInput=0.003, # $3/M tokens (updated 2026-02)
costPer1kTokensOutput=0.015, # $15/M tokens (updated 2026-02)
speedRating=6,
qualityRating=10,
functionCall=self.callAiImage,
@ -93,7 +139,7 @@ class AiAnthropic(BaseConnectorAi):
(OperationTypeEnum.IMAGE_ANALYSE, 10)
),
version="claude-sonnet-4-5-20250929",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.015 + (bytesReceived / 4 / 1000) * 0.075
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.003 + (bytesReceived / 4 / 1000) * 0.015
)
]

View file

@ -6,7 +6,7 @@ from typing import List
from fastapi import HTTPException
from modules.shared.configuration import APP_CONFIG
from .aicoreBase import BaseConnectorAi
from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings
from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings, AiCallPromptImage
# Configure logger
logger = logging.getLogger(__name__)
@ -15,6 +15,10 @@ class ContextLengthExceededException(Exception):
"""Exception raised when the context length exceeds the model's limit"""
pass
class RateLimitExceededException(Exception):
"""Exception raised when the provider's rate limit (TPM) is exceeded"""
pass
def loadConfigData():
"""Load configuration data for OpenAI connector"""
return {
@ -57,11 +61,11 @@ class AiOpenai(BaseConnectorAi):
temperature=0.2,
maxTokens=16384,
contextLength=128000,
costPer1kTokensInput=0.03,
costPer1kTokensOutput=0.06,
maxInputTokensPerRequest=25000, # OpenAI org TPM limit is 30K, keep 5K buffer
costPer1kTokensInput=0.0025, # $2.50/M tokens (updated 2026-02)
costPer1kTokensOutput=0.01, # $10.00/M tokens (updated 2026-02)
speedRating=8, # Good speed for complex tasks
qualityRating=10, # High quality
# capabilities removed (not used in business logic)
functionCall=self.callAiBasic,
priority=PriorityEnum.BALANCED,
processingMode=ProcessingModeEnum.ADVANCED,
@ -72,43 +76,44 @@ class AiOpenai(BaseConnectorAi):
(OperationTypeEnum.DATA_EXTRACT, 7)
),
version="gpt-4o",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.03 + (bytesReceived / 4 / 1000) * 0.06
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0025 + (bytesReceived / 4 / 1000) * 0.01
),
AiModel(
name="gpt-3.5-turbo",
displayName="OpenAI GPT-3.5 Turbo",
connectorType="openai",
apiUrl="https://api.openai.com/v1/chat/completions",
temperature=0.2,
maxTokens=4096,
contextLength=16000,
costPer1kTokensInput=0.0015,
costPer1kTokensOutput=0.002,
speedRating=9, # Very fast
qualityRating=7, # Good but not premium
# capabilities removed (not used in business logic)
functionCall=self.callAiBasic,
priority=PriorityEnum.SPEED,
processingMode=ProcessingModeEnum.BASIC,
operationTypes=createOperationTypeRatings(
(OperationTypeEnum.PLAN, 7),
(OperationTypeEnum.DATA_ANALYSE, 8),
(OperationTypeEnum.DATA_GENERATE, 8)
# Note: GPT-3.5-turbo does NOT support vision/image operations
),
version="gpt-3.5-turbo",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0015 + (bytesReceived / 4 / 1000) * 0.002
),
AiModel(
name="gpt-4o",
displayName="OpenAI GPT-4o Instance Vision",
name="gpt-4o-mini",
displayName="OpenAI GPT-4o Mini",
connectorType="openai",
apiUrl="https://api.openai.com/v1/chat/completions",
temperature=0.2,
maxTokens=16384,
contextLength=128000,
costPer1kTokensInput=0.03,
costPer1kTokensOutput=0.06,
maxInputTokensPerRequest=25000, # OpenAI org TPM limit, keep buffer
costPer1kTokensInput=0.00015, # $0.15/M tokens (updated 2026-02)
costPer1kTokensOutput=0.0006, # $0.60/M tokens (updated 2026-02)
speedRating=9, # Very fast
qualityRating=8, # Good quality, replaces gpt-3.5-turbo
functionCall=self.callAiBasic,
priority=PriorityEnum.SPEED,
processingMode=ProcessingModeEnum.BASIC,
operationTypes=createOperationTypeRatings(
(OperationTypeEnum.PLAN, 8),
(OperationTypeEnum.DATA_ANALYSE, 8),
(OperationTypeEnum.DATA_GENERATE, 9),
(OperationTypeEnum.DATA_EXTRACT, 7)
),
version="gpt-4o-mini",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.00015 + (bytesReceived / 4 / 1000) * 0.0006
),
AiModel(
name="gpt-4o",
displayName="OpenAI GPT-4o Vision",
connectorType="openai",
apiUrl="https://api.openai.com/v1/chat/completions",
temperature=0.2,
maxTokens=16384,
contextLength=128000,
maxInputTokensPerRequest=25000, # OpenAI org TPM limit is 30K, keep 5K buffer
costPer1kTokensInput=0.0025, # $2.50/M tokens (updated 2026-02)
costPer1kTokensOutput=0.01, # $10.00/M tokens (updated 2026-02)
speedRating=6, # Slower for vision tasks
qualityRating=9, # High quality vision
functionCall=self.callAiImage,
@ -118,7 +123,7 @@ class AiOpenai(BaseConnectorAi):
(OperationTypeEnum.IMAGE_ANALYSE, 9)
),
version="gpt-4o",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.03 + (bytesReceived / 4 / 1000) * 0.06
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0025 + (bytesReceived / 4 / 1000) * 0.01
),
AiModel(
name="dall-e-3",
@ -183,6 +188,19 @@ class AiOpenai(BaseConnectorAi):
error_message = f"OpenAI API error: {response.status_code} - {response.text}"
logger.error(error_message)
# Check for rate limit exceeded (429 TPM)
if response.status_code == 429:
try:
error_data = response.json()
error_msg = error_data.get("error", {}).get("message", "Rate limit exceeded")
raise RateLimitExceededException(
f"Rate limit exceeded for {model.name}: {error_msg}"
)
except (ValueError, KeyError):
raise RateLimitExceededException(
f"Rate limit exceeded for {model.name}"
)
# Check for context length exceeded error
if response.status_code == 400:
try:

View file

@ -59,13 +59,12 @@ class AiPerplexity(BaseConnectorAi):
connectorType="perplexity",
apiUrl="https://api.perplexity.ai/chat/completions",
temperature=0.2,
maxTokens=24000, # Increased for detailed web crawl responses (Perplexity supports up to 25k)
contextLength=32000,
costPer1kTokensInput=0.005,
costPer1kTokensOutput=0.005,
maxTokens=24000,
contextLength=127000, # 127K context window (updated 2026-02)
costPer1kTokensInput=0.001, # $1/M tokens (updated 2026-02)
costPer1kTokensOutput=0.001, # $1/M tokens (updated 2026-02)
speedRating=8,
qualityRating=8,
# capabilities removed (not used in business logic)
functionCall=self._routeWebOperation,
priority=PriorityEnum.BALANCED,
processingMode=ProcessingModeEnum.ADVANCED,
@ -74,7 +73,7 @@ class AiPerplexity(BaseConnectorAi):
(OperationTypeEnum.WEB_CRAWL, 7)
),
version="sonar",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.005 + (bytesReceived / 4 / 1000) * 0.005
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.001 + (bytesReceived / 4 / 1000) * 0.001
),
AiModel(
name="sonar-pro",
@ -82,13 +81,12 @@ class AiPerplexity(BaseConnectorAi):
connectorType="perplexity",
apiUrl="https://api.perplexity.ai/chat/completions",
temperature=0.2,
maxTokens=24000, # Increased for detailed web crawl responses (Perplexity supports up to 25k)
contextLength=32000,
costPer1kTokensInput=0.01,
costPer1kTokensOutput=0.01,
maxTokens=24000,
contextLength=200000, # 200K context window (updated 2026-02)
costPer1kTokensInput=0.003, # $3/M tokens (updated 2026-02)
costPer1kTokensOutput=0.015, # $15/M tokens (updated 2026-02)
speedRating=6, # Slower due to AI analysis
qualityRating=9, # Best AI analysis quality
# capabilities removed (not used in business logic)
functionCall=self._routeWebOperation,
priority=PriorityEnum.QUALITY,
processingMode=ProcessingModeEnum.DETAILED,
@ -97,7 +95,7 @@ class AiPerplexity(BaseConnectorAi):
(OperationTypeEnum.WEB_CRAWL, 8)
),
version="sonar-pro",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.01 + (bytesReceived / 4 / 1000) * 0.01
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.003 + (bytesReceived / 4 / 1000) * 0.015
)
]

View file

@ -87,6 +87,7 @@ class AiModel(BaseModel):
# Token and context limits
maxTokens: int = Field(description="Maximum tokens this model can generate")
contextLength: int = Field(description="Maximum context length this model can handle")
maxInputTokensPerRequest: Optional[int] = Field(default=None, description="Max input tokens per single request (provider rate limit / TPM). If set, model selector filters requests exceeding this limit.")
# Cost information
costPer1kTokensInput: float = Field(default=0.0, description="Cost per 1000 input tokens")

View file

@ -83,6 +83,11 @@ class Mandate(BaseModel):
description="Indicates whether the mandate is enabled",
json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
)
isSystem: bool = Field(
default=False,
description="Whether this is a system mandate (e.g. root mandate). Cannot be deleted.",
json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": False}
)
registerModelLabels(
@ -93,6 +98,7 @@ registerModelLabels(
"name": {"en": "Name", "de": "Name", "fr": "Nom"},
"description": {"en": "Description", "de": "Beschreibung", "fr": "Description"},
"enabled": {"en": "Enabled", "de": "Aktiviert", "fr": "Activé"},
"isSystem": {"en": "System Mandate", "de": "System-Mandant", "fr": "Mandat système"},
},
)

View file

@ -69,8 +69,6 @@ class AutomationObjects:
userId=self.userId,
)
# Initialize database system
self.db.initDbSystem()
logger.debug(f"Automation database initialized for user {self.userId}")
def setUserContext(self, currentUser: User, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None):

View file

@ -59,24 +59,7 @@ RESOURCE_OBJECTS = [
]
# Template roles for this feature
# IMPORTANT: "viewer" role is required for automatic user assignment!
TEMPLATE_ROLES = [
{
"roleLabel": "viewer",
"description": {
"en": "Automation Viewer - View automations and execution results",
"de": "Automatisierungs-Betrachter - Automatisierungen und Ausführungsergebnisse einsehen",
"fr": "Visualiseur automatisation - Consulter les automatisations et résultats"
},
"accessRules": [
# UI access to all views
{"context": "UI", "item": "ui.feature.automation.definitions", "view": True},
{"context": "UI", "item": "ui.feature.automation.templates", "view": True},
{"context": "UI", "item": "ui.feature.automation.logs", "view": True},
# Read-only DATA access
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "n"},
]
},
{
"roleLabel": "automation-admin",
"description": {
@ -115,7 +98,7 @@ TEMPLATE_ROLES = [
"fr": "Visualiseur automatisation - Consulter les automatisations et résultats"
},
"accessRules": [
# UI access to view only - vollqualifizierte ObjectKeys
# UI access to view only
{"context": "UI", "item": "ui.feature.automation.definitions", "view": True},
{"context": "UI", "item": "ui.feature.automation.logs", "view": True},
# Read-only DATA access (my level)
@ -130,7 +113,8 @@ def getFeatureDefinition() -> Dict[str, Any]:
return {
"code": FEATURE_CODE,
"label": FEATURE_LABEL,
"icon": FEATURE_ICON
"icon": FEATURE_ICON,
"autoCreateInstance": True, # Automatically create instance in root mandate during bootstrap
}
@ -215,8 +199,6 @@ def _syncTemplateRolesToDb() -> int:
if roleLabel in existingRoleLabels:
roleId = existingRoleLabels[roleLabel]
logger.debug(f"Template role '{roleLabel}' already exists with ID {roleId}")
# Ensure AccessRules exist for this role
_ensureAccessRulesForRole(rootInterface, roleId, roleTemplate.get("accessRules", []))
else:

View file

@ -19,8 +19,6 @@ from modules.features.automation.datamodelFeatureAutomation import AutomationDef
from modules.datamodels.datamodelChat import ChatWorkflow
from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict
from modules.shared.attributeUtils import getModelAttributeDefinitions
from modules.workflows.automation import executeAutomation
# Configure logger
logger = logging.getLogger(__name__)
@ -371,6 +369,7 @@ async def execute_automation_route(
if context.featureInstanceId:
services.featureInstanceId = str(context.featureInstanceId)
services.featureCode = 'automation'
from modules.workflows.automation import executeAutomation
workflow = await executeAutomation(automationId, services)
return workflow
except HTTPException:

View file

@ -2,8 +2,14 @@
# All rights reserved.
"""
Chatbot feature - LangGraph-based chatbot implementation.
Lazy-loaded to avoid importing langgraph/langchain at boot time.
"""
from .service import chatProcess
async def chatProcess(*args, **kwargs):
    """Deferred entry point for the chatbot feature.

    Resolves the real implementation on first invocation so that importing
    this package does not pull langgraph/langchain in at boot time.
    All positional and keyword arguments are forwarded unchanged.
    """
    from .service import chatProcess as realChatProcess
    return await realChatProcess(*args, **kwargs)
__all__ = ['chatProcess']

View file

@ -329,9 +329,6 @@ class ChatObjects:
userId=self.userId
)
# Initialize database system
self.db.initDbSystem()
logger.info("Database initialized successfully")
except Exception as e:
logger.error(f"Failed to initialize database: {str(e)}")

View file

@ -32,9 +32,6 @@ from modules.datamodels.datamodelPagination import PaginationParams, PaginatedRe
from modules.features.chatbot import chatProcess
from modules.features.chatbot.streaming.events import get_event_manager
# Import workflow control functions
from modules.workflows.automation import chatStop
# Configure logger
logger = logging.getLogger(__name__)

View file

@ -54,15 +54,30 @@ TEMPLATE_ROLES = [
{
"roleLabel": "viewer",
"description": {
"en": "Chat Playground Viewer - View and use chat playground",
"de": "Chat Playground Betrachter - Chat Playground ansehen und nutzen",
"fr": "Visualiseur Chat Playground - Consulter et utiliser le chat playground"
"en": "Chat Playground Viewer - View chat playground (read-only)",
"de": "Chat Playground Betrachter - Chat Playground ansehen (nur lesen)",
"fr": "Visualiseur Chat Playground - Consulter le chat playground (lecture seule)"
},
"accessRules": [
# UI access to all views
# UI: only playground view, NO workflows
{"context": "UI", "item": "ui.feature.chatplayground.playground", "view": True},
# RESOURCE: NO access (viewer cannot start/stop/access chat data)
# DATA access (own records, read-only)
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"},
]
},
{
"roleLabel": "user",
"description": {
"en": "Chat Playground User - Use chat playground and workflows",
"de": "Chat Playground Benutzer - Chat Playground und Workflows nutzen",
"fr": "Utilisateur Chat Playground - Utiliser le chat playground et les workflows"
},
"accessRules": [
# UI: full access to all views
{"context": "UI", "item": "ui.feature.chatplayground.playground", "view": True},
{"context": "UI", "item": "ui.feature.chatplayground.workflows", "view": True},
# Resource access
# Resource access: can start/stop workflows and access chat data
{"context": "RESOURCE", "item": "resource.feature.chatplayground.start", "view": True},
{"context": "RESOURCE", "item": "resource.feature.chatplayground.stop", "view": True},
{"context": "RESOURCE", "item": "resource.feature.chatplayground.chatData", "view": True},
@ -94,7 +109,8 @@ def getFeatureDefinition() -> Dict[str, Any]:
return {
"code": FEATURE_CODE,
"label": FEATURE_LABEL,
"icon": FEATURE_ICON
"icon": FEATURE_ICON,
"autoCreateInstance": True, # Automatically create instance in root mandate during bootstrap
}
@ -179,8 +195,6 @@ def _syncTemplateRolesToDb() -> int:
if roleLabel in existingRoleLabels:
roleId = existingRoleLabels[roleLabel]
logger.debug(f"Template role '{roleLabel}' already exists with ID {roleId}")
# Ensure AccessRules exist for this role
_ensureAccessRulesForRole(rootInterface, roleId, roleTemplate.get("accessRules", []))
else:

View file

@ -66,7 +66,6 @@ class InterfaceFeatureNeutralizer:
dbPort=dbPort,
userId=self.userId,
)
self.db.initDbSystem()
logger.debug("Neutralizer database initialized successfully")
except Exception as e:
logger.error(f"Error initializing Neutralizer database: {str(e)}")

View file

@ -9,7 +9,7 @@ from modules.auth import limiter, getRequestContext, RequestContext
# Import interfaces
from .datamodelFeatureNeutralizer import DataNeutraliserConfig, DataNeutralizerAttributes
from .mainNeutralizePlayground import NeutralizationPlayground
from .neutralizePlayground import NeutralizationPlayground
# Configure logger
logger = logging.getLogger(__name__)

View file

@ -85,11 +85,6 @@ class RealEstateObjects:
userId=self.userId if self.userId else None,
)
# Initialize database system (creates database and system table if needed)
# Note: This is also called in DatabaseConnector.__init__, but we call it explicitly
# for consistency with other interfaces and to ensure proper initialization
self.db.initDbSystem()
# Ensure all supporting tables are created (Land, Kanton, Gemeinde, Dokument)
# These tables are needed for foreign key relationships
self._ensureSupportingTablesExist()

View file

@ -155,7 +155,6 @@ class TrusteeObjects:
userId=self.userId,
)
self.db.initDbSystem()
logger.info(f"Trustee database initialized successfully for user {self.userId}")
except Exception as e:
logger.error(f"Failed to initialize Trustee database: {str(e)}")

View file

@ -144,12 +144,11 @@ TEMPLATE_ROLES = [
"fr": "Comptable fiduciaire - Gérer les données comptables et financières"
},
"accessRules": [
# UI access to main views (not admin views) - vollqualifizierte ObjectKeys
# UI access to main views (not admin views, not expense-import) - vollqualifizierte ObjectKeys
{"context": "UI", "item": "ui.feature.trustee.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.trustee.positions", "view": True},
{"context": "UI", "item": "ui.feature.trustee.documents", "view": True},
{"context": "UI", "item": "ui.feature.trustee.position-documents", "view": True},
{"context": "UI", "item": "ui.feature.trustee.expense-import", "view": True},
# Group-level DATA access
{"context": "DATA", "item": None, "view": True, "read": "g", "create": "g", "update": "g", "delete": "g"},
]
@ -162,11 +161,12 @@ TEMPLATE_ROLES = [
"fr": "Client fiduciaire - Consulter ses propres données comptables et documents"
},
"accessRules": [
# UI access to main views only (read-only focus) - vollqualifizierte ObjectKeys
# UI access to main views + expense-import - vollqualifizierte ObjectKeys
{"context": "UI", "item": "ui.feature.trustee.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.trustee.positions", "view": True},
{"context": "UI", "item": "ui.feature.trustee.documents", "view": True},
{"context": "UI", "item": "ui.feature.trustee.position-documents", "view": True},
{"context": "UI", "item": "ui.feature.trustee.expense-import", "view": True},
# Own records only (MY level) - explizite Regeln pro Tabelle
{"context": "DATA", "item": "data.feature.trustee.TrusteePosition", "view": True, "read": "m", "create": "m", "update": "m", "delete": "n"},
{"context": "DATA", "item": "data.feature.trustee.TrusteeDocument", "view": True, "read": "m", "create": "m", "update": "m", "delete": "n"},
@ -279,8 +279,6 @@ def _syncTemplateRolesToDb() -> int:
if roleLabel in existingRoleLabels:
roleId = existingRoleLabels[roleLabel]
logger.debug(f"Template role '{roleLabel}' already exists with ID {roleId}")
# Ensure AccessRules exist for this role
_ensureAccessRulesForRole(rootInterface, roleId, roleTemplate.get("accessRules", []))
else:

View file

@ -51,12 +51,16 @@ def initBootstrap(db: DatabaseConnector) -> None:
# Initialize root mandate
mandateId = initRootMandate(db)
# Initialize roles FIRST (needed for AccessRules)
# Initialize system role TEMPLATES (mandateId=None, isSystemRole=True)
initRoles(db)
# Initialize RBAC rules (uses roleIds from roles)
# Initialize RBAC rules for template roles
initRbacRules(db)
# Copy system template roles to ALL mandates as mandate-instance roles
# This also serves as migration for existing mandates that don't have instance roles yet
_ensureAllMandatesHaveSystemRoles(db)
# Initialize admin user
adminUserId = initAdminUser(db, mandateId)
@ -64,6 +68,7 @@ def initBootstrap(db: DatabaseConnector) -> None:
eventUserId = initEventUser(db, mandateId)
# Assign initial user memberships (via UserMandate + UserMandateRole)
# Uses mandate-instance roles (not template roles)
if adminUserId and eventUserId and mandateId:
assignInitialUserMemberships(db, mandateId, adminUserId, eventUserId)
@ -163,8 +168,8 @@ def initAutomationTemplates(dbApp: DatabaseConnector, adminUserId: Optional[str]
def initRootMandateFeatures(db: DatabaseConnector, mandateId: str) -> None:
"""
Create feature instances for root mandate (chatplayground, automation).
These features are available to all users by default.
Create feature instances for root mandate.
Dynamically discovers all feature modules with autoCreateInstance=True.
Args:
db: Database connector instance
@ -172,14 +177,29 @@ def initRootMandateFeatures(db: DatabaseConnector, mandateId: str) -> None:
"""
from modules.datamodels.datamodelFeatures import FeatureInstance
from modules.interfaces.interfaceFeatures import getFeatureInterface
from modules.system.registry import loadFeatureMainModules
logger.info("Initializing root mandate features")
# Features to create instances for
featuresToCreate = [
{"code": "chatplayground", "label": "Chat Playground"},
{"code": "automation", "label": "Automation"},
]
# Dynamically discover features with autoCreateInstance=True
featuresToCreate = []
mainModules = loadFeatureMainModules()
for featureName, module in mainModules.items():
if hasattr(module, "getFeatureDefinition"):
try:
featureDef = module.getFeatureDefinition()
if featureDef.get("autoCreateInstance", False):
featureCode = featureDef.get("code", featureName)
featureLabel = featureDef.get("label", {}).get("en", featureName)
featuresToCreate.append({"code": featureCode, "label": featureLabel})
logger.debug(f"Feature '{featureCode}' marked for auto-creation in root mandate")
except Exception as e:
logger.warning(f"Could not read feature definition for '{featureName}': {e}")
if not featuresToCreate:
logger.info("No features marked for auto-creation in root mandate")
return
featureInterface = getFeatureInterface(db)
@ -225,6 +245,7 @@ def initRootMandateFeatures(db: DatabaseConnector, mandateId: str) -> None:
def initRootMandate(db: DatabaseConnector) -> Optional[str]:
"""
Creates the Root mandate if it doesn't exist.
Root mandate is identified by name='root' AND isSystem=True.
Args:
db: Database connector instance
@ -232,14 +253,23 @@ def initRootMandate(db: DatabaseConnector) -> Optional[str]:
Returns:
Mandate ID if created or found, None otherwise
"""
existingMandates = db.getRecordset(Mandate)
# Find existing root mandate by name AND isSystem flag
existingMandates = db.getRecordset(Mandate, recordFilter={"name": "root", "isSystem": True})
if existingMandates:
mandateId = existingMandates[0].get("id")
logger.info(f"Root mandate already exists with ID {mandateId}")
return mandateId
# Check for legacy root mandates (name="Root" without isSystem flag) and migrate
legacyMandates = db.getRecordset(Mandate, recordFilter={"name": "Root"})
if legacyMandates:
mandateId = legacyMandates[0].get("id")
logger.info(f"Migrating legacy Root mandate {mandateId}: setting name='root', isSystem=True")
db.recordModify(Mandate, mandateId, {"name": "root", "isSystem": True})
return mandateId
logger.info("Creating Root mandate")
rootMandate = Mandate(name="Root", enabled=True)
rootMandate = Mandate(name="root", isSystem=True, enabled=True)
createdMandate = db.recordCreate(Mandate, rootMandate)
mandateId = createdMandate.get("id")
logger.info(f"Root mandate created with ID {mandateId}")
@ -383,11 +413,113 @@ def initRoles(db: DatabaseConnector) -> None:
logger.warning(f"Error creating role {role.roleLabel}: {e}")
else:
_roleIdCache[role.roleLabel] = existingRoleLabels[role.roleLabel]
logger.debug(f"Role {role.roleLabel} already exists with ID {existingRoleLabels[role.roleLabel]}")
logger.info("Roles initialization completed")
def _ensureAllMandatesHaveSystemRoles(db: DatabaseConnector) -> None:
    """Ensure every existing mandate has its mandate-instance system roles.

    Acts both as initial setup and as a migration step for mandates created
    before instance roles existed. Delegates the per-mandate copy (roles plus
    AccessRules) to copySystemRolesToMandate, which is idempotent.
    """
    for mandate in db.getRecordset(Mandate) or []:
        copySystemRolesToMandate(db, mandate.get("id"))
def copySystemRolesToMandate(db: DatabaseConnector, mandateId: str) -> int:
    """
    Copy system template roles (mandateId=None, isSystemRole=True) to a mandate
    as mandate-instance roles, including each role's AccessRules.

    This is analogous to how feature template roles are copied to feature
    instances: each mandate gets its own instances of the system roles
    (e.g. admin/user/viewer) together with their AccessRules. The operation
    is idempotent — roles the mandate already has are skipped.

    Args:
        db: Database connector instance
        mandateId: Target mandate ID

    Returns:
        Number of roles copied (0 when there is nothing to do)
    """
    import uuid as _uuid

    # Find system template roles (global, no mandateId)
    templateRoles = db.getRecordset(
        Role,
        recordFilter={"isSystemRole": True, "mandateId": None}
    )
    if not templateRoles:
        logger.debug("No system template roles found to copy")
        return 0

    # Labels of mandate-level roles this mandate already has
    # (featureInstanceId=None excludes feature-bound instance roles)
    existingMandateRoles = db.getRecordset(
        Role,
        recordFilter={"mandateId": mandateId, "featureInstanceId": None}
    )
    existingLabels = {r.get("roleLabel") for r in existingMandateRoles}

    copiedCount = 0
    for templateRole in templateRoles:
        roleLabel = templateRole.get("roleLabel")

        # Skip if mandate already has this role (idempotent re-run)
        if roleLabel in existingLabels:
            logger.debug(f"Mandate {mandateId} already has role '{roleLabel}', skipping")
            continue

        newRoleId = str(_uuid.uuid4())

        # Create mandate-instance role
        newRole = Role(
            id=newRoleId,
            roleLabel=roleLabel,
            description=templateRole.get("description", {}),
            mandateId=mandateId,
            featureInstanceId=None,
            featureCode=None,
            isSystemRole=True  # Still a system role, but bound to this mandate
        )
        db.recordCreate(Role, newRole.model_dump())

        # Load this template role's AccessRules lazily — only for roles that
        # are actually copied. This avoids one DB query per template role on
        # the common re-run path where every role already exists.
        templateRules = db.getRecordset(
            AccessRule,
            recordFilter={"roleId": templateRole.get("id")}
        )
        for rule in templateRules:
            newRule = AccessRule(
                id=str(_uuid.uuid4()),
                roleId=newRoleId,
                context=rule.get("context"),
                item=rule.get("item"),
                view=rule.get("view", False),
                read=rule.get("read"),
                create=rule.get("create"),
                update=rule.get("update"),
                delete=rule.get("delete")
            )
            db.recordCreate(AccessRule, newRule.model_dump())

        copiedCount += 1
        logger.info(f"Copied system role '{roleLabel}' to mandate {mandateId} with {len(templateRules)} AccessRules")

    if copiedCount > 0:
        logger.info(f"Copied {copiedCount} system roles to mandate {mandateId}")
    return copiedCount
def _getRoleId(db: DatabaseConnector, roleLabel: str) -> Optional[str]:
"""
Get role ID by label, using cache or database lookup.
@ -861,6 +993,117 @@ def _createTableSpecificRules(db: DatabaseConnector) -> None:
delete=AccessLevel.NONE,
))
# -------------------------------------------------------------------------
# Billing Namespace - Billing accounts and transactions
# -------------------------------------------------------------------------
# BillingAccount: User sees own accounts (MY), Admin sees all in mandate (GROUP)
# Each user must see all billing accounts assigned to them
if adminId:
tableRules.append(AccessRule(
roleId=adminId,
context=AccessRuleContext.DATA,
item="data.billing.BillingAccount",
view=True,
read=AccessLevel.GROUP,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
if userId:
tableRules.append(AccessRule(
roleId=userId,
context=AccessRuleContext.DATA,
item="data.billing.BillingAccount",
view=True,
read=AccessLevel.MY,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
if viewerId:
tableRules.append(AccessRule(
roleId=viewerId,
context=AccessRuleContext.DATA,
item="data.billing.BillingAccount",
view=True,
read=AccessLevel.MY,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
# BillingTransaction: User sees own transactions (MY), Admin sees all in mandate (GROUP)
if adminId:
tableRules.append(AccessRule(
roleId=adminId,
context=AccessRuleContext.DATA,
item="data.billing.BillingTransaction",
view=True,
read=AccessLevel.GROUP,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
if userId:
tableRules.append(AccessRule(
roleId=userId,
context=AccessRuleContext.DATA,
item="data.billing.BillingTransaction",
view=True,
read=AccessLevel.MY,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
if viewerId:
tableRules.append(AccessRule(
roleId=viewerId,
context=AccessRuleContext.DATA,
item="data.billing.BillingTransaction",
view=True,
read=AccessLevel.MY,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
# BillingSettings: Only admin can view mandate settings (read-only)
# SysAdmin (flag) manages settings, roles only read
if adminId:
tableRules.append(AccessRule(
roleId=adminId,
context=AccessRuleContext.DATA,
item="data.billing.BillingSettings",
view=True,
read=AccessLevel.GROUP,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
if userId:
tableRules.append(AccessRule(
roleId=userId,
context=AccessRuleContext.DATA,
item="data.billing.BillingSettings",
view=False,
read=AccessLevel.NONE,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
if viewerId:
tableRules.append(AccessRule(
roleId=viewerId,
context=AccessRuleContext.DATA,
item="data.billing.BillingSettings",
view=False,
read=AccessLevel.NONE,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
# Create all table-specific rules
for rule in tableRules:
db.recordCreate(AccessRule, rule)
@ -992,8 +1235,7 @@ def _ensureUiContextRules(db: DatabaseConnector) -> None:
for rule in missingRules:
db.recordCreate(AccessRule, rule)
logger.info(f"Created {len(missingRules)} missing UI context rules")
else:
logger.debug("All UI context rules already exist")
# All UI context rules already exist (nothing to create)
def _ensureDataContextRules(db: DatabaseConnector) -> None:
@ -1034,6 +1276,13 @@ def _ensureDataContextRules(db: DatabaseConnector) -> None:
"data.automation.AutomationTemplate",
]
# Billing tables: read-only for all roles, scoped by role level
# Users see their own accounts/transactions (MY), Admins see mandate-wide (GROUP)
billingReadOnlyTables = [
"data.billing.BillingAccount",
"data.billing.BillingTransaction",
]
missingRules = []
# MY-level rules for user-owned tables
@ -1077,9 +1326,9 @@ def _ensureDataContextRules(db: DatabaseConnector) -> None:
delete=AccessLevel.NONE,
))
# ALL-level rules for admin on system templates
# Admin rules for system templates (read ALL, write GROUP-scoped)
for objectKey in tablesNeedingAllRulesForAdmin:
# Admin: ALL-level access (sees all templates)
# Admin: read ALL templates, create/update/delete within GROUP (mandate-scoped)
if adminId and (adminId, objectKey) not in existingCombinations:
missingRules.append(AccessRule(
roleId=adminId,
@ -1087,9 +1336,9 @@ def _ensureDataContextRules(db: DatabaseConnector) -> None:
item=objectKey,
view=True,
read=AccessLevel.ALL,
create=AccessLevel.ALL,
update=AccessLevel.ALL,
delete=AccessLevel.ALL,
create=AccessLevel.GROUP,
update=AccessLevel.GROUP,
delete=AccessLevel.GROUP,
))
# User: MY-level access
@ -1118,13 +1367,89 @@ def _ensureDataContextRules(db: DatabaseConnector) -> None:
delete=AccessLevel.NONE,
))
# Billing read-only rules: Admin=GROUP, User/Viewer=MY (own accounts/transactions)
for objectKey in billingReadOnlyTables:
# Admin: GROUP-level read (sees all accounts in their mandates)
if adminId and (adminId, objectKey) not in existingCombinations:
missingRules.append(AccessRule(
roleId=adminId,
context=AccessRuleContext.DATA,
item=objectKey,
view=True,
read=AccessLevel.GROUP,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
# User: MY-level read (sees only own billing accounts/transactions)
if userId and (userId, objectKey) not in existingCombinations:
missingRules.append(AccessRule(
roleId=userId,
context=AccessRuleContext.DATA,
item=objectKey,
view=True,
read=AccessLevel.MY,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
# Viewer: MY-level read-only (sees only own billing accounts/transactions)
if viewerId and (viewerId, objectKey) not in existingCombinations:
missingRules.append(AccessRule(
roleId=viewerId,
context=AccessRuleContext.DATA,
item=objectKey,
view=True,
read=AccessLevel.MY,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
# BillingSettings: Admin can view (GROUP), User/Viewer have no access
billingSettingsKey = "data.billing.BillingSettings"
if adminId and (adminId, billingSettingsKey) not in existingCombinations:
missingRules.append(AccessRule(
roleId=adminId,
context=AccessRuleContext.DATA,
item=billingSettingsKey,
view=True,
read=AccessLevel.GROUP,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
if userId and (userId, billingSettingsKey) not in existingCombinations:
missingRules.append(AccessRule(
roleId=userId,
context=AccessRuleContext.DATA,
item=billingSettingsKey,
view=False,
read=AccessLevel.NONE,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
if viewerId and (viewerId, billingSettingsKey) not in existingCombinations:
missingRules.append(AccessRule(
roleId=viewerId,
context=AccessRuleContext.DATA,
item=billingSettingsKey,
view=False,
read=AccessLevel.NONE,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
# Create missing rules
if missingRules:
for rule in missingRules:
db.recordCreate(AccessRule, rule)
logger.info(f"Created {len(missingRules)} missing DATA context rules")
else:
logger.debug("All DATA context rules already exist")
# All DATA context rules already exist (nothing to create)
# Update existing AutomationTemplate rules for admin/viewer to ALL access
_updateAutomationTemplateRulesToAll(db, adminId, viewerId)
@ -1132,8 +1457,9 @@ def _ensureDataContextRules(db: DatabaseConnector) -> None:
def _updateAutomationTemplateRulesToAll(db: DatabaseConnector, adminId: Optional[str], viewerId: Optional[str]) -> None:
"""
Update existing AutomationTemplate RBAC rules from MY to ALL for admin and viewer.
This ensures sysadmins can see all templates (including system-seeded ones).
Update existing AutomationTemplate RBAC rules to correct levels.
- Admin: read=ALL, create/update/delete=GROUP (mandate-scoped writes)
- Viewer: read=ALL (read-only)
"""
if not adminId and not viewerId:
return
@ -1155,14 +1481,29 @@ def _updateAutomationTemplateRulesToAll(db: DatabaseConnector, adminId: Optional
roleId = rule.get("roleId")
currentReadLevel = rule.get("read")
# Update admin and viewer rules from MY to ALL
if roleId in [adminId, viewerId] and currentReadLevel == AccessLevel.MY.value:
if roleId == adminId:
# Admin: read ALL, write GROUP
updates = {}
if currentReadLevel != AccessLevel.ALL.value:
updates["read"] = AccessLevel.ALL.value
currentCreate = rule.get("create")
if currentCreate == AccessLevel.ALL.value:
updates["create"] = AccessLevel.GROUP.value
updates["update"] = AccessLevel.GROUP.value
updates["delete"] = AccessLevel.GROUP.value
if updates:
db.recordModify(AccessRule, ruleId, updates)
updatedCount += 1
logger.debug(f"Updated AutomationTemplate rule {ruleId} for admin to read=ALL, write=GROUP")
elif roleId == viewerId and currentReadLevel == AccessLevel.MY.value:
# Viewer: read ALL (read-only)
db.recordModify(AccessRule, ruleId, {"read": AccessLevel.ALL.value})
updatedCount += 1
logger.debug(f"Updated AutomationTemplate rule {ruleId} for role {roleId} to ALL access")
logger.debug(f"Updated AutomationTemplate rule {ruleId} for viewer to read=ALL")
if updatedCount > 0:
logger.info(f"Updated {updatedCount} AutomationTemplate RBAC rules to ALL access")
logger.info(f"Updated {updatedCount} AutomationTemplate RBAC rules")
def _createResourceContextRules(db: DatabaseConnector) -> None:
@ -1177,8 +1518,8 @@ def _createResourceContextRules(db: DatabaseConnector) -> None:
"""
resourceRules = []
# All roles get full resource access by default (no sysadmin - that's a flag)
for roleLabel in ["admin", "user", "viewer"]:
# Admin and User get default resource access; Viewer gets NO resource access
for roleLabel in ["admin", "user"]:
roleId = _getRoleId(db, roleLabel)
if roleId:
resourceRules.append(AccessRule(
@ -1192,6 +1533,8 @@ def _createResourceContextRules(db: DatabaseConnector) -> None:
delete=None,
))
# Viewer: no default RESOURCE access (viewer cannot use system resources)
for rule in resourceRules:
db.recordCreate(AccessRule, rule)
@ -1204,7 +1547,11 @@ def _createResourceContextRules(db: DatabaseConnector) -> None:
def _createAicoreProviderRules(db: DatabaseConnector) -> None:
"""
Create RBAC rules for AICore providers (resource.aicore.{provider}).
All roles get access to all providers by default.
Provider access per role:
- admin: all providers allowed
- user: all providers EXCEPT anthropic (view=False)
- viewer: NO provider access (viewer has no RESOURCE permissions)
NOTE: Provider list is dynamically discovered from AICore model registry.
@ -1226,37 +1573,54 @@ def _createAicoreProviderRules(db: DatabaseConnector) -> None:
providerRules = []
# All roles get access to all providers (as per requirement)
for roleLabel in ["admin", "user", "viewer"]:
roleId = _getRoleId(db, roleLabel)
if not roleId:
continue
# Admin: access to ALL providers
adminId = _getRoleId(db, "admin")
if adminId:
for provider in providers:
resourceKey = f"resource.aicore.{provider}"
# Check if rule already exists
existingRules = db.getRecordset(
AccessRule,
recordFilter={
"roleId": roleId,
"roleId": adminId,
"context": AccessRuleContext.RESOURCE.value,
"item": resourceKey
}
)
if not existingRules:
providerRules.append(AccessRule(
roleId=roleId,
roleId=adminId,
context=AccessRuleContext.RESOURCE,
item=resourceKey,
view=True, # view=True means "can use" for RESOURCE context
read=None,
create=None,
update=None,
delete=None,
view=True,
read=None, create=None, update=None, delete=None,
))
# User: access to all providers EXCEPT anthropic
userId = _getRoleId(db, "user")
if userId:
for provider in providers:
resourceKey = f"resource.aicore.{provider}"
existingRules = db.getRecordset(
AccessRule,
recordFilter={
"roleId": userId,
"context": AccessRuleContext.RESOURCE.value,
"item": resourceKey
}
)
if not existingRules:
# Anthropic is not allowed for user role
isAllowed = provider != "anthropic"
providerRules.append(AccessRule(
roleId=userId,
context=AccessRuleContext.RESOURCE,
item=resourceKey,
view=isAllowed,
read=None, create=None, update=None, delete=None,
))
# Viewer: NO provider access (viewer has no RESOURCE permissions at all)
for rule in providerRules:
db.recordCreate(AccessRule, rule)
@ -1273,7 +1637,7 @@ def initRootMandateBilling(mandateId: str) -> None:
"""
Initialize billing settings for root mandate.
Root mandate uses PREPAY_USER model with 10 CHF initial credit per user.
Also creates billing accounts for all users of the mandate.
Creates billing accounts for ALL users regardless of billing model (for audit trail).
Args:
mandateId: Root mandate ID
@ -1291,11 +1655,10 @@ def initRootMandateBilling(mandateId: str) -> None:
if existingSettings:
logger.info("Billing settings for root mandate already exist")
else:
# Create billing settings for root mandate
settings = BillingSettings(
mandateId=mandateId,
billingModel=BillingModelEnum.PREPAY_USER,
defaultUserCredit=10.0, # 10 CHF initial credit per user
defaultUserCredit=10.0,
warningThresholdPercent=10.0,
blockOnZeroBalance=True,
notifyOnWarning=True
@ -1305,11 +1668,18 @@ def initRootMandateBilling(mandateId: str) -> None:
logger.info(f"Created billing settings for root mandate: PREPAY_USER with 10 CHF default credit")
existingSettings = billingInterface.getSettings(mandateId)
# Create billing accounts for all users of the mandate
# Always create user accounts for all users (audit trail)
if existingSettings:
billingModel = existingSettings.get("billingModel", "UNLIMITED")
if billingModel == BillingModelEnum.UNLIMITED.value:
return # No accounts needed for UNLIMITED
# Initial balance depends on billing model
if billingModel == BillingModelEnum.PREPAY_USER.value:
defaultCredit = existingSettings.get("defaultUserCredit", 10.0)
initialBalance = existingSettings.get("defaultUserCredit", 10.0)
else:
initialBalance = 0.0 # PREPAY_MANDATE / CREDIT_POSTPAY: budget on pool
userMandates = appInterface.getUserMandatesByMandate(mandateId)
accountsCreated = 0
@ -1318,15 +1688,14 @@ def initRootMandateBilling(mandateId: str) -> None:
if userId:
existingAccount = billingInterface.getUserAccount(mandateId, userId)
if not existingAccount:
billingInterface.getOrCreateUserAccount(mandateId, userId, initialBalance=defaultCredit)
billingInterface.getOrCreateUserAccount(mandateId, userId, initialBalance=initialBalance)
accountsCreated += 1
logger.debug(f"Created billing account for user {userId}")
if accountsCreated > 0:
logger.info(f"Created {accountsCreated} billing accounts for root mandate users with {defaultCredit} CHF each")
logger.info(f"Created {accountsCreated} billing accounts for root mandate users with {initialBalance} CHF each")
except Exception as e:
# Don't fail bootstrap if billing init fails
logger.warning(f"Failed to initialize root mandate billing (non-critical): {e}")
@ -1349,10 +1718,14 @@ def assignInitialUserMemberships(
adminUserId: Admin user ID
eventUserId: Event user ID
"""
# Use "admin" role for mandate membership (SysAdmin is a flag, not a role!)
adminRoleId = _getRoleId(db, "admin")
# Use mandate-instance "admin" role (not the global template)
mandateAdminRoles = db.getRecordset(
Role,
recordFilter={"roleLabel": "admin", "mandateId": mandateId, "featureInstanceId": None}
)
adminRoleId = mandateAdminRoles[0].get("id") if mandateAdminRoles else None
if not adminRoleId:
logger.warning("Admin role not found, skipping membership assignment")
logger.warning(f"Admin role not found for mandate {mandateId}, skipping membership assignment")
return
for userId, userName in [(adminUserId, "admin"), (eventUserId, "event")]:
@ -1364,7 +1737,6 @@ def assignInitialUserMemberships(
if existingMemberships:
userMandateId = existingMemberships[0].get("id")
logger.debug(f"UserMandate already exists for {userName} user")
else:
# Create UserMandate
userMandate = UserMandate(

View file

@ -153,9 +153,6 @@ class AppObjects:
userId=self.userId,
)
# Initialize database system
self.db.initDbSystem()
logger.info(f"Database initialized successfully for user {self.userId}")
except Exception as e:
logger.error(f"Failed to initialize database: {str(e)}")
@ -482,17 +479,12 @@ class AppObjects:
"""Returns the initial ID for a table."""
return self.db.getInitialId(model_class)
def _getDefaultMandateId(self) -> str:
"""Get the default mandate ID, creating it if necessary."""
defaultMandateId = self.getInitialId(Mandate)
if not defaultMandateId:
# If no default mandate exists, create one
logger.warning("No default mandate found, creating Root mandate")
self._initRootMandate()
defaultMandateId = self.getInitialId(Mandate)
if not defaultMandateId:
raise ValueError("Failed to get or create default mandate")
return defaultMandateId
def _getRootMandateId(self) -> Optional[str]:
"""Get the root mandate ID (name='root', isSystem=True)."""
rootMandates = self.db.getRecordset(Mandate, recordFilter={"name": "root", "isSystem": True})
if rootMandates:
return rootMandates[0].get("id")
return None
def _getPasswordHash(self, password: str) -> str:
"""Creates a hash for a password."""
@ -757,8 +749,9 @@ class AppObjects:
# Clear cache to ensure fresh data (already done above)
# Grant access to root mandate features (chatplayground, automation)
self._grantRootMandateFeatureAccess(createdUser[0]["id"])
# Assign new user to the root mandate with system 'viewer' role
userId = createdUser[0]["id"]
self._assignUserToRootMandate(userId)
return User(**createdUser[0])
@ -823,98 +816,47 @@ class AppObjects:
logger.error(f"Error updating user: {str(e)}")
raise ValueError(f"Failed to update user: {str(e)}")
def _grantRootMandateFeatureAccess(self, userId: str) -> None:
def _assignUserToRootMandate(self, userId: str) -> None:
"""
Grant a new user access to root mandate features (chatplayground, automation).
Creates FeatureAccess with viewer role for each feature instance.
Assign a new user to the root mandate with the mandate-instance 'viewer' role.
This ensures every user has a base membership in the system mandate.
Uses the mandate-instance role (mandateId=rootMandateId), not the global template.
Feature instance access is NOT granted here - it is managed separately
via invitations or admin assignment.
Args:
userId: User ID to grant access to
userId: User ID to assign
"""
try:
from modules.datamodels.datamodelFeatures import FeatureInstance
from modules.datamodels.datamodelMembership import FeatureAccess, FeatureAccessRole
from modules.datamodels.datamodelRbac import Role
# Get root mandate ID (first mandate in system)
allMandates = self.db.getRecordset(Mandate)
if not allMandates:
logger.debug("No mandates found, skipping feature access grant")
return
rootMandateId = allMandates[0].get("id")
# Feature codes to grant access to
rootFeatureCodes = ["chatplayground", "automation"]
# Get feature instances for root mandate
allInstances = self.db.getRecordset(FeatureInstance)
featureInstances = [
inst for inst in allInstances
if inst.get("mandateId") == rootMandateId
and inst.get("featureCode") in rootFeatureCodes
and inst.get("enabled") == True
]
if not featureInstances:
logger.debug("No root mandate feature instances found, skipping feature access grant")
rootMandateId = self._getRootMandateId()
if not rootMandateId:
logger.warning("No root mandate found, skipping root mandate assignment")
return
# Grant access to each feature instance
for instance in featureInstances:
instanceId = instance.get("id")
featureCode = instance.get("featureCode")
# Check if user already has a mandate membership
existing = self.getUserMandate(userId, rootMandateId)
if existing:
logger.debug(f"User {userId} already assigned to root mandate")
return
# Check if user already has access
existingAccess = self.db.getRecordset(
FeatureAccess,
recordFilter={
"userId": userId,
"featureInstanceId": instanceId
}
# Find the mandate-instance 'viewer' role (bound to this mandate, not a global template)
mandateViewerRoles = self.db.getRecordset(
Role,
recordFilter={"roleLabel": "viewer", "mandateId": rootMandateId, "featureInstanceId": None}
)
viewerRoleId = mandateViewerRoles[0].get("id") if mandateViewerRoles else None
if existingAccess:
logger.debug(f"User {userId} already has access to feature instance {instanceId}")
continue
roleIds = [viewerRoleId] if viewerRoleId else []
# Create FeatureAccess
featureAccess = FeatureAccess(
userId=userId,
featureInstanceId=instanceId,
enabled=True
)
createdAccess = self.db.recordCreate(FeatureAccess, featureAccess.model_dump())
if not createdAccess:
logger.warning(f"Failed to create FeatureAccess for user {userId} to instance {instanceId}")
continue
featureAccessId = createdAccess.get("id")
# Get viewer role for this feature instance
allRoles = self.db.getRecordset(Role)
viewerRoles = [
r for r in allRoles
if r.get("featureInstanceId") == instanceId
and r.get("roleLabel") == "viewer"
]
if viewerRoles:
# Create FeatureAccessRole junction
featureAccessRole = FeatureAccessRole(
featureAccessId=featureAccessId,
roleId=viewerRoles[0].get("id")
)
self.db.recordCreate(FeatureAccessRole, featureAccessRole.model_dump())
logger.debug(f"Granted viewer role for {featureCode} to user {userId}")
else:
logger.warning(f"No viewer role found for feature instance {instanceId} ({featureCode})")
logger.info(f"Granted root mandate feature access to user {userId}")
self.createUserMandate(userId, rootMandateId, roleIds)
logger.info(f"Assigned user {userId} to root mandate with viewer role")
except Exception as e:
# Log but don't fail user creation
logger.error(f"Error granting root mandate feature access to user {userId}: {e}")
logger.error(f"Error assigning user {userId} to root mandate: {e}")
def disableUser(self, userId: str) -> User:
"""Disables a user if current user has permission."""
@ -1500,7 +1442,10 @@ class AppObjects:
return Mandate(**filteredMandates[0])
def createMandate(self, name: str, description: str = None, enabled: bool = True) -> Mandate:
"""Creates a new mandate if user has permission."""
"""
Creates a new mandate if user has permission.
Automatically copies system template roles (admin, user, viewer) to the new mandate.
"""
if not self.checkRbacPermission(Mandate, "create"):
raise PermissionError("No permission to create mandates")
@ -1512,6 +1457,16 @@ class AppObjects:
if not createdRecord or not createdRecord.get("id"):
raise ValueError("Failed to create mandate record")
mandateId = createdRecord.get("id")
# Copy system template roles to new mandate (admin, user, viewer + AccessRules)
try:
from modules.interfaces.interfaceBootstrap import copySystemRolesToMandate
copiedCount = copySystemRolesToMandate(self.db, mandateId)
logger.info(f"Copied {copiedCount} system roles to new mandate {mandateId}")
except Exception as e:
logger.error(f"Error copying system roles to mandate {mandateId}: {e}")
return Mandate(**createdRecord)
def updateMandate(self, mandateId: str, updateData: Dict[str, Any]) -> Mandate:
@ -1526,9 +1481,13 @@ class AppObjects:
if not mandate:
raise ValueError(f"Mandate {mandateId} not found")
# Strip immutable/protected fields from update data
_protectedFields = {"id", "isSystem"}
_sanitizedData = {k: v for k, v in updateData.items() if k not in _protectedFields}
# Update mandate data using model
updatedData = mandate.model_dump()
updatedData.update(updateData)
updatedData.update(_sanitizedData)
updatedMandate = Mandate(**updatedData)
# Update mandate record
@ -1548,13 +1507,17 @@ class AppObjects:
raise ValueError(f"Failed to update mandate: {str(e)}")
def deleteMandate(self, mandateId: str) -> bool:
"""Deletes a mandate if user has access."""
"""Deletes a mandate if user has access. System mandates cannot be deleted."""
try:
# Check if mandate exists and user has access
mandate = self.getMandate(mandateId)
if not mandate:
return False
# System mandates (isSystem=True) cannot be deleted
if getattr(mandate, "isSystem", False):
raise ValueError(f"System mandate '{mandate.name}' cannot be deleted")
if not self.checkRbacPermission(Mandate, "delete", mandateId):
raise PermissionError(f"No permission to delete mandate {mandateId}")
@ -1677,7 +1640,10 @@ class AppObjects:
def _ensureUserBillingAccount(self, userId: str, mandateId: str) -> None:
"""
Ensure a user has a billing account for the mandate if billing is configured.
Creates account with default credit from settings if billingModel is PREPAY_USER.
User accounts are always created for all billing models (for audit trail).
Initial balance depends on billing model:
- PREPAY_USER: defaultUserCredit from settings
- PREPAY_MANDATE / CREDIT_POSTPAY: 0.0 (budget is on mandate pool)
Args:
userId: User ID
@ -1694,15 +1660,19 @@ class AppObjects:
return # No billing configured for this mandate
billingModel = settings.get("billingModel", "UNLIMITED")
if billingModel != BillingModelEnum.PREPAY_USER.value:
return # Only create user accounts for PREPAY_USER model
if billingModel == BillingModelEnum.UNLIMITED.value:
return # No accounts needed for UNLIMITED
defaultCredit = settings.get("defaultUserCredit", 10.0)
billingInterface.getOrCreateUserAccount(mandateId, userId, initialBalance=defaultCredit)
logger.info(f"Created billing account for user {userId} in mandate {mandateId} with {defaultCredit} CHF")
# Initial balance depends on billing model
if billingModel == BillingModelEnum.PREPAY_USER.value:
initialBalance = settings.get("defaultUserCredit", 10.0)
else:
initialBalance = 0.0 # PREPAY_MANDATE / CREDIT_POSTPAY: budget is on pool
billingInterface.getOrCreateUserAccount(mandateId, userId, initialBalance=initialBalance)
logger.info(f"Ensured billing account for user {userId} in mandate {mandateId} (model={billingModel}, initial={initialBalance} CHF)")
except Exception as e:
# Don't fail user mandate creation if billing account creation fails
logger.warning(f"Failed to create billing account for user {userId} (non-critical): {e}")
def deleteUserMandate(self, userId: str, mandateId: str) -> bool:

View file

@ -417,7 +417,12 @@ class BillingObjects:
def ensureAllUserAccountsExist(self) -> int:
"""
Efficiently ensure all users across all mandates have billing accounts.
Ensure all users across all mandates have billing accounts.
User accounts are always created regardless of billing model (for audit trail).
Initial balance depends on billing model:
- PREPAY_USER: defaultUserCredit from settings
- PREPAY_MANDATE / CREDIT_POSTPAY: 0.0 (budget is on pool)
Uses bulk queries to minimize database connections.
Returns:
@ -426,29 +431,31 @@ class BillingObjects:
try:
accountsCreated = 0
# Step 1: Get all billing settings in one query (only PREPAY_USER mandates need user accounts)
# Step 1: Get all billing settings (all models except UNLIMITED need user accounts)
allSettings = self.db.getRecordset(BillingSettings)
prepayUserMandates = {}
billingMandates = {} # mandateId -> (billingModel, defaultCredit)
for s in allSettings:
if s.get("billingModel") == BillingModelEnum.PREPAY_USER.value:
prepayUserMandates[s.get("mandateId")] = s.get("defaultUserCredit", 10.0)
billingModel = s.get("billingModel", BillingModelEnum.UNLIMITED.value)
if billingModel == BillingModelEnum.UNLIMITED.value:
continue
defaultCredit = s.get("defaultUserCredit", 10.0) if billingModel == BillingModelEnum.PREPAY_USER.value else 0.0
billingMandates[s.get("mandateId")] = (billingModel, defaultCredit)
if not prepayUserMandates:
logger.debug("No PREPAY_USER mandates found, skipping account check")
if not billingMandates:
logger.debug("No billable mandates found, skipping account check")
return 0
# Step 2: Get all existing USER accounts in one query (from billing DB)
# Step 2: Get all existing USER accounts in one query
allAccounts = self.db.getRecordset(
BillingAccount,
recordFilter={"accountType": AccountTypeEnum.USER.value}
)
# Build set of existing (mandateId, userId) pairs
existingAccountKeys = set()
for acc in allAccounts:
key = (acc.get("mandateId"), acc.get("userId"))
existingAccountKeys.add(key)
# Step 3: Get all user-mandate combinations from APP database (separate connection)
# Step 3: Get all user-mandate combinations from APP database
appDb = DatabaseConnector(
dbDatabase=APP_CONFIG.get('DB_DATABASE', 'poweron_app'),
dbHost=APP_CONFIG.get('DB_HOST', 'localhost'),
@ -461,7 +468,7 @@ class BillingObjects:
recordFilter={"enabled": True}
)
# Step 4: Find missing accounts and create them
# Step 4: Create missing accounts
for um in allUserMandates:
mandateId = um.get("mandateId")
userId = um.get("userId")
@ -469,17 +476,15 @@ class BillingObjects:
if not mandateId or not userId:
continue
# Only process mandates with PREPAY_USER billing
if mandateId not in prepayUserMandates:
if mandateId not in billingMandates:
continue
# Check if account already exists (in memory, no DB call)
key = (mandateId, userId)
if key in existingAccountKeys:
continue
# Create missing account
defaultCredit = prepayUserMandates[mandateId]
billingModel, defaultCredit = billingMandates[mandateId]
account = BillingAccount(
mandateId=mandateId,
userId=userId,
@ -489,7 +494,6 @@ class BillingObjects:
)
created = self.createAccount(account)
# Create initial credit transaction
if defaultCredit > 0:
self.createTransaction(BillingTransaction(
accountId=created["id"],
@ -499,7 +503,7 @@ class BillingObjects:
referenceType=ReferenceTypeEnum.SYSTEM
))
existingAccountKeys.add(key) # Track newly created
existingAccountKeys.add(key)
accountsCreated += 1
if accountsCreated > 0:
@ -515,22 +519,37 @@ class BillingObjects:
# BillingTransaction Operations
# =========================================================================
def createTransaction(self, transaction: BillingTransaction) -> Dict[str, Any]:
def createTransaction(self, transaction: BillingTransaction, balanceAccountId: str = None) -> Dict[str, Any]:
"""
Create a new billing transaction and update account balance.
The transaction is always recorded against transaction.accountId (audit trail).
The balance is updated on balanceAccountId if provided, otherwise on transaction.accountId.
This allows recording a transaction on a user account (audit) while deducting
from a mandate pool account (shared budget).
Args:
transaction: BillingTransaction object
balanceAccountId: Optional account ID for balance update (defaults to transaction.accountId)
Returns:
Created transaction dict
"""
# Get current account
account = self.getAccount(transaction.accountId)
if not account:
raise ValueError(f"Account {transaction.accountId} not found")
# Validate that the transaction's account exists
txAccount = self.getAccount(transaction.accountId)
if not txAccount:
raise ValueError(f"Transaction account {transaction.accountId} not found")
currentBalance = account.get("balance", 0.0)
# Determine which account to update balance on
targetBalanceAccountId = balanceAccountId or transaction.accountId
if targetBalanceAccountId == transaction.accountId:
balanceAccount = txAccount
else:
balanceAccount = self.getAccount(targetBalanceAccountId)
if not balanceAccount:
raise ValueError(f"Balance account {targetBalanceAccountId} not found")
currentBalance = balanceAccount.get("balance", 0.0)
# Calculate new balance
if transaction.transactionType == TransactionTypeEnum.CREDIT:
@ -538,17 +557,17 @@ class BillingObjects:
elif transaction.transactionType == TransactionTypeEnum.DEBIT:
newBalance = currentBalance - transaction.amount
else: # ADJUSTMENT
newBalance = currentBalance + transaction.amount # Can be positive or negative
newBalance = currentBalance + transaction.amount
# Create transaction
# Create transaction record (always on transaction.accountId for audit)
transactionDict = transaction.model_dump(exclude_none=True)
created = self.db.recordCreate(BillingTransaction, transactionDict)
# Update account balance
self.updateAccountBalance(transaction.accountId, newBalance)
# Update balance on the target account
self.updateAccountBalance(targetBalanceAccountId, newBalance)
logger.info(f"Billing transaction created: {transaction.transactionType.value} {transaction.amount} CHF, "
f"balance: {currentBalance} -> {newBalance}")
f"audit={transaction.accountId}, balance on {targetBalanceAccountId}: {currentBalance} -> {newBalance}")
return created
@ -631,6 +650,14 @@ class BillingObjects:
"""
Check if there's sufficient balance for an operation.
Budget logic:
- PREPAY_USER: check user's own account balance
- PREPAY_MANDATE: check mandate pool balance (shared by all users)
- CREDIT_POSTPAY: check mandate pool credit limit
- UNLIMITED: always allowed
User accounts are always ensured to exist (for audit trail).
Args:
mandateId: Mandate ID
userId: User ID
@ -641,43 +668,29 @@ class BillingObjects:
"""
settings = self.getSettings(mandateId)
if not settings:
# No settings = no billing = allowed
return BillingCheckResult(allowed=True, billingModel=BillingModelEnum.UNLIMITED)
billingModel = BillingModelEnum(settings.get("billingModel", BillingModelEnum.UNLIMITED.value))
# UNLIMITED = always allowed
if billingModel == BillingModelEnum.UNLIMITED:
return BillingCheckResult(allowed=True, billingModel=billingModel)
# Get the relevant account
# Always ensure user account exists (for audit trail)
defaultCredit = settings.get("defaultUserCredit", 10.0)
initialBalance = defaultCredit if billingModel == BillingModelEnum.PREPAY_USER else 0.0
self.getOrCreateUserAccount(mandateId, userId, initialBalance=initialBalance)
# Determine which balance to check based on billing model
if billingModel == BillingModelEnum.PREPAY_USER:
account = self.getUserAccount(mandateId, userId)
# Auto-create user account if not exists (with default credit from settings)
if not account:
defaultCredit = settings.get("defaultUserCredit", 10.0)
logger.info(f"Auto-creating billing account for user {userId} in mandate {mandateId} with {defaultCredit} CHF initial credit")
account = self.getOrCreateUserAccount(mandateId, userId, initialBalance=defaultCredit)
else:
account = self.getMandateAccount(mandateId)
if not account:
# No account (only happens for mandate-level accounts) = potentially blocked
if settings.get("blockOnZeroBalance", True):
return BillingCheckResult(
allowed=False,
reason="NO_ACCOUNT",
currentBalance=0.0,
requiredAmount=estimatedCost,
billingModel=billingModel
)
return BillingCheckResult(allowed=True, currentBalance=0.0, billingModel=billingModel)
currentBalance = account.get("balance", 0.0)
# CREDIT_POSTPAY with credit limit check
if billingModel == BillingModelEnum.CREDIT_POSTPAY:
creditLimit = account.get("creditLimit")
currentBalance = account.get("balance", 0.0) if account else 0.0
elif billingModel == BillingModelEnum.PREPAY_MANDATE:
poolAccount = self.getOrCreateMandateAccount(mandateId)
currentBalance = poolAccount.get("balance", 0.0)
elif billingModel == BillingModelEnum.CREDIT_POSTPAY:
poolAccount = self.getOrCreateMandateAccount(mandateId)
currentBalance = poolAccount.get("balance", 0.0)
creditLimit = poolAccount.get("creditLimit")
if creditLimit and abs(currentBalance) + estimatedCost > creditLimit:
return BillingCheckResult(
allowed=False,
@ -687,6 +700,8 @@ class BillingObjects:
billingModel=billingModel
)
return BillingCheckResult(allowed=True, currentBalance=currentBalance, billingModel=billingModel)
else:
return BillingCheckResult(allowed=True, billingModel=billingModel)
# PREPAY models - check balance
if currentBalance < estimatedCost:
@ -716,6 +731,12 @@ class BillingObjects:
"""
Record usage cost as a billing transaction.
Transaction is ALWAYS recorded on the user's account (clean audit trail).
Balance is deducted from the appropriate account based on billing model:
- PREPAY_USER: deduct from user's own balance
- PREPAY_MANDATE: deduct from mandate pool balance
- CREDIT_POSTPAY: deduct from mandate pool balance
Args:
mandateId: Mandate ID
userId: User ID
@ -740,19 +761,14 @@ class BillingObjects:
billingModel = BillingModelEnum(settings.get("billingModel", BillingModelEnum.UNLIMITED.value))
# UNLIMITED = no transaction recording
if billingModel == BillingModelEnum.UNLIMITED:
return None
# Get or create the relevant account
if billingModel == BillingModelEnum.PREPAY_USER:
account = self.getOrCreateUserAccount(mandateId, userId)
else:
account = self.getOrCreateMandateAccount(mandateId)
# Transaction is ALWAYS on the user's account (audit trail)
userAccount = self.getOrCreateUserAccount(mandateId, userId)
# Create debit transaction
transaction = BillingTransaction(
accountId=account["id"],
accountId=userAccount["id"],
transactionType=TransactionTypeEnum.DEBIT,
amount=priceCHF,
description=description,
@ -765,7 +781,84 @@ class BillingObjects:
createdByUserId=userId
)
# Determine where to deduct balance
if billingModel == BillingModelEnum.PREPAY_USER:
# Deduct from user's own balance
return self.createTransaction(transaction)
else:
# PREPAY_MANDATE / CREDIT_POSTPAY: deduct from mandate pool
poolAccount = self.getOrCreateMandateAccount(mandateId)
return self.createTransaction(transaction, balanceAccountId=poolAccount["id"])
# =========================================================================
# Billing Model Switch Operations
# =========================================================================
def switchBillingModel(self, mandateId: str, oldModel: BillingModelEnum, newModel: BillingModelEnum) -> Dict[str, Any]:
"""
Switch billing model with automatic budget migration.
MANDATE -> USER: pool balance is distributed equally to all user accounts.
USER -> MANDATE: all user balances are consolidated into the pool, user balances set to 0.
Args:
mandateId: Mandate ID
oldModel: Current billing model
newModel: New billing model
Returns:
Migration result dict with details
"""
result = {"oldModel": oldModel.value, "newModel": newModel.value, "migratedAmount": 0.0, "userCount": 0}
if oldModel == newModel:
return result
if oldModel == BillingModelEnum.PREPAY_MANDATE and newModel == BillingModelEnum.PREPAY_USER:
# Pool -> distribute equally to users
poolAccount = self.getMandateAccount(mandateId)
if poolAccount and poolAccount.get("balance", 0.0) > 0:
poolBalance = poolAccount["balance"]
userAccounts = self.db.getRecordset(
BillingAccount,
recordFilter={"mandateId": mandateId, "accountType": AccountTypeEnum.USER.value}
)
if userAccounts:
perUser = poolBalance / len(userAccounts)
for acc in userAccounts:
newBalance = acc.get("balance", 0.0) + perUser
self.updateAccountBalance(acc["id"], newBalance)
self.updateAccountBalance(poolAccount["id"], 0.0)
result["migratedAmount"] = poolBalance
result["userCount"] = len(userAccounts)
logger.info(f"Switched {mandateId} MANDATE->USER: distributed {result['migratedAmount']} CHF to {result['userCount']} users")
elif oldModel == BillingModelEnum.PREPAY_USER and newModel == BillingModelEnum.PREPAY_MANDATE:
# Users -> consolidate into pool
userAccounts = self.db.getRecordset(
BillingAccount,
recordFilter={"mandateId": mandateId, "accountType": AccountTypeEnum.USER.value}
)
totalUserBalance = sum(acc.get("balance", 0.0) for acc in userAccounts)
poolAccount = self.getOrCreateMandateAccount(mandateId, initialBalance=0.0)
newPoolBalance = poolAccount.get("balance", 0.0) + totalUserBalance
self.updateAccountBalance(poolAccount["id"], newPoolBalance)
for acc in userAccounts:
self.updateAccountBalance(acc["id"], 0.0)
result["migratedAmount"] = totalUserBalance
result["userCount"] = len(userAccounts)
logger.info(f"Switched {mandateId} USER->MANDATE: consolidated {totalUserBalance} CHF from {len(userAccounts)} users into pool")
elif newModel == BillingModelEnum.PREPAY_MANDATE or newModel == BillingModelEnum.CREDIT_POSTPAY:
# Any -> MANDATE/CREDIT: ensure pool account exists
self.getOrCreateMandateAccount(mandateId, initialBalance=0.0)
return result
# =========================================================================
# Statistics Operations
@ -862,6 +955,11 @@ class BillingObjects:
"""
Get all billing balances for a user across mandates.
Shows the effective available budget:
- PREPAY_USER: user's own account balance
- PREPAY_MANDATE: mandate pool balance (shared budget visible to user)
- CREDIT_POSTPAY: mandate pool balance
Args:
userId: User ID
@ -872,13 +970,11 @@ class BillingObjects:
balances = []
# Get all mandates the user belongs to
try:
appInterface = getAppInterface(self.currentUser)
userMandates = appInterface.getUserMandates(userId)
for um in userMandates:
# Handle both Pydantic models and dicts
mandateId = getattr(um, 'mandateId', None) or (um.get("mandateId") if isinstance(um, dict) else None)
if not mandateId:
continue
@ -887,7 +983,6 @@ class BillingObjects:
if not mandate:
continue
# Get mandate name (handle both Pydantic and dict)
mandateName = getattr(mandate, 'name', None) or (mandate.get("name", "") if isinstance(mandate, dict) else "")
settings = self.getSettings(mandateId)
@ -895,20 +990,26 @@ class BillingObjects:
continue
billingModel = BillingModelEnum(settings.get("billingModel", BillingModelEnum.UNLIMITED.value))
if billingModel == BillingModelEnum.UNLIMITED:
continue
# Get the relevant account
# Determine effective balance based on billing model
if billingModel == BillingModelEnum.PREPAY_USER:
account = self.getUserAccount(mandateId, userId)
elif billingModel in [BillingModelEnum.PREPAY_MANDATE, BillingModelEnum.CREDIT_POSTPAY]:
account = self.getMandateAccount(mandateId)
else:
continue
if not account:
continue
balance = account.get("balance", 0.0)
warningThreshold = account.get("warningThreshold", 0.0)
creditLimit = account.get("creditLimit")
elif billingModel in [BillingModelEnum.PREPAY_MANDATE, BillingModelEnum.CREDIT_POSTPAY]:
poolAccount = self.getMandateAccount(mandateId)
if not poolAccount:
continue
balance = poolAccount.get("balance", 0.0)
warningThreshold = poolAccount.get("warningThreshold", 0.0)
creditLimit = poolAccount.get("creditLimit")
else:
continue
balances.append(BillingBalanceResponse(
mandateId=mandateId,
@ -917,7 +1018,7 @@ class BillingObjects:
balance=balance,
warningThreshold=warningThreshold,
isWarning=balance <= warningThreshold,
creditLimit=account.get("creditLimit")
creditLimit=creditLimit
))
except Exception as e:
logger.error(f"Error getting balances for user: {e}")
@ -927,6 +1028,8 @@ class BillingObjects:
def getTransactionsForUser(self, userId: str, limit: int = 100) -> List[Dict[str, Any]]:
"""
Get all transactions for a user across all mandates they belong to.
Since transactions are always recorded on user accounts, we query
directly by user account - clean and simple.
Args:
userId: User ID
@ -944,20 +1047,22 @@ class BillingObjects:
userMandates = appInterface.getUserMandates(userId)
for um in userMandates:
# Handle both Pydantic models and dicts
mandateId = getattr(um, 'mandateId', None) or (um.get("mandateId") if isinstance(um, dict) else None)
if not mandateId:
continue
# Only include mandates with billing settings
settings = self.getSettings(mandateId)
if not settings:
continue
# Get transactions for this mandate
transactions = self.getTransactionsByMandate(mandateId, limit=limit)
# Get user's account in this mandate
userAccount = self.getUserAccount(mandateId, userId)
if not userAccount:
continue
# Get transactions for user's account (all transactions are on user accounts now)
transactions = self.getTransactions(userAccount["id"], limit=limit)
# Add mandate context to each transaction
mandate = appInterface.getMandate(mandateId)
mandateName = ""
if mandate:
@ -971,7 +1076,6 @@ class BillingObjects:
except Exception as e:
logger.error(f"Error getting transactions for user: {e}")
# Sort by creation date descending and limit
allTransactions.sort(key=lambda x: x.get("_createdAt", ""), reverse=True)
return allTransactions[:limit]
@ -1016,23 +1120,23 @@ class BillingObjects:
if mandate:
mandateName = getattr(mandate, 'name', None) or (mandate.get("name", "") if isinstance(mandate, dict) else "")
# For PREPAY_MANDATE, get the mandate account balance
# For PREPAY_USER, aggregate all user balances
if billingModel == BillingModelEnum.PREPAY_MANDATE:
account = self.getMandateAccount(mandateId)
totalBalance = account.get("balance", 0.0) if account else 0.0
userCount = 0
elif billingModel == BillingModelEnum.PREPAY_USER:
# Get all user accounts for this mandate
# Get user accounts count (always exist now for audit trail)
userAccounts = self.db.getRecordset(
BillingAccount,
recordFilter={"mandateId": mandateId, "accountType": AccountTypeEnum.USER.value}
)
totalBalance = sum(acc.get("balance", 0.0) for acc in userAccounts)
userCount = len(userAccounts)
# Total balance depends on billing model
if billingModel == BillingModelEnum.PREPAY_USER:
# Budget is distributed across user accounts
totalBalance = sum(acc.get("balance", 0.0) for acc in userAccounts)
elif billingModel in [BillingModelEnum.PREPAY_MANDATE, BillingModelEnum.CREDIT_POSTPAY]:
# Budget is in the mandate pool
poolAccount = self.getMandateAccount(mandateId)
totalBalance = poolAccount.get("balance", 0.0) if poolAccount else 0.0
else:
totalBalance = 0.0
userCount = 0
balances.append({
"mandateId": mandateId,
@ -1183,7 +1287,8 @@ class BillingObjects:
def getUserTransactionsForMandates(self, mandateIds: List[str] = None, limit: int = 100) -> List[Dict[str, Any]]:
"""
Get all transactions for specified mandates (both USER and MANDATE accounts).
Get all transactions for specified mandates.
All usage transactions are on user accounts (audit trail).
Args:
mandateIds: Optional list of mandate IDs to filter. If None, returns all.

View file

@ -329,9 +329,6 @@ class ChatObjects:
userId=self.userId
)
# Initialize database system
self.db.initDbSystem()
logger.info("Database initialized successfully")
except Exception as e:
logger.error(f"Failed to initialize database: {str(e)}")

View file

@ -141,9 +141,6 @@ class ComponentObjects:
userId=self.userId if hasattr(self, 'userId') else None
)
# Initialize database system
self.db.initDbSystem()
logger.info("Database initialized successfully")
except Exception as e:
logger.error(f"Failed to initialize database: {str(e)}")

View file

@ -455,11 +455,8 @@ def getStatistics(
billingModel = BillingModelEnum(settings.get("billingModel", BillingModelEnum.UNLIMITED.value))
# Get the relevant account
if billingModel == BillingModelEnum.PREPAY_USER:
# Transactions are always on user accounts (audit trail)
account = billingInterface.getUserAccount(ctx.mandateId, ctx.user.id)
else:
account = billingInterface.getMandateAccount(ctx.mandateId)
if not account:
return UsageReportResponse(
@ -578,14 +575,20 @@ def createOrUpdateSettings(
existingSettings = billingInterface.getSettings(targetMandateId)
if existingSettings:
# Update existing settings
updates = settingsUpdate.model_dump(exclude_none=True)
if updates:
# Check if billing model is changing - trigger budget migration
if "billingModel" in updates:
oldModel = BillingModelEnum(existingSettings.get("billingModel", BillingModelEnum.UNLIMITED.value))
newModel = BillingModelEnum(updates["billingModel"]) if isinstance(updates["billingModel"], str) else updates["billingModel"]
if oldModel != newModel:
migrationResult = billingInterface.switchBillingModel(targetMandateId, oldModel, newModel)
logger.info(f"Billing model migration for {targetMandateId}: {migrationResult}")
result = billingInterface.updateSettings(existingSettings["id"], updates)
return result or existingSettings
return existingSettings
else:
# Create new settings
from modules.datamodels.datamodelBilling import BillingSettings
newSettings = BillingSettings(

View file

@ -41,6 +41,7 @@ class InvitationCreate(BaseModel):
email: Optional[str] = Field(None, description="Email address to send invitation link (optional)")
roleIds: List[str] = Field(..., description="Role IDs to assign to the invited user")
featureInstanceId: Optional[str] = Field(None, description="Optional feature instance access")
frontendUrl: str = Field(..., description="Frontend URL for building the invite link (provided by frontend)")
expiresInHours: int = Field(
72,
ge=1,
@ -178,10 +179,9 @@ def create_invitation(
if not createdRecord:
raise ValueError("Failed to create invitation record")
# Build invite URL
from modules.shared.configuration import APP_CONFIG
frontendUrl = APP_CONFIG.get("APP_FRONTEND_URL", "http://localhost:8080")
inviteUrl = f"{frontendUrl}/invite/{invitation.token}"
# Build invite URL using frontend URL provided by the caller
baseUrl = data.frontendUrl.rstrip("/")
inviteUrl = f"{baseUrl}/invite/{invitation.token}"
# Send email if email address is provided
emailSent = False
@ -302,6 +302,7 @@ def create_invitation(
@limiter.limit("60/minute")
def list_invitations(
request: Request,
frontendUrl: str = Query(..., description="Frontend URL for building invite links (provided by frontend)"),
includeUsed: bool = Query(False, description="Include already used invitations"),
includeExpired: bool = Query(False, description="Include expired invitations"),
context: RequestContext = Depends(getRequestContext)
@ -353,10 +354,9 @@ def list_invitations(
if not includeExpired and expiresAt < currentTime:
continue
# Build invite URL
from modules.shared.configuration import APP_CONFIG
frontendUrl = APP_CONFIG.get("APP_FRONTEND_URL", "http://localhost:8080")
inviteUrl = f"{frontendUrl}/invite/{inv.token}"
# Build invite URL using frontend URL provided by the caller
baseUrl = frontendUrl.rstrip("/")
inviteUrl = f"{baseUrl}/invite/{inv.token}"
result.append({
**inv.model_dump(),

View file

@ -13,7 +13,7 @@ Multi-Tenant Design:
import logging
from typing import List, Optional, TYPE_CHECKING
from modules.datamodels.datamodelRbac import AccessRule, AccessRuleContext, Role
from modules.datamodels.datamodelUam import User, UserPermissions, AccessLevel, Mandate
from modules.datamodels.datamodelUam import User, UserPermissions, AccessLevel
from modules.datamodels.datamodelMembership import (
UserMandate,
UserMandateRole,
@ -155,10 +155,16 @@ class RbacClass:
) -> List[str]:
"""
Get all role IDs for a user in the given context.
Uses UserMandate + UserMandateRole for the new multi-tenant model.
Uses UserMandate + UserMandateRole for the multi-tenant model.
Also includes roles from the Root mandate (first mandate) if different
from the requested mandate, so system-level permissions are always available.
Each mandate has its own instances of system roles (admin, user, viewer)
which are copied from the global templates during mandate creation.
Therefore, only the requested mandate's roles are loaded - no need to
load root mandate roles separately.
Loads roles from:
1. The requested mandate (if provided) - includes mandate-instance system roles
2. Feature instance roles (if featureInstanceId provided)
Args:
user: User object
@ -171,24 +177,11 @@ class RbacClass:
roleIds = set() # Use set to avoid duplicates
try:
# Get Root mandate ID (first mandate in system)
allMandates = self.dbApp.getRecordset(Mandate)
rootMandateId = allMandates[0]["id"] if allMandates else None
# Collect mandates to check:
# - If mandateId provided: current mandate + Root mandate (if different)
# - If no mandateId: just Root mandate (for system-level access)
mandatesToCheck = []
# Load roles from the requested mandate
if mandateId:
mandatesToCheck.append(mandateId)
if rootMandateId and rootMandateId not in mandatesToCheck:
mandatesToCheck.append(rootMandateId)
# Load roles from each mandate
for checkMandateId in mandatesToCheck:
userMandateRecords = self.dbApp.getRecordset(
UserMandate,
recordFilter={"userId": user.id, "mandateId": checkMandateId, "enabled": True}
recordFilter={"userId": user.id, "mandateId": mandateId, "enabled": True}
)
if userMandateRecords:

View file

@ -27,8 +27,8 @@ from modules.interfaces.interfaceDbBilling import getInterface as getBillingInte
logger = logging.getLogger(__name__)
# Markup percentage for internal pricing (50% = 1.5x)
BILLING_MARKUP_PERCENT = 50
# Markup percentage for internal pricing (+50% für Infrastruktur und Platform Service + 50% für Währungsrisiko ==> Faktor 2.0)
BILLING_MARKUP_PERCENT = 100
# Singleton cache
_billingServices: Dict[str, "BillingService"] = {}

View file

@ -113,7 +113,7 @@ class EventManagement:
self.scheduler.remove_job(jobId)
logger.info(f"Removed job '{jobId}'")
except Exception as exc:
logger.warning(f"Could not remove job '{jobId}': {exc}")
logger.debug(f"Could not remove job '{jobId}': {exc}")
# Singleton instance for easy import and reuse

View file

@ -363,6 +363,43 @@ RESOURCE_OBJECTS = [
]
def _discoverAicoreProviderObjects() -> List[Dict[str, Any]]:
"""
Dynamically discover AICore provider resources for the RBAC catalog.
Providers are discovered from the model registry at startup.
"""
providerLabels = {
"anthropic": {"en": "Anthropic (Claude)", "de": "Anthropic (Claude)", "fr": "Anthropic (Claude)"},
"openai": {"en": "OpenAI (GPT)", "de": "OpenAI (GPT)", "fr": "OpenAI (GPT)"},
"perplexity": {"en": "Perplexity", "de": "Perplexity", "fr": "Perplexity"},
"tavily": {"en": "Tavily (Web Search)", "de": "Tavily (Websuche)", "fr": "Tavily (Recherche Web)"},
"privatellm": {"en": "Private LLM", "de": "Private LLM", "fr": "LLM Privé"},
"internal": {"en": "Internal", "de": "Intern", "fr": "Interne"},
}
try:
from modules.aicore.aicoreModelRegistry import modelRegistry
connectors = modelRegistry.discoverConnectors()
providers = [c.getConnectorType() for c in connectors]
objects = []
for provider in providers:
label = providerLabels.get(provider, {"en": provider, "de": provider, "fr": provider})
objects.append({
"objectKey": f"resource.aicore.{provider}",
"label": label,
"meta": {"provider": provider, "category": "aicore"}
})
if objects:
logger.info(f"Discovered {len(objects)} AICore provider catalog objects: {providers}")
return objects
except Exception as e:
logger.warning(f"Failed to discover AICore providers for catalog: {e}")
return []
def registerFeature(catalogService) -> bool:
"""
Register system RBAC objects in the catalog.
@ -401,6 +438,16 @@ def registerFeature(catalogService) -> bool:
meta=resObj.get("meta")
)
# Register dynamically discovered AICore provider resources
aicoreObjects = _discoverAicoreProviderObjects()
for aicoreObj in aicoreObjects:
catalogService.registerResourceObject(
featureCode=FEATURE_CODE,
objectKey=aicoreObj["objectKey"],
label=aicoreObj["label"],
meta=aicoreObj.get("meta")
)
# Register feature definition
catalogService.registerFeatureDefinition(
featureCode=FEATURE_CODE,

View file

@ -86,22 +86,20 @@ def loadFeatureRouters(app: FastAPI) -> Dict[str, Any]:
logger.error(f"Failed to load router from {featureDir}: {e}")
results[featureDir] = {"status": "error", "error": str(e)}
# Register features in RBAC catalog and sync template roles to database
from modules.security.rbacCatalog import getCatalogService
catalogService = getCatalogService()
registrationResults = registerAllFeaturesInCatalog(catalogService)
for featureName, success in registrationResults.items():
if featureName in results:
results[featureName]["rbac_registered"] = success
return results
_cachedMainModules = None
def loadFeatureMainModules() -> Dict[str, Any]:
"""
Dynamically load main modules from all discovered feature containers.
Results are cached after the first call.
"""
global _cachedMainModules
if _cachedMainModules is not None:
return _cachedMainModules
mainModules = {}
pattern = os.path.join(FEATURES_DIR, "*", "main*.py")
@ -114,6 +112,10 @@ def loadFeatureMainModules() -> Dict[str, Any]:
if featureDir.startswith("_"):
continue
# Skip if this feature already has a main module loaded (avoid duplicates)
if featureDir in mainModules:
continue
mainFile = filename[:-3] # Remove .py
try:
@ -124,6 +126,7 @@ def loadFeatureMainModules() -> Dict[str, Any]:
except Exception as e:
logger.error(f"Failed to load main module from {featureDir}: {e}")
_cachedMainModules = mainModules
return mainModules

View file

@ -188,7 +188,6 @@ class WorkflowManager:
detectedLanguage = None # No language detection in automation mode
normalizedRequest = userInput.prompt
intentText = userInput.prompt
contextItems = []
workflowIntent = None
else:
# Process user-uploaded documents from userInput for combined analysis
@ -206,7 +205,6 @@ class WorkflowManager:
detectedLanguage = analysisResult.get('detectedLanguage')
normalizedRequest = analysisResult.get('normalizedRequest')
intentText = analysisResult.get('intent') or userInput.prompt
contextItems = analysisResult.get('contextItems', [])
complexity = analysisResult.get('complexity', 'moderate')
needsWorkflowHistory = analysisResult.get('needsWorkflowHistory', False)
fastTrack = analysisResult.get('fastTrack', False)
@ -251,8 +249,6 @@ class WorkflowManager:
# Fallback only if normalizedRequest is None or empty
logger.warning(f"normalizedRequest is None or empty, falling back to intentText. normalizedRequest={normalizedRequest}, intentText={intentText[:100] if intentText else None}...")
self.services.currentUserPromptNormalized = intentText or userInput.prompt
if contextItems is not None:
self.services.currentUserContextItems = contextItems
# Set detected language
if detectedLanguage and isinstance(detectedLanguage, str):
@ -305,7 +301,6 @@ class WorkflowManager:
- detectedLanguage: ISO 639-1 Sprachcode
- normalizedRequest: Vollständige, explizite Umformulierung
- intent: Kurze Kern-Anfrage
- contextItems: Große Datenblöcke als separate Dokumente
- complexity: "simple" | "moderate" | "complex"
- needsWorkflowHistory: bool
- fastTrack: bool
@ -323,24 +318,22 @@ class WorkflowManager:
analysisPrompt = f"""You are an input analyzer. From the user's message, perform ALL of the following in one pass:
1. detectedLanguage: Detect ISO 639-1 language code (e.g., de, en, fr, it)
2. normalizedRequest: Full, explicit restatement of the user's request in the detected language; do NOT summarize; preserve ALL constraints and details
2. normalizedRequest: Full, explicit restatement of the user's request in the detected language; do NOT summarize; preserve ALL constraints and details. Include all data and context from the original message
3. intent: Concise single-paragraph core request in the detected language for high-level routing
4. contextItems: Supportive data blocks to attach as separate documents if significantly larger than the intent (large literal content, long lists/tables, code/JSON blocks, transcripts, CSV fragments, detailed specs). Keep URLs in the intent unless they embed large pasted content
5. complexity: "simple" | "moderate" | "complex"
4. complexity: "simple" | "moderate" | "complex"
- "simple": Only if NO documents AND NO web search required. Single question, straightforward answer (5-15s)
- "moderate": Multiple steps, some documents, structured response requiring some processing, or web search needed (30-60s)
- "complex": Multi-task workflow, many documents, research needed, content generation required, multi-step planning (60-120s)
6. needsWorkflowHistory: Boolean indicating if this request needs previous workflow rounds/history (e.g., 'continue', 'retry', 'fix', 'improve', 'update', 'modify', 'based on previous', 'build on', references to earlier work)
7. fastTrack: Boolean indicating if Fast Track is possible (simple requests without documents and without workflow history)
8. dataType: What type of data/content they want (numbers|text|documents|analysis|code|unknown)
9. expectedFormats: What file format(s) they expect - provide matching file format extensions list (e.g., ["xlsx", "pdf"]). If format is unclear or not specified, use empty list []
10. qualityRequirements: Quality requirements they have (accuracy, completeness) as {{accuracyThreshold: 0.0-1.0, completenessThreshold: 0.0-1.0}}
11. successCriteria: Specific success criteria that define completion (array of strings)
12. workflowName: Create a concise, descriptive name for this workflow in the detected language. The name should summarize the main task or goal (e.g., "Service Report January 2026", "Email Analysis", "Document Generation"). Keep it short (max 60 characters) and meaningful.
5. needsWorkflowHistory: Boolean indicating if this request needs previous workflow rounds/history (e.g., 'continue', 'retry', 'fix', 'improve', 'update', 'modify', 'based on previous', 'build on', references to earlier work)
6. fastTrack: Boolean indicating if Fast Track is possible (simple requests without documents and without workflow history)
7. dataType: What type of data/content they want (numbers|text|documents|analysis|code|unknown)
8. expectedFormats: What file format(s) they expect - provide matching file format extensions list (e.g., ["xlsx", "pdf"]). If format is unclear or not specified, use empty list []
9. qualityRequirements: Quality requirements they have (accuracy, completeness) as {{accuracyThreshold: 0.0-1.0, completenessThreshold: 0.0-1.0}}
10. successCriteria: Specific success criteria that define completion (array of strings)
11. workflowName: Create a concise, descriptive name for this workflow in the detected language. The name should summarize the main task or goal (e.g., "Service Report January 2026", "Email Analysis", "Document Generation"). Keep it short (max 60 characters) and meaningful.
Rules:
- If total content (intent + data) is < 10% of model max tokens, do not extract; return empty contextItems and keep intent compact and self-contained
- If content exceeds that threshold, move bulky parts into contextItems; keep intent short and clear
- normalizedRequest must contain the COMPLETE restatement including all data references - do NOT strip or extract content
- Preserve critical references (URLs, filenames) in intent
- Normalize to the primary detected language if mixed-language
- Consider number of documents provided when determining complexity
@ -354,13 +347,6 @@ Return ONLY JSON (no markdown) with this exact structure:
"detectedLanguage": "de|en|fr|it|...",
"normalizedRequest": "Full explicit instruction in detected language",
"intent": "Concise normalized request...",
"contextItems": [
{{
"title": "User context 1",
"mimeType": "text/plain",
"content": "Full extracted content block here"
}}
],
"complexity": "simple" | "moderate" | "complex",
"needsWorkflowHistory": true|false,
"fastTrack": true|false,
@ -375,7 +361,7 @@ Return ONLY JSON (no markdown) with this exact structure:
}}
## User Message
The following is the user's original input message. Analyze intent, normalize the request, determine complexity, and identify any large context blocks that should be moved to separate documents:
The following is the user's original input message. Analyze intent, normalize the request, and determine complexity:
################ USER INPUT START #################
{userPrompt.replace('{', '{{').replace('}', '}}') if userPrompt else ''}
@ -410,7 +396,6 @@ The following is the user's original input message. Analyze intent, normalize th
"detectedLanguage": "en",
"normalizedRequest": "",
"intent": "",
"contextItems": [],
"complexity": "moderate",
"needsWorkflowHistory": False,
"fastTrack": False,
@ -450,7 +435,41 @@ The following is the user's original input message. Analyze intent, normalize th
"taskProgress": "pending",
"actionProgress": "pending"
}
self.services.chat.storeMessageWithDocuments(workflow, firstMessageData, [])
# Create user prompt original document + user-uploaded documents for "first" message
firstMessageDocs = []
if userInput.prompt:
try:
originalPromptBytes = userInput.prompt.encode('utf-8')
originalPromptBytes = await self._neutralizeContentIfEnabled(originalPromptBytes, "text/markdown")
fileItem = self.services.interfaceDbComponent.createFile(
name="user_prompt_original.md",
mimeType="text/markdown",
content=originalPromptBytes
)
self.services.interfaceDbComponent.createFileData(fileItem.id, originalPromptBytes)
fileInfo = self.services.chat.getFileInfo(fileItem.id)
doc = {
"fileId": fileItem.id,
"fileName": fileInfo.get("fileName", "user_prompt_original.md") if fileInfo else "user_prompt_original.md",
"fileSize": fileInfo.get("size", len(originalPromptBytes)) if fileInfo else len(originalPromptBytes),
"mimeType": fileInfo.get("mimeType", "text/markdown") if fileInfo else "text/markdown"
}
firstMessageDocs.append(doc)
logger.debug("Fast path: Stored original user prompt as document")
except Exception as e:
logger.warning(f"Fast path: Failed to store original prompt as document: {e}")
# Process user-uploaded documents (fileIds)
if userInput.listFileId:
try:
userDocs = await self._processFileIds(userInput.listFileId, None)
if userDocs:
firstMessageDocs.extend(userDocs)
except Exception as e:
logger.warning(f"Fast path: Failed to process user fileIds: {e}")
self.services.chat.storeMessageWithDocuments(workflow, firstMessageData, firstMessageDocs)
# Get user language if available
userLanguage = getattr(self.services, 'currentUserLanguage', None)
@ -587,7 +606,7 @@ The following is the user's original input message. Analyze intent, normalize th
"actionProgress": "pending"
}
# Analyze the user's input to detect language, normalize request, extract intent, and offload bulky context into documents
# Analyze the user's input to detect language, normalize request, and extract intent
# SKIP user intention analysis if already done in combined analysis (skipIntentionAnalysis=True)
# or for AUTOMATION mode - it uses predefined JSON plans
createdDocs = []
@ -600,61 +619,49 @@ The following is the user's original input message. Analyze intent, normalize th
detectedLanguage = getattr(self.services, 'currentUserLanguage', None)
normalizedRequest = getattr(self.services, 'currentUserPromptNormalized', None) or userInput.prompt
intentText = getattr(self.services, 'currentUserPrompt', None) or userInput.prompt
contextItems = getattr(self.services, 'currentUserContextItems', None) or []
workflowIntent = getattr(workflow, '_workflowIntent', None)
# Create documents for context items (if available from combined analysis)
if contextItems and isinstance(contextItems, list):
for idx, item in enumerate(contextItems):
# Use normalizedRequest as message, attach original prompt as document
if normalizedRequest and normalizedRequest != userInput.prompt:
messageData["message"] = normalizedRequest
logger.debug(f"Using normalized request as message (length: {len(normalizedRequest)})")
# Store original user prompt as .md document
if userInput.prompt:
try:
title = item.get('title') if isinstance(item, dict) else None
mime = item.get('mimeType') if isinstance(item, dict) else None
content = item.get('content') if isinstance(item, dict) else None
if not content:
continue
fileName = (title or f"user_context_{idx+1}.txt").strip()
mimeType = (mime or "text/plain").strip()
# Neutralize content before storing if neutralization is enabled
contentBytes = content.encode('utf-8')
contentBytes = await self._neutralizeContentIfEnabled(contentBytes, mimeType)
# Create file in component storage
originalPromptBytes = userInput.prompt.encode('utf-8')
originalPromptBytes = await self._neutralizeContentIfEnabled(originalPromptBytes, "text/markdown")
fileItem = self.services.interfaceDbComponent.createFile(
name=fileName,
mimeType=mimeType,
content=contentBytes
name="user_prompt_original.md",
mimeType="text/markdown",
content=originalPromptBytes
)
# Persist file data
self.services.interfaceDbComponent.createFileData(fileItem.id, contentBytes)
# Collect file info
self.services.interfaceDbComponent.createFileData(fileItem.id, originalPromptBytes)
fileInfo = self.services.chat.getFileInfo(fileItem.id)
doc = ChatDocument(
fileId=fileItem.id,
fileName=fileInfo.get("fileName", fileName) if fileInfo else fileName,
fileSize=fileInfo.get("size", len(contentBytes)) if fileInfo else len(contentBytes),
mimeType=fileInfo.get("mimeType", mimeType) if fileInfo else mimeType
)
doc = {
"fileId": fileItem.id,
"fileName": fileInfo.get("fileName", "user_prompt_original.md") if fileInfo else "user_prompt_original.md",
"fileSize": fileInfo.get("size", len(originalPromptBytes)) if fileInfo else len(originalPromptBytes),
"mimeType": fileInfo.get("mimeType", "text/markdown") if fileInfo else "text/markdown"
}
createdDocs.append(doc)
except Exception:
continue
logger.debug("Stored original user prompt as document")
except Exception as e:
logger.warning(f"Failed to store original prompt as document: {e}")
else:
try:
analyzerPrompt = (
"You are an input analyzer. From the user's message, perform ALL of the following in one pass:\n"
"1) detectedLanguage: detect ISO 639-1 language code (e.g., de, en).\n"
"2) normalizedRequest: full, explicit restatement of the user's request in the detected language; do NOT summarize; preserve ALL constraints and details.\n"
"2) normalizedRequest: full, explicit restatement of the user's request in the detected language; do NOT summarize; preserve ALL constraints and details. Include all data and context from the original message.\n"
"3) intent: concise single-paragraph core request in the detected language for high-level routing.\n"
"4) contextItems: supportive data blocks to attach as separate documents if significantly larger than the intent (large literal content, long lists/tables, code/JSON blocks, transcripts, CSV fragments, detailed specs). Keep URLs in the intent unless they embed large pasted content.\n"
"5) dataType: What type of data/content they want (numbers|text|documents|analysis|code|unknown).\n"
"6) expectedFormats: What file format(s) they expect - provide matching file format extensions list (e.g., [\"xlsx\", \"pdf\"]). If format is unclear or not specified, use empty list [].\n"
"7) qualityRequirements: Quality requirements they have (accuracy, completeness) as {accuracyThreshold: 0.0-1.0, completenessThreshold: 0.0-1.0}.\n"
"8) successCriteria: Specific success criteria that define completion (array of strings).\n"
"9) needsWorkflowHistory: Boolean indicating if this request needs previous workflow rounds/history to be understood or completed (e.g., 'continue', 'retry', 'fix', 'improve', 'update', 'modify', 'based on previous', 'build on', references to earlier work). Return true if the request is a continuation, retry, modification, or builds upon previous work.\n\n"
"4) dataType: What type of data/content they want (numbers|text|documents|analysis|code|unknown).\n"
"5) expectedFormats: What file format(s) they expect - provide matching file format extensions list (e.g., [\"xlsx\", \"pdf\"]). If format is unclear or not specified, use empty list [].\n"
"6) qualityRequirements: Quality requirements they have (accuracy, completeness) as {accuracyThreshold: 0.0-1.0, completenessThreshold: 0.0-1.0}.\n"
"7) successCriteria: Specific success criteria that define completion (array of strings).\n"
"8) needsWorkflowHistory: Boolean indicating if this request needs previous workflow rounds/history to be understood or completed (e.g., 'continue', 'retry', 'fix', 'improve', 'update', 'modify', 'based on previous', 'build on', references to earlier work). Return true if the request is a continuation, retry, modification, or builds upon previous work.\n\n"
"Rules:\n"
"- Keep intent compact and self-contained regardless of content size; do not extract content into separate context items.\n"
"- normalizedRequest must contain the COMPLETE restatement including all data references - do NOT strip or extract content.\n"
"- Preserve critical references (URLs, filenames) in intent.\n"
"- Normalize to the primary detected language if mixed-language.\n\n"
"Return ONLY JSON (no markdown) with this shape:\n"
@ -662,13 +669,6 @@ The following is the user's original input message. Analyze intent, normalize th
" \"detectedLanguage\": \"de|en|fr|it|...\",\n"
" \"normalizedRequest\": \"Full explicit instruction in detected language\",\n"
" \"intent\": \"Concise normalized request...\",\n"
" \"contextItems\": [\n"
" {\n"
" \"title\": \"User context 1\",\n"
" \"mimeType\": \"text/plain\",\n"
" \"content\": \"Full extracted content block here\"\n"
" }\n"
" ],\n"
" \"dataType\": \"numbers|text|documents|analysis|code|unknown\",\n"
" \"expectedFormats\": [\"pdf\", \"docx\", \"xlsx\", \"txt\", \"json\", \"csv\", \"html\", \"md\"],\n"
" \"qualityRequirements\": {\n"
@ -679,7 +679,7 @@ The following is the user's original input message. Analyze intent, normalize th
" \"needsWorkflowHistory\": true|false\n"
"}\n\n"
"## User Message\n"
"The following is the user's original input message. Extract intent, normalize the request, and identify any large context blocks that should be moved to separate documents:\n\n"
"The following is the user's original input message. Analyze intent, normalize the request, and determine complexity:\n\n"
"################ USER INPUT START #################\n"
f"{userInput.prompt.replace('{', '{{').replace('}', '}}') if userInput.prompt else ''}\n"
"################ USER INPUT FINISH #################"
@ -695,7 +695,6 @@ The following is the user's original input message. Analyze intent, normalize th
detectedLanguage = None
normalizedRequest = None
intentText = userInput.prompt
contextItems = []
workflowIntent = None
# Parse analyzer response (JSON expected)
@ -706,14 +705,11 @@ The following is the user's original input message. Analyze intent, normalize th
parsed = json.loads(aiResponse[jsonStart:jsonEnd])
detectedLanguage = parsed.get('detectedLanguage') or None
normalizedRequest = parsed.get('normalizedRequest') or None
if parsed.get('intent'):
intentText = parsed.get('intent')
contextItems = parsed.get('contextItems') or []
# Extract intent analysis fields and store as workflowIntent
intentText = parsed.get('intent') or userInput.prompt
workflowIntent = {
'intent': intentText, # Use intent instead of primaryGoal
'intent': intentText,
'dataType': parsed.get('dataType', 'unknown'),
'expectedFormats': parsed.get('expectedFormats', []),
'qualityRequirements': parsed.get('qualityRequirements', {}),
@ -724,32 +720,23 @@ The following is the user's original input message. Analyze intent, normalize th
# Store needsWorkflowHistory in services for fast path decision
needsHistoryFromIntention = parsed.get('needsWorkflowHistory', False)
# Always set the value - default to False if not a boolean
setattr(self.services, '_needsWorkflowHistory', bool(needsHistoryFromIntention) if isinstance(needsHistoryFromIntention, bool) else False)
# Store workflowIntent in workflow object for reuse
if hasattr(self.services, 'workflow') and self.services.workflow:
self.services.workflow._workflowIntent = workflowIntent
except Exception:
contextItems = []
workflowIntent = None
# Ensure needsWorkflowHistory is False if parsing fails
setattr(self.services, '_needsWorkflowHistory', False)
# Update services state
# CRITICAL: Validate language from AI response
# If AI didn't return language or invalid → use user language
# If user language not set → use "en"
# Validate language from AI response
validatedLanguage = None
# Validate AI-detected language
if detectedLanguage and isinstance(detectedLanguage, str):
detectedLanguage = detectedLanguage.strip().lower()
# Check if it's a valid 2-character ISO code
if len(detectedLanguage) == 2 and detectedLanguage.isalpha():
validatedLanguage = detectedLanguage
# If AI didn't return valid language, use user language
if not validatedLanguage:
userLanguage = getattr(self.services.user, 'language', None) if hasattr(self.services, 'user') and self.services.user else None
if userLanguage and isinstance(userLanguage, str):
@ -757,12 +744,10 @@ The following is the user's original input message. Analyze intent, normalize th
if len(userLanguage) == 2 and userLanguage.isalpha():
validatedLanguage = userLanguage
# Final fallback to "en"
if not validatedLanguage:
validatedLanguage = "en"
logger.warning("Language not detected from AI and user language not set - using default 'en'")
# Set validated language
self._setUserLanguage(validatedLanguage)
try:
setattr(self.services, 'currentUserLanguage', validatedLanguage)
@ -770,60 +755,40 @@ The following is the user's original input message. Analyze intent, normalize th
except Exception:
pass
self.services.currentUserPrompt = intentText or userInput.prompt
# Always set currentUserPromptNormalized - use normalizedRequest if available, otherwise fallback to currentUserPrompt
# CRITICAL: normalizedRequest MUST be used if available, do NOT fall back to intent
if normalizedRequest and normalizedRequest.strip():
# Use normalizedRequest if available and not empty
self.services.currentUserPromptNormalized = normalizedRequest
logger.debug(f"Stored normalized request from analysis (length: {len(normalizedRequest)})")
else:
# Fallback only if normalizedRequest is None or empty
logger.warning(f"normalizedRequest is None or empty in analysis, falling back to intentText. normalizedRequest={normalizedRequest}, intentText={intentText}")
logger.warning(f"normalizedRequest is None or empty in analysis, falling back to intentText")
self.services.currentUserPromptNormalized = intentText or userInput.prompt
if contextItems is not None:
self.services.currentUserContextItems = contextItems
# Update message with normalized request if analysis produced one
# Use normalizedRequest as the chat message (transformed user input)
if normalizedRequest and normalizedRequest != userInput.prompt:
messageData["message"] = normalizedRequest
logger.debug(f"Updated first message with normalized request (length: {len(normalizedRequest)})")
# Create documents for context items
if contextItems and isinstance(contextItems, list):
for idx, item in enumerate(contextItems):
# Store original user prompt as .md document
if userInput.prompt:
try:
title = item.get('title') if isinstance(item, dict) else None
mime = item.get('mimeType') if isinstance(item, dict) else None
content = item.get('content') if isinstance(item, dict) else None
if not content:
continue
fileName = (title or f"user_context_{idx+1}.txt").strip()
mimeType = (mime or "text/plain").strip()
# Neutralize content before storing if neutralization is enabled
contentBytes = content.encode('utf-8')
contentBytes = await self._neutralizeContentIfEnabled(contentBytes, mimeType)
# Create file in component storage
originalPromptBytes = userInput.prompt.encode('utf-8')
originalPromptBytes = await self._neutralizeContentIfEnabled(originalPromptBytes, "text/markdown")
fileItem = self.services.interfaceDbComponent.createFile(
name=fileName,
mimeType=mimeType,
content=contentBytes
name="user_prompt_original.md",
mimeType="text/markdown",
content=originalPromptBytes
)
# Persist file data
self.services.interfaceDbComponent.createFileData(fileItem.id, contentBytes)
# Collect file info
self.services.interfaceDbComponent.createFileData(fileItem.id, originalPromptBytes)
fileInfo = self.services.chat.getFileInfo(fileItem.id)
doc = ChatDocument(
fileId=fileItem.id,
fileName=fileInfo.get("fileName", fileName) if fileInfo else fileName,
fileSize=fileInfo.get("size", len(contentBytes)) if fileInfo else len(contentBytes),
mimeType=fileInfo.get("mimeType", mimeType) if fileInfo else mimeType
)
doc = {
"fileId": fileItem.id,
"fileName": fileInfo.get("fileName", "user_prompt_original.md") if fileInfo else "user_prompt_original.md",
"fileSize": fileInfo.get("size", len(originalPromptBytes)) if fileInfo else len(originalPromptBytes),
"mimeType": fileInfo.get("mimeType", "text/markdown") if fileInfo else "text/markdown"
}
createdDocs.append(doc)
except Exception:
continue
logger.debug("Stored original user prompt as document")
except Exception as e:
logger.warning(f"Failed to store original prompt as document: {e}")
except Exception as e:
logger.warning(f"Prompt analysis failed or skipped: {str(e)}")