data source fixes

ValueOn AG 2026-04-21 00:50:36 +02:00
parent f29e0c9edc
commit cdca242f82
33 changed files with 2357 additions and 341 deletions

app.py (10 changed lines)
View file

@@ -310,10 +310,18 @@ async def lifespan(app: FastAPI):
# Register all feature definitions in RBAC catalog (for /api/features/ endpoint)
try:
from modules.security.rbacCatalog import getCatalogService
from modules.system.registry import registerAllFeaturesInCatalog
from modules.system.registry import registerAllFeaturesInCatalog, syncCatalogFeaturesToDb
catalogService = getCatalogService()
registerAllFeaturesInCatalog(catalogService)
logger.info("Feature catalog registration completed")
# Persist the in-memory feature registry into the Feature DB table so
# the FeatureInstance.featureCode FK has real targets. Without this,
# every FeatureInstance row would be flagged as an orphan by the
# SysAdmin DB-health scan (cf. interfaceFeatures.upsertFeature).
try:
syncCatalogFeaturesToDb(catalogService)
except Exception as e:
logger.error(f"Feature DB sync failed: {e}")
except Exception as e:
logger.error(f"Feature catalog registration failed: {e}")

View file

@@ -55,6 +55,8 @@ Service_CLICKUP_OAUTH_REDIRECT_URI = http://localhost:8000/api/clickup/auth/conn
STRIPE_SECRET_KEY_SECRET = DEV_ENC:Z0FBQUFBQnB5dkd5aHNGejgzQmpTdmprdzQxR19KZkh3MlhYUTNseFN3WnlaWjh2SDZyalN6aU9xSktkbUQwUnZrVnlvbGVRQm4yZFdiRU5aSEk5WVJuUnR4VUwtTm9OVk1WWmJQeU5QaDdib0hfVWV5U1BfYTFXRmdoOWdnOWxkb3JFQmF3bm45UjFUVUxmWGtGRkFKUGd6bmhpQlFnaVI3Q2lLdDlsY1VESk1vOEM0ZFBJNW1qcVZ0N2tPYmRLNmVKajZ2M3o3S05lWnRRVG5LdkRseW4wQ3VjNHNQZTZUdz09
STRIPE_WEBHOOK_SECRET = DEV_ENC:Z0FBQUFBQnB5dkd5dDJMSHBrVk8wTzJhU2xzTTZCZWdvWmU2NGI2WklfRXRJZVUzaVYyOU9GLUZsalUwa2lPdEgtUHo0dVVvRDU1cy1saHJyU0Rxa2xQZjBuakExQzk3bmxBcU9WbEIxUEtpR1JoUFMxZG9ISGRZUXFhdFpSMGxvQUV3a0VLQllfUUtCOHZwTGdteV9rYTFOazBfSlN3ekNWblFpakJlZVlCTmNkWWQ4Sm01a1RCWTlnTlFHWVA0MkZYMlprUExrWFN2V0NVU1BTd1NKczFJbVo3VHpLdlc4UT09
STRIPE_API_VERSION = 2026-01-28.clover
STRIPE_AUTOMATIC_TAX_ENABLED = false
STRIPE_TAX_RATE_ID_CH_VAT = txr_1TOQd14OUoIL0Osj7A0ZQlr0
# AI configuration
Connector_AiOpenai_API_SECRET = DEV_ENC:Z0FBQUFBQnBaSnM4TWFRRmxVQmNQblVIYmc1Y0Q3aW9zZUtDWlNWdGZjbFpncGp2NHN2QjkxMWxibUJnZDBId252MWk5TXN3Yk14ajFIdi1CTkx2ZWx2QzF5OFR6LUx5azQ3dnNLaXJBOHNxc0tlWmtZcTFVelF4eXBSM2JkbHd2eTM0VHNXdHNtVUprZWtPVzctNlJsZHNmM20tU1N6Q1Q2cHFYSi1tNlhZNDNabTVuaEVGWmIydEhadTcyMlBURmw2aUJxOF9GTzR0dTZiNGZfOFlHaVpPZ1A1LXhhOEFtN1J5TEVNNWtMcGpyNkMzSl8xRnZsaTF1WTZrOUZmb0cxVURjSGFLS2dIYTQyZEJtTm90bEYxVWxNNXVPdTVjaVhYbXhxT3JsVDM5VjZMVFZKSE1tZnM9

View file

@@ -55,6 +55,8 @@ Service_CLICKUP_OAUTH_REDIRECT_URI = http://localhost:8000/api/clickup/auth/conn
STRIPE_SECRET_KEY_SECRET = INT_ENC:Z0FBQUFBQnB5dkd5ekdBaGNGVUlOQUpncTlzLWlTV0V5OWZzQkpDczhCUGw4U1JpTHZ0d3pfYlFNWElLRlNiNlNsaDRYTGZUTkg2OUFrTW1GZXpOUjBVbmRQWjN6ekhHd2ZSQ195OHlaeWh1TmxrUm10V2R3YmdncmFLbFMzVjdqcWJMSUJPR2xuSEozclNoZG1rZVBTaWg3OFQ1Qzdxb0wyQ2RKazc2dG1aZXBUTXlvbDZqLS1KOVI5M3BGc3NQZkZRbnFpRjIwWmh2ZHlVNlpxZVo2dWNmMjQ5eW02QmtzUT09
STRIPE_WEBHOOK_SECRET = whsec_2agCQEbDPSOn2C40EJcwoPCqlvaPLF7M
STRIPE_API_VERSION = 2026-01-28.clover
STRIPE_AUTOMATIC_TAX_ENABLED = false
STRIPE_TAX_RATE_ID_CH_VAT = txr_1TOQd14OUoIL0Osj7A0ZQlr0
# AI configuration
Connector_AiOpenai_API_SECRET = INT_ENC:Z0FBQUFBQnBaSnM4MENkQ2xJVmE5WFZKUkh2SHJFby1YVXN3ZmVxRkptS3ZWRmlwdU93ZEJjSjlMV2NGbU5mS3NCdmFfcmFYTEJNZXFIQ3ozTWE4ZC1pemlQNk9wbjU1d3BPS0ZCTTZfOF8yWmVXMWx0TU1DamlJLVFhSTJXclZsY3hMVWlPcXVqQWtMdER4T252NHZUWEhUOTdIN1VGR3ltazEweXFqQ0lvb0hYWmxQQnpxb0JwcFNhRDNGWXdoRTVJWm9FalZpTUF5b1RqZlRaYnVKYkp0NWR5Vko1WWJ0Wmg2VWJzYXZ0Z3Q4UkpsTldDX2dsekhKMmM4YjRoa2RwemMwYVQwM2cyMFlvaU5mOTVTWGlROU8xY2ZVRXlxZzJqWkxURWlGZGI2STZNb0NpdEtWUnM9

View file

@@ -55,6 +55,9 @@ Service_CLICKUP_OAUTH_REDIRECT_URI = http://localhost:8000/api/clickup/auth/conn
STRIPE_SECRET_KEY_SECRET = PROD_ENC:Z0FBQUFBQnB5dkd6aVA3R3VRS3VHMUgzUEVjYkR4eUZKWFhPUzFTTVlHNnBvT3FienNQaUlBWVpPLXJyVGpGMWk4LXktMXphX0J6ZTVESkJxdjNNa3ZJbF9wX2ppYzdjYlF0cmdVamlEWWJDSmJYYkJseHctTlh4dnNoQWs4SG5haVl2TTNDdXpuaFpqeDBtNkFCbUxMa0RaWG14dmxyOEdILTNrZ2licmNpbXVkN2lFSWoxZW1BODNpV0ZTQ0VaeXRmR1d4RjExMlVFS3MtQU9zZXZlZE1mTmY3OWctUXJHdz09
STRIPE_WEBHOOK_SECRET = PROD_ENC:Z0FBQUFBQnBudkpGNUpTWldsakYydFhFelBrR1lSaWxYT3kyMENOMUljZTJUZHBWcEhhdWVCMzYxZXQ5b3VlTFVRalFiTVdsbGxrdUx0RDFwSEpsOC1sTDJRTEJNQlA3S3ZaQzBtV1h6bWp5VnlMZUgwUlF3cXYxcnljZVE5SWdzLVg3V0syOWRYS08=
STRIPE_API_VERSION = 2026-01-28.clover
STRIPE_AUTOMATIC_TAX_ENABLED = false
STRIPE_TAX_RATE_ID_CH_VAT = txr_1TOQZG8WqlVsabrfFEu49pah
# AI configuration
Connector_AiOpenai_API_SECRET = PROD_ENC:Z0FBQUFBQnBaSnM4TWJOVm4xVkx6azRlNDdxN3UxLUdwY2hhdGYxRGp4VFJqYXZIcmkxM1ZyOWV2M0Z4MHdFNkVYQ0ROb1d6LUZFUEdvMHhLMEtXYVBCRzM5TlYyY3ROYWtJRk41cDZxd0tYYi00MjVqMTh4QVcyTXl0bmVocEFHbXQwREpwNi1vODdBNmwzazE5bkpNelE2WXpvblIzWlQwbGdEelI2WXFqT1RibXVHcjNWbVhwYzBOM25XTzNmTDAwUjRvYk4yNjIyZHc5c2RSZzREQUFCdUwyb0ZuOXN1dzI2c2FKdXI4NGxEbk92czZWamJXU3ZSbUlLejZjRklRRk4tLV9aVUFZekI2bTU4OHYxNTUybDg3RVo0ZTh6dXNKRW5GNXVackZvcm9laGI0X3R6V3M9

View file

@@ -537,13 +537,19 @@ class DatabaseConnector:
try:
cursor.execute(
"""
SELECT column_name FROM information_schema.columns
SELECT column_name, data_type
FROM information_schema.columns
WHERE LOWER(table_name) = LOWER(%s) AND table_schema = 'public'
""",
(table,),
)
existing_column_rows = cursor.fetchall()
existing_columns = {
row["column_name"] for row in cursor.fetchall()
row["column_name"] for row in existing_column_rows
}
existing_column_types = {
row["column_name"]: (row["data_type"] or "").lower()
for row in existing_column_rows
}
# Desired columns based on model
@@ -569,6 +575,31 @@ class DatabaseConnector:
logger.warning(
f"Could not add column '{col}' to '{table}': {add_err}"
)
# Targeted type-downgrade: if a model field has been
# changed from a structured type (JSONB) to a plain
# TEXT field, alter the column so writes don't fail.
# JSONB -> TEXT is a safe, lossless cast (JSONB is
# rendered as its JSON-text representation; the
# corresponding Pydantic ``@field_validator`` is
# responsible for re-decoding legacy data on read).
for col in sorted(desired_columns & existing_columns):
if col == "id":
continue
desired_sql = (model_fields.get(col) or "").upper()
currentType = existing_column_types.get(col, "")
if desired_sql == "TEXT" and currentType == "jsonb":
try:
cursor.execute(
f'ALTER TABLE "{table}" ALTER COLUMN "{col}" TYPE TEXT USING "{col}"::text'
)
logger.info(
f"Downgraded column '{col}' from JSONB to TEXT on '{table}'"
)
except Exception as alter_err:
logger.warning(
f"Could not downgrade column '{col}' on '{table}': {alter_err}"
)
except Exception as ensure_err:
logger.warning(
f"Could not ensure columns for existing table '{table}': {ensure_err}"

View file

@@ -24,8 +24,21 @@ class ServiceAdapter(ABC):
"""Standardized operations for a single service of a provider."""
@abstractmethod
async def browse(self, path: str, filter: Optional[str] = None) -> list:
"""List items (files/folders) at the given path."""
async def browse(
self,
path: str,
filter: Optional[str] = None,
limit: Optional[int] = None,
) -> list:
"""List items (files/folders) at the given path.
``limit`` is an optional upper bound for the number of returned entries.
Adapters that talk to paginated APIs should keep paging until either
the API is exhausted OR ``limit`` is reached. ``None`` means "use the
adapter's sensible default" (NOT "unlimited") so an over-eager caller
cannot accidentally pull millions of records. Adapters that have no
pagination (single page result) may ignore this parameter.
"""
...
@abstractmethod
@@ -39,8 +52,16 @@ class ServiceAdapter(ABC):
...
@abstractmethod
async def search(self, query: str, path: Optional[str] = None) -> list:
"""Search for items matching the query."""
async def search(
self,
query: str,
path: Optional[str] = None,
limit: Optional[int] = None,
) -> list:
"""Search for items matching the query.
See :meth:`browse` for the semantics of ``limit``.
"""
...
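A compact sketch of the paging contract just described, written against a hypothetical paginated fetch (_fetchPage is an assumed helper and 100 an assumed default; the concrete adapters below follow this pattern):

async def browse(self, path, filter=None, limit=None):
    effectiveLimit = 100 if limit is None else max(1, int(limit))  # None = default, never unlimited
    out, page = [], 0
    while len(out) < effectiveLimit:
        items = await self._fetchPage(path, page)  # hypothetical page fetch
        if not items:  # API exhausted
            break
        out.extend(items)
        page += 1
    return out[:effectiveLimit]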

View file

@@ -54,7 +54,12 @@ class ClickupListsAdapter(ServiceAdapter):
self._svc = ClickupService(context=None, get_service=lambda _: None)
self._svc.setAccessToken(access_token)
async def browse(self, path: str, filter: Optional[str] = None) -> List[ExternalEntry]:
async def browse(
self,
path: str,
filter: Optional[str] = None,
limit: Optional[int] = None,
) -> List[ExternalEntry]:
p = _norm(path)
out: List[ExternalEntry] = []
@@ -173,7 +178,11 @@ class ClickupListsAdapter(ServiceAdapter):
)
if len(tasks) < 100:
break
if limit is not None and len(out) >= int(limit):
break
page += 1
if limit is not None:
out = out[: max(1, int(limit))]
return out
m = re.match(r"^/team/([^/]+)/list/([^/]+)/task/([^/]+)$", p)
@@ -213,7 +222,12 @@ class ClickupListsAdapter(ServiceAdapter):
task_id = m.group(3)
return await self._svc.uploadTaskAttachment(task_id, data, fileName)
async def search(self, query: str, path: Optional[str] = None) -> List[ExternalEntry]:
async def search(
self,
query: str,
path: Optional[str] = None,
limit: Optional[int] = None,
) -> List[ExternalEntry]:
base = _norm(path or "/")
team_id: Optional[str] = None
mt = re.match(r"^/team/([^/]+)", base)
@@ -252,7 +266,11 @@ class ClickupListsAdapter(ServiceAdapter):
)
if len(tasks) < 25:
break
if limit is not None and len(out) >= int(limit):
break
page += 1
if limit is not None:
out = out[: max(1, int(limit))]
return out

View file

@@ -21,7 +21,12 @@ class FtpFilesAdapter(ServiceAdapter):
def __init__(self, accessToken: str):
self._accessToken = accessToken
async def browse(self, path: str, filter: Optional[str] = None) -> List[ExternalEntry]:
async def browse(
self,
path: str,
filter: Optional[str] = None,
limit: Optional[int] = None,
) -> List[ExternalEntry]:
logger.info(f"FTP browse stub: {path}")
return []
@@ -32,7 +37,12 @@ class FtpFilesAdapter(ServiceAdapter):
async def upload(self, path: str, data: bytes, fileName: str) -> dict:
return {"error": "FTP upload not yet implemented"}
async def search(self, query: str, path: Optional[str] = None) -> List[ExternalEntry]:
async def search(
self,
query: str,
path: Optional[str] = None,
limit: Optional[int] = None,
) -> List[ExternalEntry]:
return []

View file

@@ -37,11 +37,17 @@ class DriveAdapter(ServiceAdapter):
def __init__(self, accessToken: str):
self._token = accessToken
async def browse(self, path: str, filter: Optional[str] = None) -> List[ExternalEntry]:
async def browse(
self,
path: str,
filter: Optional[str] = None,
limit: Optional[int] = None,
) -> List[ExternalEntry]:
folderId = (path or "").strip("/") or "root"
query = f"'{folderId}' in parents and trashed=false"
fields = "files(id,name,mimeType,size,modifiedTime,parents)"
url = f"{_DRIVE_BASE}/files?q={query}&fields={fields}&pageSize=100&orderBy=folder,name"
pageSize = max(1, min(int(limit or 100), 1000))
url = f"{_DRIVE_BASE}/files?q={query}&fields={fields}&pageSize={pageSize}&orderBy=folder,name"
result = await _googleGet(self._token, url)
if "error" in result:
@@ -111,14 +117,20 @@
async def upload(self, path: str, data: bytes, fileName: str) -> dict:
return {"error": "Google Drive upload not yet implemented"}
async def search(self, query: str, path: Optional[str] = None) -> List[ExternalEntry]:
async def search(
self,
query: str,
path: Optional[str] = None,
limit: Optional[int] = None,
) -> List[ExternalEntry]:
safeQuery = query.replace("'", "\\'")
folderId = (path or "").strip("/")
qParts = [f"name contains '{safeQuery}'", "trashed=false"]
if folderId:
qParts.append(f"'{folderId}' in parents")
qStr = " and ".join(qParts)
url = f"{_DRIVE_BASE}/files?q={qStr}&fields=files(id,name,mimeType,size)&pageSize=25"
pageSize = max(1, min(int(limit or 100), 1000))
url = f"{_DRIVE_BASE}/files?q={qStr}&fields=files(id,name,mimeType,size)&pageSize={pageSize}"
logger.debug(f"Google Drive search: q={qStr}")
result = await _googleGet(self._token, url)
if "error" in result:
@@ -140,7 +152,15 @@ class GmailAdapter(ServiceAdapter):
def __init__(self, accessToken: str):
self._token = accessToken
async def browse(self, path: str, filter: Optional[str] = None) -> list:
_DEFAULT_MESSAGE_LIMIT = 100
_MAX_MESSAGE_LIMIT = 500
async def browse(
self,
path: str,
filter: Optional[str] = None,
limit: Optional[int] = None,
) -> list:
cleanPath = (path or "").strip("/")
if not cleanPath:
@@ -165,13 +185,14 @@
labels.sort(key=lambda e: (0 if e.metadata.get("type") == "system" else 1, e.name))
return labels
url = f"{_GMAIL_BASE}/users/me/messages?labelIds={cleanPath}&maxResults=25"
effectiveLimit = self._DEFAULT_MESSAGE_LIMIT if limit is None else max(1, min(int(limit), self._MAX_MESSAGE_LIMIT))
url = f"{_GMAIL_BASE}/users/me/messages?labelIds={cleanPath}&maxResults={effectiveLimit}"
result = await _googleGet(self._token, url)
if "error" in result:
return []
entries = []
for msg in result.get("messages", [])[:25]:
for msg in result.get("messages", [])[:effectiveLimit]:
msgId = msg.get("id", "")
detailUrl = f"{_GMAIL_BASE}/users/me/messages/{msgId}?format=metadata&metadataHeaders=Subject&metadataHeaders=From&metadataHeaders=Date"
detail = await _googleGet(self._token, detailUrl)
@@ -231,8 +252,14 @@ class GmailAdapter(ServiceAdapter):
async def upload(self, path: str, data: bytes, fileName: str) -> dict:
return {"error": "Gmail upload not applicable"}
async def search(self, query: str, path: Optional[str] = None) -> list:
url = f"{_GMAIL_BASE}/users/me/messages?q={query}&maxResults=10"
async def search(
self,
query: str,
path: Optional[str] = None,
limit: Optional[int] = None,
) -> list:
effectiveLimit = self._DEFAULT_MESSAGE_LIMIT if limit is None else max(1, min(int(limit), self._MAX_MESSAGE_LIMIT))
url = f"{_GMAIL_BASE}/users/me/messages?q={query}&maxResults={effectiveLimit}"
result = await _googleGet(self._token, url)
if "error" in result:
return []
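The same clamp recurs across these Google adapters (Drive's pageSize, Gmail's effectiveLimit); a shared helper would state the rule once. A sketch, not part of the commit:

def _clampLimit(limit, default: int, cap: int) -> int:
    """Resolve an optional caller limit: None -> default, otherwise clamp into [1, cap]."""
    if limit is None:
        return default
    return max(1, min(int(limit), cap))

# e.g. pageSize = _clampLimit(limit, default=100, cap=1000)  # Drive
# e.g. effectiveLimit = _clampLimit(limit, self._DEFAULT_MESSAGE_LIMIT, self._MAX_MESSAGE_LIMIT)  # Gmail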

View file

@@ -104,6 +104,16 @@ async def _handleResponse(resp: aiohttp.ClientResponse) -> Dict[str, Any]:
return {"error": f"{resp.status}: {errorText}"}
def _stripGraphBase(url: str) -> str:
"""Convert an absolute Graph URL (used by @odata.nextLink) into the
relative endpoint that ``_makeGraphCall`` expects."""
if not url:
return ""
if url.startswith(_GRAPH_BASE):
return url[len(_GRAPH_BASE):].lstrip("/")
return url
def _graphItemToExternalEntry(item: Dict[str, Any], basePath: str = "") -> ExternalEntry:
isFolder = "folder" in item
return ExternalEntry(
@@ -128,7 +138,12 @@ def _graphItemToExternalEntry(item: Dict[str, Any], basePath: str = "") -> Exter
class SharepointAdapter(_GraphApiMixin, ServiceAdapter):
"""ServiceAdapter for SharePoint (files, sites) via Microsoft Graph."""
async def browse(self, path: str, filter: Optional[str] = None) -> List[ExternalEntry]:
async def browse(
self,
path: str,
filter: Optional[str] = None,
limit: Optional[int] = None,
) -> List[ExternalEntry]:
"""List items in a SharePoint folder.
Path format: /sites/<SiteName>/<FolderPath>
@@ -155,6 +170,8 @@ class SharepointAdapter(_GraphApiMixin, ServiceAdapter):
entries = [_graphItemToExternalEntry(item, path) for item in result.get("value", [])]
if filter:
entries = [e for e in entries if _matchFilter(e, filter)]
if limit is not None:
entries = entries[: max(1, int(limit))]
return entries
async def _discoverSites(self) -> List[ExternalEntry]:
@@ -197,7 +214,12 @@ class SharepointAdapter(_GraphApiMixin, ServiceAdapter):
result = await self._graphPut(endpoint, data)
return result
async def search(self, query: str, path: Optional[str] = None) -> List[ExternalEntry]:
async def search(
self,
query: str,
path: Optional[str] = None,
limit: Optional[int] = None,
) -> List[ExternalEntry]:
siteId, _ = _parseSharepointPath(path or "")
if not siteId:
return []
@@ -206,7 +228,10 @@ class SharepointAdapter(_GraphApiMixin, ServiceAdapter):
result = await self._graphGet(endpoint)
if "error" in result:
return []
return [_graphItemToExternalEntry(item) for item in result.get("value", [])]
entries = [_graphItemToExternalEntry(item) for item in result.get("value", [])]
if limit is not None:
entries = entries[: max(1, int(limit))]
return entries
# ---------------------------------------------------------------------------
@@ -216,31 +241,89 @@ class SharepointAdapter(_GraphApiMixin, ServiceAdapter):
class OutlookAdapter(_GraphApiMixin, ServiceAdapter):
"""ServiceAdapter for Outlook (mail, calendar) via Microsoft Graph."""
async def browse(self, path: str, filter: Optional[str] = None) -> List[ExternalEntry]:
# Default upper bound for messages returned from a single browse() call.
# Graph allows $top up to 1000 per page; we keep the default modest so
# accidental "browse all" calls don't blow up the LLM context. Callers
# (e.g. the agent's browseDataSource tool) can override via ``limit``.
_DEFAULT_MESSAGE_LIMIT = 100
_MAX_MESSAGE_LIMIT = 1000
_PAGE_SIZE = 100
async def browse(
self,
path: str,
filter: Optional[str] = None,
limit: Optional[int] = None,
) -> List[ExternalEntry]:
"""List mail folders or messages.
path = "" or "/" list mail folders
path = "/Inbox" list messages in Inbox
path = "" or "/" list ALL top-level mail folders (paginated)
path = "/<folderId>" list messages in that folder (paginated, up to ``limit``)
"""
if not path or path == "/":
result = await self._graphGet("me/mailFolders")
if "error" in result:
return []
# Graph's default page size for /me/mailFolders is 10. Mailboxes with
# localized and numerous system folders (Posteingang, Gesendet, Archiv, …)
# often exceed that, so the well-known Inbox can fall off the first
# page. We therefore page through all results and, as a hard fallback,
# query the well-known shortcut /me/mailFolders/inbox so the default
# folder is always visible regardless of locale or ordering.
folders: List[Dict[str, Any]] = []
seenIds: set = set()
endpoint: Optional[str] = "me/mailFolders?$top=100"
while endpoint:
result = await self._graphGet(endpoint)
if "error" in result:
break
for f in result.get("value", []):
fid = f.get("id")
if fid and fid not in seenIds:
seenIds.add(fid)
folders.append(f)
nextLink = result.get("@odata.nextLink")
if not nextLink:
endpoint = None
else:
endpoint = _stripGraphBase(nextLink)
# Guarantee Inbox is present (well-known name, locale-independent)
if not any((f.get("displayName") or "").lower() in ("inbox", "posteingang") for f in folders):
inbox = await self._graphGet("me/mailFolders/inbox")
if "error" not in inbox and inbox.get("id") and inbox.get("id") not in seenIds:
folders.insert(0, inbox)
return [
ExternalEntry(
name=f.get("displayName", ""),
path=f"/{f.get('id', '')}",
isFolder=True,
metadata={"id": f.get("id"), "totalItemCount": f.get("totalItemCount")},
metadata={
"id": f.get("id"),
"totalItemCount": f.get("totalItemCount"),
"unreadItemCount": f.get("unreadItemCount"),
"childFolderCount": f.get("childFolderCount"),
},
)
for f in result.get("value", [])
for f in folders
]
folderId = path.strip("/")
endpoint = f"me/mailFolders/{folderId}/messages?$top=25&$orderby=receivedDateTime desc"
result = await self._graphGet(endpoint)
if "error" in result:
return []
effectiveLimit = self._DEFAULT_MESSAGE_LIMIT if limit is None else max(1, min(int(limit), self._MAX_MESSAGE_LIMIT))
pageSize = min(self._PAGE_SIZE, effectiveLimit)
endpoint: Optional[str] = (
f"me/mailFolders/{folderId}/messages"
f"?$top={pageSize}&$orderby=receivedDateTime desc"
)
messages: List[Dict[str, Any]] = []
while endpoint and len(messages) < effectiveLimit:
result = await self._graphGet(endpoint)
if "error" in result:
break
for m in result.get("value", []):
messages.append(m)
if len(messages) >= effectiveLimit:
break
nextLink = result.get("@odata.nextLink")
endpoint = _stripGraphBase(nextLink) if nextLink else None
return [
ExternalEntry(
name=m.get("subject", "(no subject)"),
@@ -253,7 +336,7 @@ class OutlookAdapter(_GraphApiMixin, ServiceAdapter):
"hasAttachments": m.get("hasAttachments", False),
},
)
for m in result.get("value", [])
for m in messages
]
async def download(self, path: str) -> DownloadResult:
@@ -279,9 +362,17 @@ class OutlookAdapter(_GraphApiMixin, ServiceAdapter):
"""Not applicable for Outlook in the file sense."""
return {"error": "Upload not supported for Outlook"}
async def search(self, query: str, path: Optional[str] = None) -> List[ExternalEntry]:
async def search(
self,
query: str,
path: Optional[str] = None,
limit: Optional[int] = None,
) -> List[ExternalEntry]:
safeQuery = query.replace("'", "''")
endpoint = f"me/messages?$search=\"{safeQuery}\"&$top=25"
effectiveLimit = self._DEFAULT_MESSAGE_LIMIT if limit is None else max(1, min(int(limit), self._MAX_MESSAGE_LIMIT))
# NOTE: Graph $search does not support $orderby and may return a single
# page (no @odata.nextLink). We still pass $top to lift the implicit 25.
endpoint = f"me/messages?$search=\"{safeQuery}\"&$top={effectiveLimit}"
result = await self._graphGet(endpoint)
if "error" in result:
return []
@@ -366,7 +457,12 @@ class OutlookAdapter(_GraphApiMixin, ServiceAdapter):
class TeamsAdapter(_GraphApiMixin, ServiceAdapter):
"""ServiceAdapter for Microsoft Teams -- browse joined teams and channels."""
async def browse(self, path: str, filter: Optional[str] = None) -> list:
async def browse(
self,
path: str,
filter: Optional[str] = None,
limit: Optional[int] = None,
) -> list:
cleanPath = (path or "").strip("/")
if not cleanPath:
@@ -408,7 +504,12 @@ class TeamsAdapter(_GraphApiMixin, ServiceAdapter):
async def upload(self, path: str, data: bytes, fileName: str) -> dict:
return {"error": "Teams upload not implemented"}
async def search(self, query: str, path: Optional[str] = None) -> list:
async def search(
self,
query: str,
path: Optional[str] = None,
limit: Optional[int] = None,
) -> list:
return []
@@ -419,7 +520,12 @@
class OneDriveAdapter(_GraphApiMixin, ServiceAdapter):
"""ServiceAdapter stub for OneDrive (personal drive)."""
async def browse(self, path: str, filter: Optional[str] = None) -> List[ExternalEntry]:
async def browse(
self,
path: str,
filter: Optional[str] = None,
limit: Optional[int] = None,
) -> List[ExternalEntry]:
cleanPath = (path or "").strip("/")
if not cleanPath:
endpoint = "me/drive/root/children"
@@ -432,6 +538,8 @@
entries = [_graphItemToExternalEntry(item, path) for item in result.get("value", [])]
if filter:
entries = [e for e in entries if _matchFilter(e, filter)]
if limit is not None:
entries = entries[: max(1, int(limit))]
return entries
async def download(self, path: str) -> bytes:
@@ -447,13 +555,21 @@
endpoint = f"me/drive/root:/{uploadPath}:/content"
return await self._graphPut(endpoint, data)
async def search(self, query: str, path: Optional[str] = None) -> List[ExternalEntry]:
async def search(
self,
query: str,
path: Optional[str] = None,
limit: Optional[int] = None,
) -> List[ExternalEntry]:
safeQuery = query.replace("'", "''")
endpoint = f"me/drive/root/search(q='{safeQuery}')"
result = await self._graphGet(endpoint)
if "error" in result:
return []
return [_graphItemToExternalEntry(item) for item in result.get("value", [])]
entries = [_graphItemToExternalEntry(item) for item in result.get("value", [])]
if limit is not None:
entries = entries[: max(1, int(limit))]
return entries
# ---------------------------------------------------------------------------

View file

@@ -176,7 +176,13 @@ class ChatWorkflow(PowerOnModel):
]})
maxSteps: int = Field(default=10, description="Maximum number of iterations in dynamic mode", json_schema_extra={"label": "Max. Schritte", "frontend_type": "integer", "frontend_readonly": False, "frontend_required": False})
expectedFormats: Optional[List[str]] = Field(None, description="List of expected file format extensions from user request (e.g., ['xlsx', 'pdf']). Extracted during intent analysis.", json_schema_extra={"label": "Erwartete Formate", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
# Attached data sources (per-chat persistence so the chip-bar of the
# WorkspaceInput can be restored when the user re-opens the chat).
# Stored as a JSONB list of UUIDs. Sources that no longer resolve (the
# data source was deleted in the meantime) are silently dropped by the
# frontend on load.
attachedDataSourceIds: Optional[List[str]] = Field(default_factory=list, description="IDs of DataSource records pinned to this chat (UDB attachments).", json_schema_extra={"label": "Angehängte Datenquellen", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
attachedFeatureDataSourceIds: Optional[List[str]] = Field(default_factory=list, description="IDs of FeatureDataSource records pinned to this chat (UDB feature attachments).", json_schema_extra={"label": "Angehängte Feature-Datenquellen", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
# Helper methods for execution state management
def getRoundIndex(self) -> int:
"""Get current round index"""

View file

@@ -70,6 +70,57 @@ class UserPermissions(BaseModel):
)
class InvoiceAddress(BaseModel):
"""
Historical structured invoice address. NO LONGER actively used
-- since 2026-04-20 the fields are declared directly on ``Mandate``
as ``invoiceCompanyName`` / ``invoiceLine1`` / ``invoicePostalCode`` / ...
(see there). This class is kept only in case existing code still uses
it to document the schema or to serialize old JSONB dicts; it is no
longer referenced by the Mandate model.
"""
companyName: Optional[str] = Field(
default=None,
description="Firmenname / Empfaenger der Rechnung (falls abweichend vom Mandate.label)",
)
contactName: Optional[str] = Field(
default=None,
description="Ansprechperson (z. B. Buchhaltung)",
)
email: Optional[EmailStr] = Field(
default=None,
description="E-Mail-Adresse fuer den Versand der Stripe-Rechnung",
)
line1: Optional[str] = Field(
default=None,
description="Strasse + Nr. (Adresszeile 1)",
)
line2: Optional[str] = Field(
default=None,
description="Adresszeile 2 (z. B. c/o, Postfach)",
)
postalCode: Optional[str] = Field(
default=None,
description="PLZ",
)
city: Optional[str] = Field(
default=None,
description="Ort",
)
state: Optional[str] = Field(
default=None,
description="Kanton / Bundesland",
)
country: Optional[str] = Field(
default="CH",
description="ISO-3166 Alpha-2 Laendercode (Default: CH)",
)
vatNumber: Optional[str] = Field(
default=None,
description="UID / MWST-Nummer des Empfaengers (z. B. CHE-123.456.789 MWST)",
)
@i18nModel("Mandant")
class Mandate(PowerOnModel):
"""
@@ -123,6 +174,169 @@ class Mandate(PowerOnModel):
description="Timestamp when the mandate was soft-deleted. After 30 days, hard-delete is triggered.",
json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False, "label": "Gelöscht am"},
)
# ------------------------------------------------------------------
# Invoice address (structured, compliant with Swiss fiduciary practice)
# ------------------------------------------------------------------
# Individual fields instead of a nested object or free text, so that
# (a) the FormGenerator renders them automatically as input rows, and
# (b) the Stripe checkout can map them 1:1 onto `customer.address`,
# `customer.email`, and `customer.tax_id_data`
# (Stripe requires the address in structured form, not as free text).
# ``order`` 200-209 groups the fields visually at the end of the form.
invoiceCompanyName: Optional[str] = Field(
default=None,
description="Firmenname / Empfaenger der Rechnung (falls abweichend vom Voller Name).",
max_length=200,
json_schema_extra={
"frontend_type": "text",
"frontend_required": False,
"label": "Rechnungsadresse - Firma",
"order": 200,
"placeholder": "Muster Treuhand AG",
},
)
invoiceContactName: Optional[str] = Field(
default=None,
description="Ansprechperson z. H. (z. B. Buchhaltung).",
max_length=200,
json_schema_extra={
"frontend_type": "text",
"frontend_required": False,
"label": "Rechnungsadresse - z. H.",
"order": 201,
"placeholder": "Buchhaltung",
},
)
invoiceEmail: Optional[str] = Field(
default=None,
description="E-Mail-Adresse fuer den Versand der Stripe-Rechnung.",
max_length=254,
json_schema_extra={
"frontend_type": "email",
"frontend_required": False,
"label": "Rechnungsadresse - E-Mail",
"order": 202,
"placeholder": "rechnungen@firma.ch",
},
)
invoiceLine1: Optional[str] = Field(
default=None,
description="Adresszeile 1 (Strasse + Nr.). Pflichtfeld fuer Stripe-Customer-Adresse.",
max_length=200,
json_schema_extra={
"frontend_type": "text",
"frontend_required": False,
"label": "Rechnungsadresse - Strasse + Nr.",
"order": 203,
"placeholder": "Bahnhofstrasse 1",
},
)
invoiceLine2: Optional[str] = Field(
default=None,
description="Adresszeile 2 (z. B. c/o, Postfach).",
max_length=200,
json_schema_extra={
"frontend_type": "text",
"frontend_required": False,
"label": "Rechnungsadresse - Adresszusatz",
"order": 204,
"placeholder": "c/o Buchhaltung",
},
)
invoicePostalCode: Optional[str] = Field(
default=None,
description="PLZ.",
max_length=20,
json_schema_extra={
"frontend_type": "text",
"frontend_required": False,
"label": "Rechnungsadresse - PLZ",
"order": 205,
"placeholder": "8000",
},
)
invoiceCity: Optional[str] = Field(
default=None,
description="Ort.",
max_length=100,
json_schema_extra={
"frontend_type": "text",
"frontend_required": False,
"label": "Rechnungsadresse - Ort",
"order": 206,
"placeholder": "Zuerich",
},
)
invoiceState: Optional[str] = Field(
default=None,
description="Kanton / Bundesland (optional).",
max_length=100,
json_schema_extra={
"frontend_type": "text",
"frontend_required": False,
"label": "Rechnungsadresse - Kanton",
"order": 207,
"placeholder": "ZH",
},
)
invoiceCountry: Optional[str] = Field(
default="CH",
description="ISO-3166 Alpha-2 Laendercode (Default: CH).",
max_length=2,
pattern=r"^[A-Z]{2}$",
json_schema_extra={
"frontend_type": "text",
"frontend_required": False,
"label": "Rechnungsadresse - Land (ISO)",
"order": 208,
"placeholder": "CH",
},
)
invoiceVatNumber: Optional[str] = Field(
default=None,
description="UID / MWST-Nummer des Empfaengers (z. B. CHE-123.456.789 MWST). Wird Stripe als `tax_id_data` mitgegeben.",
max_length=50,
json_schema_extra={
"frontend_type": "text",
"frontend_required": False,
"label": "Rechnungsadresse - UID-Nr.",
"order": 209,
"placeholder": "CHE-123.456.789 MWST",
},
)
@field_validator(
"invoiceCompanyName",
"invoiceContactName",
"invoiceEmail",
"invoiceLine1",
"invoiceLine2",
"invoicePostalCode",
"invoiceCity",
"invoiceState",
"invoiceVatNumber",
mode="before",
)
@classmethod
def _coerceInvoiceTextField(cls, v):
"""Trim incoming address strings; treat empty as ``None``."""
if v is None:
return None
if isinstance(v, str):
trimmed = v.strip()
return trimmed or None
return v
@field_validator("invoiceCountry", mode="before")
@classmethod
def _coerceInvoiceCountry(cls, v):
"""Normalize country code: trim, upper-case, empty -> default ``CH``."""
if v is None:
return "CH"
if isinstance(v, str):
trimmed = v.strip().upper()
return trimmed or "CH"
return v
@field_validator('isSystem', mode='before')
@classmethod
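To make the Stripe mapping from the comment block above concrete: a sketch of how the structured fields could be assembled into Stripe customer parameters. The function name is hypothetical and the Swiss tax-ID type ("ch_vat") is an assumption; address, email, and tax_id_data are the customer-creation parameters the comment refers to.

def buildStripeCustomerKwargs(mandate) -> dict:
    """Assemble Stripe customer parameters from the structured invoice fields."""
    address = {
        "line1": mandate.invoiceLine1,
        "line2": mandate.invoiceLine2,
        "postal_code": mandate.invoicePostalCode,
        "city": mandate.invoiceCity,
        "state": mandate.invoiceState,
        "country": mandate.invoiceCountry or "CH",
    }
    kwargs = {
        "name": mandate.invoiceCompanyName or mandate.label,
        "email": mandate.invoiceEmail,
        # Stripe wants the address structured; drop unset entries.
        "address": {k: v for k, v in address.items() if v is not None},
    }
    if mandate.invoiceVatNumber:
        # Tax IDs travel as a list of {type, value} dicts; "ch_vat" assumed.
        kwargs["tax_id_data"] = [{"type": "ch_vat", "value": mandate.invoiceVatNumber}]
    return kwargs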

View file

@@ -5,7 +5,7 @@ Creates a complete demo environment with two mandates, one user,
and all feature instances needed for the investor live demo.
Mandates:
- HappyLife AG (happylife) Dokumentenablage, Buchhaltung, Automationen, Chatbot, Datenschutz
- HappyLife AG (happylife) Dokumentenablage, Buchhaltung, Automationen, Datenschutz
- Alpina Treuhand AG (alpina) Dokumentenablage, 3x Treuhand-Kunden, Automationen, Datenschutz
User:
@@ -45,7 +45,6 @@ _FEATURES_HAPPYLIFE = [
{"code": "workspace", "label": "Dokumentenablage"},
{"code": "trustee", "label": "Buchhaltung"},
{"code": "graphicalEditor", "label": "Automationen"},
{"code": "chatbot", "label": "Chatbot"},
{"code": "neutralization", "label": "Datenschutz"},
]
_FEATURES_ALPINA = [
@@ -63,7 +62,7 @@ class InvestorDemo2026(_BaseDemoConfig):
label = "Investor Demo April 2026"
description = (
"Two mandates (HappyLife AG + Alpina Treuhand AG), one SysAdmin user, "
"trustee with RMA, workspace, graph editor, chatbot, and neutralization."
"trustee with RMA, workspace, graph editor, and neutralization."
)
# ------------------------------------------------------------------

View file

@@ -33,12 +33,13 @@ UI_OBJECTS = [
]
DATA_OBJECTS = [
# ── Record hierarchy: Context → Session → Message/Score, Context → Task ──
{
"objectKey": "data.feature.commcoach.CoachingContext",
"label": "Coaching-Kontext",
"meta": {
"table": "CoachingContext",
"fields": ["id", "title", "category", "status"],
"fields": ["id", "title", "category", "status", "lastSessionAt"],
"isParent": True,
"displayFields": ["title", "category", "status"],
}
@@ -48,45 +49,75 @@ DATA_OBJECTS = [
"label": "Coaching-Session",
"meta": {
"table": "CoachingSession",
"fields": ["id", "contextId", "status", "summary"],
"fields": ["id", "contextId", "status", "summary", "startedAt", "endedAt", "competenceScore"],
"isParent": True,
"parentTable": "CoachingContext",
"parentKey": "contextId",
"displayFields": ["startedAt", "status"],
}
},
{
"objectKey": "data.feature.commcoach.CoachingMessage",
"label": "Coaching-Nachricht",
"meta": {"table": "CoachingMessage", "fields": ["id", "sessionId", "role", "content"]}
"meta": {
"table": "CoachingMessage",
"fields": ["id", "sessionId", "contextId", "role", "content", "contentType"],
"parentTable": "CoachingSession",
"parentKey": "sessionId",
}
},
{
"objectKey": "data.feature.commcoach.CoachingScore",
"label": "Coaching-Score",
"meta": {
"table": "CoachingScore",
"fields": ["id", "sessionId", "contextId", "dimension", "score", "trend"],
"parentTable": "CoachingSession",
"parentKey": "sessionId",
}
},
{
"objectKey": "data.feature.commcoach.CoachingTask",
"label": "Coaching-Aufgabe",
"meta": {
"table": "CoachingTask",
"fields": ["id", "contextId", "title", "status"],
"fields": ["id", "contextId", "title", "status", "priority", "dueDate"],
"parentTable": "CoachingContext",
"parentKey": "contextId",
}
},
# ── Master data (cross-session, scoped per userId) ───────────────────────
{
"objectKey": "data.feature.commcoach.CoachingScore",
"label": "Coaching-Score",
"meta": {"table": "CoachingScore", "fields": ["id", "dimension", "score", "trend"]}
"objectKey": "data.feature.commcoach.userData",
"label": "Stammdaten",
"meta": {"isGroup": True}
},
{
"objectKey": "data.feature.commcoach.CoachingUserProfile",
"label": "Benutzerprofil",
"meta": {"table": "CoachingUserProfile", "fields": ["id", "userId", "dailyReminderEnabled"]}
"meta": {
"table": "CoachingUserProfile",
"group": "data.feature.commcoach.userData",
"fields": ["id", "userId", "dailyReminderEnabled", "streakDays", "totalSessions"],
}
},
{
"objectKey": "data.feature.commcoach.CoachingPersona",
"label": "Coaching-Persona",
"meta": {"table": "CoachingPersona", "fields": ["id", "key", "label", "gender"]}
"meta": {
"table": "CoachingPersona",
"group": "data.feature.commcoach.userData",
"fields": ["id", "key", "label", "gender", "category"],
}
},
{
"objectKey": "data.feature.commcoach.CoachingBadge",
"label": "Coaching-Auszeichnung",
"meta": {"table": "CoachingBadge", "fields": ["id", "badgeKey", "awardedAt"]}
"meta": {
"table": "CoachingBadge",
"group": "data.feature.commcoach.userData",
"fields": ["id", "badgeKey", "awardedAt"],
}
},
{
"objectKey": "data.feature.commcoach.*",

View file

@@ -23,25 +23,20 @@ UI_OBJECTS = [
"label": "Dashboard",
"meta": {"area": "dashboard"}
},
# Note: ui.feature.trustee.positions and .documents removed.
# Positionen and Dokumente are now consolidated tabs inside the
# ui.feature.trustee.data-tables view (TrusteeDataTablesView).
# Data-level RBAC (data.feature.trustee.TrusteePosition / .TrusteeDocument)
# remains and continues to gate per-row access.
{
"objectKey": "ui.feature.trustee.positions",
"label": "Positionen",
"meta": {"area": "positions"}
"objectKey": "ui.feature.trustee.data-tables",
"label": "Daten-Tabellen",
"meta": {"area": "data-tables"}
},
{
"objectKey": "ui.feature.trustee.documents",
"label": "Dokumente",
"meta": {"area": "documents"}
},
{
"objectKey": "ui.feature.trustee.expense-import",
"label": "Spesen Import",
"meta": {"area": "expense-import"}
},
{
"objectKey": "ui.feature.trustee.scan-upload",
"label": "Scannen / Hochladen",
"meta": {"area": "scan-upload"}
"objectKey": "ui.feature.trustee.import-process",
"label": "Import & Verarbeitung",
"meta": {"area": "import-process"}
},
{
"objectKey": "ui.feature.trustee.analyse",
@@ -66,72 +61,110 @@
]
# DATA Objects for RBAC catalog (tables/entities)
# Used for AccessRules on data-level permissions
# Used for AccessRules on data-level permissions.
# Architecture note: a feature instance IS the organisation. There is no
# TrusteeOrganisation parent grouping in the UDB — all tables are scoped
# to the feature instance via featureInstanceId.
DATA_OBJECTS = [
# ── Categorical Groups (UDB folders) ─────────────────────────────────────
{
"objectKey": "data.feature.trustee.TrusteeOrganisation",
"label": "Organisation",
"meta": {
"table": "TrusteeOrganisation",
"fields": ["id", "label", "enabled"],
"isParent": True,
"displayFields": ["label"],
}
"objectKey": "data.feature.trustee.localData",
"label": "Lokale Daten",
"meta": {"isGroup": True}
},
{
"objectKey": "data.feature.trustee.config",
"label": "Konfiguration",
"meta": {"isGroup": True}
},
{
"objectKey": "data.feature.trustee.accountingData",
"label": "Daten aus Buchhaltungssystem",
"meta": {"isGroup": True}
},
# ── Local data ───────────────────────────────────────────────────────────
{
"objectKey": "data.feature.trustee.TrusteePosition",
"label": "Position",
"meta": {
"table": "TrusteePosition",
"fields": ["id", "label", "description", "organisationId"],
"parentTable": "TrusteeOrganisation",
"parentKey": "organisationId",
"group": "data.feature.trustee.localData",
"fields": ["id", "valuta", "company", "desc", "bookingAmount", "bookingCurrency", "debitAccountNumber", "creditAccountNumber"],
}
},
{
"objectKey": "data.feature.trustee.TrusteeDocument",
"label": "Dokument",
"meta": {"table": "TrusteeDocument", "fields": ["id", "filename", "mimeType", "fileSize", "uploadDate"]}
"meta": {
"table": "TrusteeDocument",
"group": "data.feature.trustee.localData",
"fields": ["id", "documentName", "documentMimeType", "documentType", "sourceType"],
}
},
# ── Configuration ────────────────────────────────────────────────────────
{
"objectKey": "data.feature.trustee.TrusteeAccountingConfig",
"label": "Buchhaltungs-Konfiguration",
"label": "Buchhaltungs-Verbindung",
"meta": {
"table": "TrusteeAccountingConfig",
"fields": ["id", "connectorType", "displayLabel", "encryptedConfig", "isActive"],
"parentTable": "TrusteeOrganisation",
"parentKey": "organisationId",
"group": "data.feature.trustee.config",
"fields": ["id", "connectorType", "displayLabel", "isActive", "lastSyncAt", "lastSyncStatus"],
}
},
{
"objectKey": "data.feature.trustee.TrusteeAccountingSync",
"label": "Buchhaltungs-Synchronisation",
"meta": {"table": "TrusteeAccountingSync", "fields": ["id", "positionId", "syncStatus", "externalId"]}
"label": "Sync-Protokoll",
"meta": {
"table": "TrusteeAccountingSync",
"group": "data.feature.trustee.config",
"fields": ["id", "positionId", "syncStatus", "externalId"],
}
},
# ── Data from the accounting system ──────────────────────────────────────
{
"objectKey": "data.feature.trustee.TrusteeDataAccount",
"label": "Kontenplan (Sync)",
"meta": {"table": "TrusteeDataAccount", "fields": ["id", "accountNumber", "label", "accountType", "accountGroup", "currency", "isActive"]}
"label": "Kontenplan",
"meta": {
"table": "TrusteeDataAccount",
"group": "data.feature.trustee.accountingData",
"fields": ["id", "accountNumber", "label", "accountType", "accountGroup", "currency", "isActive"],
}
},
{
"objectKey": "data.feature.trustee.TrusteeDataJournalEntry",
"label": "Buchungen (Sync)",
"meta": {"table": "TrusteeDataJournalEntry", "fields": ["id", "externalId", "bookingDate", "reference", "description", "currency", "totalAmount"]}
"label": "Buchungen",
"meta": {
"table": "TrusteeDataJournalEntry",
"group": "data.feature.trustee.accountingData",
"fields": ["id", "externalId", "bookingDate", "reference", "description", "currency", "totalAmount"],
}
},
{
"objectKey": "data.feature.trustee.TrusteeDataJournalLine",
"label": "Buchungszeilen (Sync)",
"meta": {"table": "TrusteeDataJournalLine", "fields": ["id", "journalEntryId", "accountNumber", "debitAmount", "creditAmount", "currency", "taxCode", "costCenter", "description"]}
"label": "Buchungszeilen",
"meta": {
"table": "TrusteeDataJournalLine",
"group": "data.feature.trustee.accountingData",
"fields": ["id", "journalEntryId", "accountNumber", "debitAmount", "creditAmount", "currency", "taxCode", "costCenter", "description"],
}
},
{
"objectKey": "data.feature.trustee.TrusteeDataContact",
"label": "Kontakte (Sync)",
"meta": {"table": "TrusteeDataContact", "fields": ["id", "externalId", "contactType", "contactNumber", "name", "address", "zip", "city", "country", "email", "phone", "vatNumber"]}
"label": "Kontakte",
"meta": {
"table": "TrusteeDataContact",
"group": "data.feature.trustee.accountingData",
"fields": ["id", "externalId", "contactType", "contactNumber", "name", "address", "zip", "city", "country", "email", "phone", "vatNumber"],
}
},
{
"objectKey": "data.feature.trustee.TrusteeDataAccountBalance",
"label": "Kontosalden (Sync)",
"meta": {"table": "TrusteeDataAccountBalance", "fields": ["id", "accountNumber", "periodYear", "periodMonth", "openingBalance", "debitTotal", "creditTotal", "closingBalance", "currency"]}
"label": "Kontosalden",
"meta": {
"table": "TrusteeDataAccountBalance",
"group": "data.feature.trustee.accountingData",
"fields": ["id", "accountNumber", "periodYear", "periodMonth", "openingBalance", "debitTotal", "creditTotal", "closingBalance", "currency"],
}
},
{
"objectKey": "data.feature.trustee.*",
@@ -229,22 +262,10 @@ QUICK_ACTIONS = [
"color": "#4CAF50",
"category": "import",
"actionType": "link",
"config": {"targetView": "expense-import"},
"config": {"targetView": "import-process", "tab": "receipts"},
"requiredRoles": ["trustee-user", "trustee-accountant", "trustee-admin"],
"sortOrder": 1,
},
{
"id": "trustee-sync-accounting",
"label": "Daten synchronisieren",
"description": "Buchhaltungsdaten aus dem externen System aktualisieren",
"icon": "mdi-sync",
"color": "#FF9800",
"category": "import",
"actionType": "link",
"config": {"targetView": "settings"},
"requiredRoles": ["trustee-accountant", "trustee-admin"],
"sortOrder": 2,
},
{
"id": "trustee-upload-receipt",
"label": "Beleg hochladen",
@@ -253,8 +274,20 @@
"color": "#607D8B",
"category": "import",
"actionType": "link",
"config": {"targetView": "scan-upload"},
"config": {"targetView": "import-process", "tab": "upload"},
"requiredRoles": ["trustee-user", "trustee-client", "trustee-accountant", "trustee-admin"],
"sortOrder": 2,
},
{
"id": "trustee-sync-accounting",
"label": "Daten einlesen",
"description": "Buchhaltungsdaten aus dem externen System aktualisieren",
"icon": "mdi-sync",
"color": "#FF9800",
"category": "import",
"actionType": "link",
"config": {"targetView": "settings", "tab": "import-data"},
"requiredRoles": ["trustee-accountant", "trustee-admin"],
"sortOrder": 3,
},
{
@@ -489,8 +522,7 @@ TEMPLATE_ROLES = [
"description": "Treuhand-Betrachter - Treuhand-Daten einsehen (nur lesen)",
"accessRules": [
{"context": "UI", "item": "ui.feature.trustee.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.trustee.positions", "view": True},
{"context": "UI", "item": "ui.feature.trustee.documents", "view": True},
{"context": "UI", "item": "ui.feature.trustee.data-tables", "view": True},
{"context": "RESOURCE", "item": "resource.feature.trustee.workflows.view", "view": True},
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"},
],
@@ -500,9 +532,8 @@ TEMPLATE_ROLES = [
"description": "Treuhand-Benutzer - Eigene Treuhand-Daten erstellen und verwalten",
"accessRules": [
{"context": "UI", "item": "ui.feature.trustee.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.trustee.positions", "view": True},
{"context": "UI", "item": "ui.feature.trustee.documents", "view": True},
{"context": "UI", "item": "ui.feature.trustee.expense-import", "view": True},
{"context": "UI", "item": "ui.feature.trustee.data-tables", "view": True},
{"context": "UI", "item": "ui.feature.trustee.import-process", "view": True},
{"context": "RESOURCE", "item": "resource.feature.trustee.workflows.view", "view": True},
{"context": "RESOURCE", "item": "resource.feature.trustee.workflows.execute", "view": True},
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "n"},
@@ -525,8 +556,7 @@ TEMPLATE_ROLES = [
"description": "Treuhand-Buchhalter - Buchhaltungs- und Finanzdaten verwalten",
"accessRules": [
{"context": "UI", "item": "ui.feature.trustee.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.trustee.positions", "view": True},
{"context": "UI", "item": "ui.feature.trustee.documents", "view": True},
{"context": "UI", "item": "ui.feature.trustee.data-tables", "view": True},
{"context": "UI", "item": "ui.feature.trustee.analyse", "view": True},
{"context": "UI", "item": "ui.feature.trustee.abschluss", "view": True},
{"context": "UI", "item": "ui.feature.trustee.settings", "view": True},
@@ -542,10 +572,8 @@ TEMPLATE_ROLES = [
"description": "Treuhand-Kunde - Eigene Buchhaltungsdaten und Dokumente einsehen",
"accessRules": [
{"context": "UI", "item": "ui.feature.trustee.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.trustee.positions", "view": True},
{"context": "UI", "item": "ui.feature.trustee.documents", "view": True},
{"context": "UI", "item": "ui.feature.trustee.expense-import", "view": True},
{"context": "UI", "item": "ui.feature.trustee.scan-upload", "view": True},
{"context": "UI", "item": "ui.feature.trustee.data-tables", "view": True},
{"context": "UI", "item": "ui.feature.trustee.import-process", "view": True},
{"context": "DATA", "item": "data.feature.trustee.TrusteePosition", "view": True, "read": "m", "create": "m", "update": "m", "delete": "n"},
{"context": "DATA", "item": "data.feature.trustee.TrusteeDocument", "view": True, "read": "m", "create": "m", "update": "m", "delete": "n"},
],

View file

@@ -30,6 +30,13 @@ from .datamodelFeatureTrustee import (
TrusteeContract,
TrusteeDocument,
TrusteePosition,
TrusteeDataAccount,
TrusteeDataJournalEntry,
TrusteeDataJournalLine,
TrusteeDataContact,
TrusteeDataAccountBalance,
TrusteeAccountingConfig,
TrusteeAccountingSync,
)
from modules.datamodels.datamodelPagination import (
PaginationParams,
@@ -138,21 +145,24 @@ def getQuickActions(
from .mainTrustee import QUICK_ACTIONS, QUICK_ACTION_CATEGORIES
userRoleLabels: set = set()
rootInterface = getRootInterface()
if context.isPlatformAdmin:
userRoleLabels.add("trustee-admin")
else:
rootInterface = getRootInterface()
featureAccesses = rootInterface.getFeatureAccessesForUser(str(context.user.id))
for fa in featureAccesses:
if str(fa.featureInstanceId) == instanceId and fa.enabled:
roleIds = fa.roleIds if hasattr(fa, "roleIds") and fa.roleIds else []
for rid in roleIds:
role = rootInterface.getRole(str(rid))
if role and role.roleLabel:
userRoleLabels.add(role.roleLabel)
featureAccesses = rootInterface.getFeatureAccessesForUser(str(context.user.id))
for fa in featureAccesses:
if str(fa.featureInstanceId) == instanceId and fa.enabled:
# FeatureAccess (Pydantic) has no `roleIds` field; the join lives in
# FeatureAccessRole and must be looked up via the interface helper.
roleIds = rootInterface.getRoleIdsForFeatureAccess(str(fa.id))
for rid in roleIds:
role = rootInterface.getRole(str(rid))
if role and role.roleLabel:
userRoleLabels.add(role.roleLabel)
from modules.shared.i18nRegistry import resolveText
lang = (language or "de").strip() or "de"
filteredActions = []
for action in QUICK_ACTIONS:
required = set(action.get("requiredRoles", []))
@@ -161,8 +171,8 @@
if context.isPlatformAdmin or required.intersection(userRoleLabels):
resolved = {
"id": action["id"],
"label": resolveText(action.get("label", {})),
"description": resolveText(action.get("description", {})),
"label": resolveText(action.get("label", {}), lang=lang),
"description": resolveText(action.get("description", {}), lang=lang),
"icon": action.get("icon", ""),
"color": action.get("color", ""),
"category": action.get("category", ""),
@@ -173,14 +183,14 @@
if resolved["actionType"] == "agentPrompt" and "config" in resolved:
cfg = dict(resolved["config"])
if "uploadHint" in cfg:
cfg["uploadHint"] = resolveText(cfg["uploadHint"])
cfg["uploadHint"] = resolveText(cfg["uploadHint"], lang=lang)
resolved["config"] = cfg
filteredActions.append(resolved)
filteredActions.sort(key=lambda a: a["sortOrder"])
resolvedCategories = [
{"id": c["id"], "label": resolveText(c.get("label", {})), "sortOrder": c.get("sortOrder", 99)}
{"id": c["id"], "label": resolveText(c.get("label", {}), lang=lang), "sortOrder": c.get("sortOrder", 99)}
for c in QUICK_ACTION_CATEGORIES
]
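The fix threads lang through every resolveText call. The real modules.shared.i18nRegistry.resolveText is not shown in this diff; a plausible shape, assuming labels are either plain strings or language-keyed dicts with German as the fallback:

def resolveText(text, lang: str = "de") -> str:
    if isinstance(text, str):
        return text  # already a plain string
    if isinstance(text, dict):
        # Prefer the requested language, fall back to German, then anything.
        return text.get(lang) or text.get("de") or next(iter(text.values()), "")
    return ""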
@@ -199,6 +209,14 @@ _TRUSTEE_ENTITY_MODELS = {
"TrusteeContract": TrusteeContract,
"TrusteeDocument": TrusteeDocument,
"TrusteePosition": TrusteePosition,
# Read-only sync tables (TrusteeData*) and accounting bookkeeping
"TrusteeDataAccount": TrusteeDataAccount,
"TrusteeDataJournalEntry": TrusteeDataJournalEntry,
"TrusteeDataJournalLine": TrusteeDataJournalLine,
"TrusteeDataContact": TrusteeDataContact,
"TrusteeDataAccountBalance": TrusteeDataAccountBalance,
"TrusteeAccountingConfig": TrusteeAccountingConfig,
"TrusteeAccountingSync": TrusteeAccountingSync,
}
@@ -2097,3 +2115,277 @@ def delete_instance_role_rule(
except Exception as e:
logger.error(f"Error deleting AccessRule: {e}")
raise HTTPException(status_code=400, detail=f"Failed to delete rule: {str(e)}")
# =============================================================================
# Generic Read-Only Data Tables (consolidated TrusteeDataTablesView)
# =============================================================================
#
# These endpoints expose the seven additional Trustee tables that previously
# only had aggregate or specialised views. They are read-only:
# - TrusteeData* tables are populated by the accounting sync; manual edits
# would be overwritten on the next sync.
# - TrusteeAccountingConfig / TrusteeAccountingSync are operational records
# maintained by the connector layer.
#
# All seven endpoints share one helper (`_paginatedReadEndpoint`) that
# replicates the established pattern from `get_documents` / `get_positions`
# (Unified Filter API: mode=filterValues / mode=ids).
def _paginatedReadEndpoint(
*,
instanceId: str,
context: RequestContext,
modelClass,
pagination: Optional[str],
mode: Optional[str],
column: Optional[str],
):
"""Generic paginated, RBAC-aware GET handler for a Trustee data model.
Mirrors the pattern used by `get_documents` / `get_positions`:
- mode=filterValues: distinct column values for filter UI
- mode=ids: full id list for "select all matching"
- default: paginated result via `getRecordsetPaginatedWithRBAC`
"""
from modules.interfaces.interfaceRbac import (
getRecordsetPaginatedWithRBAC,
getDistinctColumnValuesWithRBAC,
)
from modules.routes.routeHelpers import (
handleFilterValuesInMemory,
handleIdsInMemory,
parseCrossFilterPagination,
)
from fastapi.responses import JSONResponse
mandateId = _validateInstanceAccess(instanceId, context)
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
if mode == "filterValues":
if not column:
raise HTTPException(status_code=400, detail="column parameter required for mode=filterValues")
try:
crossFilterPagination = parseCrossFilterPagination(column, pagination)
values = getDistinctColumnValuesWithRBAC(
connector=interface.db,
modelClass=modelClass,
column=column,
currentUser=interface.currentUser,
pagination=crossFilterPagination,
recordFilter=None,
mandateId=interface.mandateId,
featureInstanceId=interface.featureInstanceId,
featureCode=interface.FEATURE_CODE,
)
return JSONResponse(content=sorted(values, key=lambda v: str(v).lower()))
except Exception:
result = getRecordsetPaginatedWithRBAC(
connector=interface.db,
modelClass=modelClass,
currentUser=interface.currentUser,
pagination=None,
recordFilter=None,
mandateId=interface.mandateId,
featureInstanceId=interface.featureInstanceId,
featureCode=interface.FEATURE_CODE,
)
items = result.items if hasattr(result, "items") else result
items = [r.model_dump() if hasattr(r, "model_dump") else r for r in items]
return handleFilterValuesInMemory(items, column, pagination)
if mode == "ids":
result = getRecordsetPaginatedWithRBAC(
connector=interface.db,
modelClass=modelClass,
currentUser=interface.currentUser,
pagination=None,
recordFilter=None,
mandateId=interface.mandateId,
featureInstanceId=interface.featureInstanceId,
featureCode=interface.FEATURE_CODE,
)
items = result.items if hasattr(result, "items") else result
items = [r.model_dump() if hasattr(r, "model_dump") else r for r in items]
return handleIdsInMemory(items, pagination)
paginationParams = _parsePagination(pagination)
result = getRecordsetPaginatedWithRBAC(
connector=interface.db,
modelClass=modelClass,
currentUser=interface.currentUser,
pagination=paginationParams,
recordFilter=None,
mandateId=interface.mandateId,
featureInstanceId=interface.featureInstanceId,
featureCode=interface.FEATURE_CODE,
)
if paginationParams and hasattr(result, "items"):
return PaginatedResponse(
items=result.items,
pagination=PaginationMetadata(
currentPage=paginationParams.page or 1,
pageSize=paginationParams.pageSize or 20,
totalItems=result.totalItems,
totalPages=result.totalPages,
sort=paginationParams.sort if paginationParams else [],
filters=paginationParams.filters if paginationParams else None,
),
)
items = result.items if hasattr(result, "items") else result
return PaginatedResponse(items=items, pagination=None)
@router.get("/{instanceId}/data/accounts", response_model=PaginatedResponse[TrusteeDataAccount])
@limiter.limit("30/minute")
def get_data_accounts(
request: Request,
instanceId: str = Path(..., description="Feature Instance ID"),
pagination: Optional[str] = Query(None),
mode: Optional[str] = Query(None, description="'filterValues' or 'ids'"),
column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"),
context: RequestContext = Depends(getRequestContext),
):
"""Read-only list of synced chart-of-accounts entries (TrusteeDataAccount)."""
return _paginatedReadEndpoint(
instanceId=instanceId,
context=context,
modelClass=TrusteeDataAccount,
pagination=pagination,
mode=mode,
column=column,
)
@router.get("/{instanceId}/data/journal-entries", response_model=PaginatedResponse[TrusteeDataJournalEntry])
@limiter.limit("30/minute")
def get_data_journal_entries(
request: Request,
instanceId: str = Path(..., description="Feature Instance ID"),
pagination: Optional[str] = Query(None),
mode: Optional[str] = Query(None, description="'filterValues' or 'ids'"),
column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"),
context: RequestContext = Depends(getRequestContext),
):
"""Read-only list of synced journal entries (TrusteeDataJournalEntry)."""
return _paginatedReadEndpoint(
instanceId=instanceId,
context=context,
modelClass=TrusteeDataJournalEntry,
pagination=pagination,
mode=mode,
column=column,
)
@router.get("/{instanceId}/data/journal-lines", response_model=PaginatedResponse[TrusteeDataJournalLine])
@limiter.limit("30/minute")
def get_data_journal_lines(
request: Request,
instanceId: str = Path(..., description="Feature Instance ID"),
pagination: Optional[str] = Query(None),
mode: Optional[str] = Query(None, description="'filterValues' or 'ids'"),
column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"),
context: RequestContext = Depends(getRequestContext),
):
"""Read-only list of synced journal lines (TrusteeDataJournalLine)."""
return _paginatedReadEndpoint(
instanceId=instanceId,
context=context,
modelClass=TrusteeDataJournalLine,
pagination=pagination,
mode=mode,
column=column,
)
@router.get("/{instanceId}/data/contacts", response_model=PaginatedResponse[TrusteeDataContact])
@limiter.limit("30/minute")
def get_data_contacts(
request: Request,
instanceId: str = Path(..., description="Feature Instance ID"),
pagination: Optional[str] = Query(None),
mode: Optional[str] = Query(None, description="'filterValues' or 'ids'"),
column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"),
context: RequestContext = Depends(getRequestContext),
):
"""Read-only list of synced contacts (TrusteeDataContact)."""
return _paginatedReadEndpoint(
instanceId=instanceId,
context=context,
modelClass=TrusteeDataContact,
pagination=pagination,
mode=mode,
column=column,
)
@router.get("/{instanceId}/data/account-balances", response_model=PaginatedResponse[TrusteeDataAccountBalance])
@limiter.limit("30/minute")
def get_data_account_balances(
request: Request,
instanceId: str = Path(..., description="Feature Instance ID"),
pagination: Optional[str] = Query(None),
mode: Optional[str] = Query(None, description="'filterValues' or 'ids'"),
column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"),
context: RequestContext = Depends(getRequestContext),
):
"""Read-only list of synced account balances (TrusteeDataAccountBalance)."""
return _paginatedReadEndpoint(
instanceId=instanceId,
context=context,
modelClass=TrusteeDataAccountBalance,
pagination=pagination,
mode=mode,
column=column,
)
@router.get("/{instanceId}/accounting/configs", response_model=PaginatedResponse[TrusteeAccountingConfig])
@limiter.limit("30/minute")
def get_accounting_configs(
request: Request,
instanceId: str = Path(..., description="Feature Instance ID"),
pagination: Optional[str] = Query(None),
mode: Optional[str] = Query(None, description="'filterValues' or 'ids'"),
column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"),
context: RequestContext = Depends(getRequestContext),
):
"""Read-only list of accounting connector configurations (TrusteeAccountingConfig).
Note: secret config fields are stored masked in the underlying record;
UI consumers must rely on the dedicated `/accounting/config` endpoint
for secret-aware editing.
"""
return _paginatedReadEndpoint(
instanceId=instanceId,
context=context,
modelClass=TrusteeAccountingConfig,
pagination=pagination,
mode=mode,
column=column,
)
@router.get("/{instanceId}/accounting/syncs", response_model=PaginatedResponse[TrusteeAccountingSync])
@limiter.limit("30/minute")
def get_accounting_syncs(
request: Request,
instanceId: str = Path(..., description="Feature Instance ID"),
pagination: Optional[str] = Query(None),
mode: Optional[str] = Query(None, description="'filterValues' or 'ids'"),
column: Optional[str] = Query(None, description="Column key (required when mode=filterValues)"),
context: RequestContext = Depends(getRequestContext),
):
"""Read-only list of accounting sync records (TrusteeAccountingSync)."""
return _paginatedReadEndpoint(
instanceId=instanceId,
context=context,
modelClass=TrusteeAccountingSync,
pagination=pagination,
mode=mode,
column=column,
)

View file

@@ -603,6 +603,17 @@ async def streamWorkspaceStart(
chatInterface.createMessage(userMessageData)
# Persist the attached data sources on the workflow so the chip-bar can
# be restored when the user re-opens this chat (per-chat persistence).
# Sources that no longer resolve are filtered out client-side on load.
try:
chatInterface.updateWorkflow(workflowId, {
"attachedDataSourceIds": list(userInput.dataSourceIds or []),
"attachedFeatureDataSourceIds": list(userInput.featureDataSourceIds or []),
})
except Exception as persistErr:
logger.warning(f"Could not persist chat attachments for {workflowId}: {persistErr}")
agentTask = asyncio.ensure_future(
_runWorkspaceAgent(
workflowId=workflowId,
@@ -1112,7 +1123,12 @@ async def getWorkspaceMessages(
workflowId: str = Path(...),
context: RequestContext = Depends(getRequestContext),
):
"""Get all messages for a workspace workflow/conversation."""
"""Get all messages for a workspace workflow/conversation.
Also returns the IDs of data sources that were attached the last time the
user sent a message in this chat, so the WorkspaceInput can rehydrate its
chip-bar (per-chat attachment persistence).
"""
_mandateId, _ = _validateInstanceAccess(instanceId, context)
chatInterface = _getChatInterface(context, featureInstanceId=instanceId, mandateId=_mandateId)
messages = chatInterface.getMessages(workflowId) or []
@@ -1124,7 +1140,62 @@ async def getWorkspaceMessages(
str(m.get("id") or ""),
)
)
return JSONResponse({"messages": items})
attachedDsIds: List[str] = []
attachedFdsIds: List[str] = []
try:
wf = chatInterface.getWorkflow(workflowId)
if wf:
attachedDsIds = list(getattr(wf, "attachedDataSourceIds", None) or [])
attachedFdsIds = list(getattr(wf, "attachedFeatureDataSourceIds", None) or [])
except Exception as e:
logger.debug(f"getWorkspaceMessages: cannot read attachments for {workflowId}: {e}")
return JSONResponse({
"messages": items,
"attachedDataSourceIds": attachedDsIds,
"attachedFeatureDataSourceIds": attachedFdsIds,
})
class UpdateChatAttachmentsRequest(BaseModel):
"""Body for PATCH /workflows/{workflowId}/attachments.
Replaces the persisted attachment lists for the chat. Sent when the user
detaches a source via the WorkspaceInput chip-bar so the change survives
a chat reload without waiting for the next sendMessage round-trip.
"""
dataSourceIds: Optional[List[str]] = Field(default=None)
featureDataSourceIds: Optional[List[str]] = Field(default=None)
@router.patch("/{instanceId}/workflows/{workflowId}/attachments")
@limiter.limit("300/minute")
async def patchWorkspaceWorkflowAttachments(
request: Request,
instanceId: str = Path(...),
workflowId: str = Path(...),
body: UpdateChatAttachmentsRequest = Body(...),
context: RequestContext = Depends(getRequestContext),
):
"""Persist the chip-bar attachment IDs for a chat (per-chat sources)."""
_mandateId, _ = _validateInstanceAccess(instanceId, context)
chatInterface = _getChatInterface(context, featureInstanceId=instanceId, mandateId=_mandateId)
workflow = chatInterface.getWorkflow(workflowId)
if not workflow:
raise HTTPException(status_code=404, detail=f"Workflow {workflowId} not found")
updateData: Dict[str, Any] = {}
if body.dataSourceIds is not None:
updateData["attachedDataSourceIds"] = list(body.dataSourceIds)
if body.featureDataSourceIds is not None:
updateData["attachedFeatureDataSourceIds"] = list(body.featureDataSourceIds)
if updateData:
chatInterface.updateWorkflow(workflowId, updateData)
return JSONResponse({
"workflowId": workflowId,
"attachedDataSourceIds": updateData.get("attachedDataSourceIds",
list(getattr(workflow, "attachedDataSourceIds", None) or [])),
"attachedFeatureDataSourceIds": updateData.get("attachedFeatureDataSourceIds",
list(getattr(workflow, "attachedFeatureDataSourceIds", None) or [])),
})
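A minimal sketch of the detach round-trip this endpoint enables (URL prefix and token are hypothetical; the replace-the-list semantics follow the request model above, where an omitted field stays unchanged):
import requests

url = ("https://example.invalid/api/workspace"  # hypothetical prefix
       "/<instanceId>/workflows/<workflowId>/attachments")
# Replace the data-source list; featureDataSourceIds is omitted -> unchanged.
resp = requests.patch(
    url,
    json={"dataSourceIds": ["ds-1", "ds-2"]},
    headers={"Authorization": "Bearer <token>"},
)
print(resp.json()["attachedDataSourceIds"])  # -> ["ds-1", "ds-2"]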
# ---------------------------------------------------------------------------
@@ -1461,13 +1532,31 @@ async def listFeatureConnectionTables(
except Exception:
accessible = catalog.getDataObjects(inst.featureCode)
tables = []
accessibleKeys = {obj.get("objectKey", "") for obj in accessible}
referencedGroups = set()
for obj in accessible:
meta = obj.get("meta", {})
if meta.get("wildcard") or meta.get("isGroup"):
continue
if meta.get("group"):
referencedGroups.add(meta["group"])
tables = []
for obj in catalog.getDataObjects(inst.featureCode):
meta = obj.get("meta", {})
if meta.get("wildcard"):
continue
objectKey = obj.get("objectKey", "")
if meta.get("isGroup"):
# Groups are metadata-only; include if at least one child is accessible
# (regardless of whether the group itself was RBAC-granted).
if objectKey not in referencedGroups:
continue
else:
if objectKey not in accessibleKeys:
continue
node = {
"objectKey": obj.get("objectKey", ""),
"objectKey": objectKey,
"tableName": meta.get("table", ""),
"label": resolveText(obj.get("label", "")),
"fields": meta.get("fields", []),
@@ -1475,6 +1564,8 @@
"parentTable": meta.get("parentTable") or None,
"parentKey": meta.get("parentKey") or None,
"displayFields": meta.get("displayFields", []),
"isGroup": bool(meta.get("isGroup", False)),
"group": meta.get("group") or None,
}
tables.append(node)
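A self-contained toy recap of the visibility rule implemented above (names are illustrative): a group node appears iff at least one of its non-group children survived the RBAC filter, while leaf objects must be granted directly.
catalog = [
    {"objectKey": "grpA", "meta": {"isGroup": True}},
    {"objectKey": "tblA1", "meta": {"group": "grpA", "table": "A1"}},
    {"objectKey": "grpB", "meta": {"isGroup": True}},
    {"objectKey": "tblB1", "meta": {"group": "grpB", "table": "B1"}},
    {"objectKey": "*", "meta": {"wildcard": True}},
]
accessible = [o for o in catalog if o["objectKey"] in {"tblA1"}]  # RBAC grant
accessibleKeys = {o["objectKey"] for o in accessible}
referencedGroups = {o["meta"]["group"] for o in accessible
                    if o["meta"].get("group") and not o["meta"].get("isGroup")}
visible = []
for obj in catalog:
    meta = obj["meta"]
    if meta.get("wildcard"):
        continue
    if meta.get("isGroup"):
        if obj["objectKey"] in referencedGroups:
            visible.append(obj["objectKey"])
    elif obj["objectKey"] in accessibleKeys:
        visible.append(obj["objectKey"])
assert visible == ["grpA", "tblA1"]  # grpB is hidden: no accessible child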
@@ -1488,9 +1579,15 @@ async def listParentObjects(
instanceId: str = Path(...),
fiId: str = Path(..., description="Feature instance ID"),
tableName: str = Path(..., description="Parent table name from DATA_OBJECTS"),
parentKey: Optional[str] = Query(None, description="Optional FK column name to filter by ancestor record (nested parent rendering)"),
parentValue: Optional[str] = Query(None, description="Optional FK value matching parentKey to filter children of a specific ancestor record"),
context: RequestContext = Depends(getRequestContext),
):
"""List records from a parent table so the user can pick a specific record to scope data."""
"""List records from a parent table so the user can pick a specific record to scope data.
When parentKey + parentValue are provided, results are additionally filtered by that FK,
enabling nested record hierarchies (e.g. Sessions OF Context X).
"""
wsMandateId, _ = _validateInstanceAccess(instanceId, context)
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.security.rbacCatalog import getCatalogService
@@ -1561,6 +1658,22 @@ async def listParentObjects(
if hasUserId:
sql += ' AND "userId" = %s'
params.append(str(context.user.id))
if parentKey and parentValue:
cur.execute(
"SELECT 1 FROM information_schema.columns "
"WHERE table_schema = 'public' AND LOWER(table_name) = LOWER(%s) "
"AND column_name = %s",
[tableName, parentKey],
)
if cur.rowcount > 0:
sql += f' AND "{parentKey}" = %s'
params.append(parentValue)
else:
logger.warning(
f"listParentObjects({tableName}): ignoring parentKey '{parentKey}' (column does not exist)"
)
sql += ' ORDER BY "id" DESC LIMIT 100'
cur.execute(sql, params)
rows = []

View file

@@ -1839,10 +1839,14 @@ class BillingObjects:
userId: Optional[str] = None,
startTs: Optional[float] = None,
endTs: Optional[float] = None,
period: str = "month",
bucketSize: str = "month",
) -> Dict[str, Any]:
"""
Pure SQL aggregation for statistics. No row-level loading.
`bucketSize` controls only the time-series aggregation granularity
(`'day' | 'month' | 'year'`); the date range is set via `startTs`/`endTs`.
Returns: totalCost, transactionCount, costByProvider, costByModel,
costByFeature, costByAccountId, timeSeries
"""
@@ -1909,10 +1913,17 @@ class BillingObjects:
]
# 6) Time series via DATE_TRUNC on epoch timestamp
if period == "day":
truncExpr = "DATE_TRUNC('day', TO_TIMESTAMP(\"sysCreatedAt\"))"
else:
truncExpr = "DATE_TRUNC('month', TO_TIMESTAMP(\"sysCreatedAt\"))"
_bucketSpec = {
"day": ("day", "%Y-%m-%d"),
"month": ("month", "%Y-%m"),
"year": ("year", "%Y"),
}.get(bucketSize)
if _bucketSpec is None:
raise ValueError(
f"Invalid bucketSize: {bucketSize!r} (expected day|month|year)"
)
_truncUnit, _labelFormat = _bucketSpec
truncExpr = f"DATE_TRUNC('{_truncUnit}', TO_TIMESTAMP(\"sysCreatedAt\"))"
cur.execute(
f'SELECT {truncExpr} AS bucket, SUM("amount") AS total, COUNT(*) AS cnt '
@@ -1923,10 +1934,7 @@
timeSeries = []
for r in cur.fetchall():
bucket = r["bucket"]
if period == "day":
label = bucket.strftime("%Y-%m-%d") if bucket else "unknown"
else:
label = bucket.strftime("%Y-%m") if bucket else "unknown"
label = bucket.strftime(_labelFormat) if bucket else "unknown"
timeSeries.append({
"date": label,
"cost": round(float(r["total"]), 4),

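For reference, a tiny runnable sketch of how the three bucket specs label the same DATE_TRUNC output:
from datetime import datetime

bucket = datetime(2026, 4, 1)  # e.g. what DATE_TRUNC('month', ...) returns
for size, fmt in (("day", "%Y-%m-%d"), ("month", "%Y-%m"), ("year", "%Y")):
    print(size, "->", bucket.strftime(fmt))
# day -> 2026-04-01, month -> 2026-04, year -> 2026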
View file

@@ -734,7 +734,9 @@ class ChatObjects:
lastActivity=_toFloat(workflow.get("lastActivity")),
startedAt=_toFloat(workflow.get("startedAt")),
logs=logs,
messages=messages
messages=messages,
attachedDataSourceIds=workflow.get("attachedDataSourceIds") or [],
attachedFeatureDataSourceIds=workflow.get("attachedFeatureDataSourceIds") or [],
)
except Exception as e:
logger.error(f"getWorkflow: data validation failed for {workflowId}: {e}")
@@ -891,7 +893,13 @@ class ChatObjects:
lastActivity=updated.get("lastActivity", workflow.lastActivity),
startedAt=updated.get("startedAt", workflow.startedAt),
logs=logs,
messages=messages
messages=messages,
attachedDataSourceIds=updated.get("attachedDataSourceIds")
if updated.get("attachedDataSourceIds") is not None
else (getattr(workflow, "attachedDataSourceIds", None) or []),
attachedFeatureDataSourceIds=updated.get("attachedFeatureDataSourceIds")
if updated.get("attachedFeatureDataSourceIds") is not None
else (getattr(workflow, "attachedFeatureDataSourceIds", None) or []),
)
def deleteWorkflow(self, workflowId: str) -> bool:

View file

@@ -104,6 +104,49 @@ class FeatureInterface:
logger.error(f"Error creating feature {code}: {e}")
raise ValueError(f"Failed to create feature: {e}")
def upsertFeature(self, code: str, label: Any, icon: str = "mdi-puzzle") -> str:
"""Insert or update a Feature row for ``code``.
Idempotent counterpart to :meth:`createFeature` used by the boot-time
sync (see ``modules.system.registry.syncCatalogFeaturesToDb``) so the
``Feature`` DB-table stays consistent with the in-memory feature
registry built from the code modules. Without this sync the
``FeatureInstance.featureCode`` FK would be dangling for every
feature whose definition lives only in code (the user-reported
false-positive orphans).
Args:
code: Unique feature code (e.g. ``"trustee"``).
label: Either a string (the source label, will be wrapped as
``{"xx": label}``), a dict ``{"xx": ..., "de": ..., ...}``
or an existing TextMultilingual instance.
icon: Icon identifier.
Returns:
One of ``"created"``, ``"updated"``, ``"unchanged"``.
"""
try:
normalizedLabel = coerce_text_multilingual(label) if not isinstance(label, dict) else label
existing = self.getFeature(code)
if existing is None:
self.createFeature(code, normalizedLabel.model_dump() if hasattr(normalizedLabel, "model_dump") else normalizedLabel, icon)
return "created"
existingLabel = existing.label.model_dump() if hasattr(existing.label, "model_dump") else existing.label
desiredLabel = normalizedLabel.model_dump() if hasattr(normalizedLabel, "model_dump") else normalizedLabel
updateData: Dict[str, Any] = {}
if existingLabel != desiredLabel:
updateData["label"] = desiredLabel
if (existing.icon or "") != (icon or ""):
updateData["icon"] = icon or ""
if not updateData:
return "unchanged"
self.db.recordModify(Feature, code, updateData)
return "updated"
except Exception as e:
logger.error(f"Error upserting feature {code}: {e}")
raise ValueError(f"Failed to upsert feature: {e}")
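A minimal sketch of how the boot-time sync named in the docstring could drive this method; getFeatures() is an assumed accessor on the catalog service, only upsertFeature() is real here.
def syncCatalogFeaturesToDb(catalogService, featureInterface) -> Dict[str, int]:
    """Hypothetical boot sync: mirror the in-memory catalog into the DB."""
    counts = {"created": 0, "updated": 0, "unchanged": 0}
    for feature in catalogService.getFeatures():  # assumed accessor
        outcome = featureInterface.upsertFeature(
            code=feature["code"],
            label=feature["label"],
            icon=feature.get("icon", "mdi-puzzle"),
        )
        counts[outcome] += 1
    return counts  # e.g. {"created": 2, "updated": 1, "unchanged": 40}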
# ============================================
# Feature Instance Methods
# ============================================
@@ -201,9 +244,21 @@ class FeatureInterface:
if copyTemplateRoles:
self._copyTemplateRoles(featureCode, mandateId, instanceId)
# Copy template workflows (if feature defines TEMPLATE_WORKFLOWS)
self._copyTemplateWorkflows(featureCode, mandateId, instanceId)
# Copy template workflows (if feature defines TEMPLATE_WORKFLOWS).
# IMPORTANT: the workflow bootstrap must NOT kill instance creation
# (instance + roles are primary; an admin can reload workflows via sync).
# Failures are still logged loudly so they do not go unnoticed.
try:
self._copyTemplateWorkflows(featureCode, mandateId, instanceId)
except Exception as wfErr:
logger.error(
f"createFeatureInstance: workflow bootstrap FAILED for feature "
f"'{featureCode}' instance {instanceId} — instance was created but "
f"workflows are missing. Use POST /api/features/instances/{instanceId}"
f"/sync-workflows to recover. Reason: {wfErr}",
exc_info=True,
)
cleanedRecord = dict(createdInstance)
return FeatureInstance(**cleanedRecord)
@@ -227,31 +282,57 @@
Returns:
Number of workflows copied
Raises:
RuntimeError: If templates exist but cannot be copied.
Caller decides whether to swallow or re-raise.
"""
import json
import importlib
from modules.system.registry import loadFeatureMainModules
mainModules = loadFeatureMainModules()
featureModule = mainModules.get(featureCode)
if not featureModule:
logger.debug(
f"_copyTemplateWorkflows: no main module loaded for feature '{featureCode}' — nothing to copy"
)
return 0
getTemplateWorkflows = getattr(featureModule, "getTemplateWorkflows", None)
if not getTemplateWorkflows:
logger.debug(
f"_copyTemplateWorkflows: feature '{featureCode}' has no getTemplateWorkflows() — nothing to copy"
)
return 0
try:
from modules.system.registry import loadFeatureMainModules
mainModules = loadFeatureMainModules()
featureModule = mainModules.get(featureCode)
if not featureModule:
return 0
getTemplateWorkflows = getattr(featureModule, "getTemplateWorkflows", None)
if not getTemplateWorkflows:
return 0
templateWorkflows = getTemplateWorkflows() or []
except Exception as e:
logger.error(
f"_copyTemplateWorkflows: getTemplateWorkflows() raised for feature '{featureCode}': {e}",
exc_info=True,
)
raise RuntimeError(
f"Feature '{featureCode}' getTemplateWorkflows() failed: {e}"
)
templateWorkflows = getTemplateWorkflows()
if not templateWorkflows:
return 0
if not templateWorkflows:
return 0
from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
from modules.interfaces.interfaceDbApp import getRootInterface
rootUser = getRootInterface().currentUser
geInterface = getGraphicalEditorInterface(rootUser, mandateId, instanceId)
logger.info(
f"_copyTemplateWorkflows: copying {len(templateWorkflows)} template workflow(s) "
f"for feature '{featureCode}' to instance {instanceId} (mandate={mandateId})"
)
copied = 0
for template in templateWorkflows:
from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
from modules.security.rootAccess import getRootUser
rootUser = getRootUser()
geInterface = getGraphicalEditorInterface(rootUser, mandateId, instanceId)
copied = 0
failed = 0
for template in templateWorkflows:
templateId = template.get("id", "<no-id>")
try:
graphJson = json.dumps(template.get("graph", {}))
graphJson = graphJson.replace("{{featureInstanceId}}", instanceId)
graph = json.loads(graphJson)
@@ -263,22 +344,30 @@
"graph": graph,
"tags": template.get("tags", [f"feature:{featureCode}"]),
"isTemplate": False,
"templateSourceId": template["id"],
"templateSourceId": templateId,
"templateScope": "instance",
"active": True,
})
copied += 1
except Exception as e:
failed += 1
logger.error(
f"_copyTemplateWorkflows: failed to create workflow '{templateId}' for "
f"feature '{featureCode}' instance {instanceId}: {e}",
exc_info=True,
)
if copied > 0:
logger.info(f"Feature '{featureCode}': Copied {copied} template workflows to instance {instanceId}")
return copied
except ImportError:
logger.debug(f"No feature module found for '{featureCode}' — skipping workflow bootstrap")
return 0
except Exception as e:
logger.warning(f"Error copying template workflows for '{featureCode}' instance {instanceId}: {e}")
return 0
if copied:
logger.info(
f"_copyTemplateWorkflows: copied {copied}/{len(templateWorkflows)} workflow(s) "
f"for feature '{featureCode}' instance {instanceId} (failed={failed})"
)
if failed:
raise RuntimeError(
f"_copyTemplateWorkflows: {failed}/{len(templateWorkflows)} workflow(s) failed "
f"for feature '{featureCode}' instance {instanceId}"
)
return copied
def _copyTemplateRoles(self, featureCode: str, mandateId: str, instanceId: str) -> int:
"""

View file

@@ -14,9 +14,11 @@ from modules.auth import limiter
from modules.auth.authentication import requireSysAdmin
from modules.datamodels.datamodelUam import User
from modules.system.databaseHealth import (
OrphanCleanupRefused,
_cleanAllOrphans,
_cleanOrphans,
_getTableStats,
_listOrphans,
_scanOrphans,
)
@@ -34,6 +36,19 @@ class OrphanCleanRequest(BaseModel):
db: str = Field(..., description="Source database name (e.g. poweron_app)")
table: str = Field(..., description="Source table (Pydantic model class name)")
column: str = Field(..., description="FK column on the source table")
force: bool = Field(
False,
description="Override safety guards (empty target / >50% of source). Use with care.",
)
class OrphanCleanAllRequest(BaseModel):
"""Body for cleaning all detected orphans."""
force: bool = Field(
False,
description="Override safety guards on every relationship. Use with extreme care.",
)
@router.get("/stats")
@@ -60,6 +75,39 @@ def getDatabaseOrphans(
return {"orphans": rows}
@router.get("/orphans/list")
@limiter.limit("30/minute")
def getDatabaseOrphansList(
request: Request,
db: str,
table: str,
column: str,
limit: int = 1000,
currentUser: User = Depends(requireSysAdmin),
) -> Dict[str, Any]:
"""Return up to ``limit`` actual orphan source-rows for one FK relationship.
Used by the SysAdmin UI's per-row download button: a human can review the
orphan list (full source row + the unresolved FK value) before triggering
the destructive clean operation.
"""
try:
records = _listOrphans(db=db, table=table, column=column, limit=limit)
except ValueError as e:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=str(e),
) from e
return {
"db": db,
"table": table,
"column": column,
"count": len(records),
"limit": limit,
"records": records,
}
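A sketch of the intended review-before-clean workflow (base URL and token are hypothetical placeholders; db/table/column follow the field descriptions above):
import requests

BASE = "https://example.invalid/api/sysadmin/database"  # hypothetical prefix
auth = {"Authorization": "Bearer <sysadmin-token>"}     # placeholder
rel = {"db": "poweron_app", "table": "FeatureInstance", "column": "featureCode"}

# 1) Download the orphan rows for human review before anything destructive:
preview = requests.get(f"{BASE}/orphans/list",
                       params={**rel, "limit": 100}, headers=auth).json()
print(preview["count"], "orphans")

# 2) Clean; HTTP 409 means a safety guard refused (force=True would override):
resp = requests.post(f"{BASE}/orphans/clean",
                     json={**rel, "force": False}, headers=auth)
if resp.status_code == 409:
    print("refused:", resp.json()["detail"]["reason"])
else:
    print("deleted:", resp.json()["deleted"])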
@router.post("/orphans/clean")
@limiter.limit("10/minute")
def postDatabaseOrphansClean(
@@ -69,19 +117,33 @@ def postDatabaseOrphansClean(
) -> Dict[str, Any]:
"""Delete orphaned rows for a single FK relationship."""
try:
deleted = _cleanOrphans(body.db, body.table, body.column)
deleted = _cleanOrphans(body.db, body.table, body.column, force=body.force)
except OrphanCleanupRefused as e:
logger.warning(
"SysAdmin orphan clean REFUSED: user=%s db=%s table=%s column=%s reason=%s",
currentUser.username,
body.db,
body.table,
body.column,
e,
)
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail={"refused": True, "reason": str(e)},
) from e
except ValueError as e:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=str(e),
) from e
logger.info(
"SysAdmin orphan clean: user=%s db=%s table=%s column=%s deleted=%s",
"SysAdmin orphan clean: user=%s db=%s table=%s column=%s deleted=%s force=%s",
currentUser.username,
body.db,
body.table,
body.column,
deleted,
body.force,
)
return {"deleted": deleted}
@@ -90,13 +152,26 @@ def postDatabaseOrphansClean(
@limiter.limit("2/minute")
def postDatabaseOrphansCleanAll(
request: Request,
body: Optional[OrphanCleanAllRequest] = None,
currentUser: User = Depends(requireSysAdmin),
) -> Dict[str, Any]:
"""Run orphan cleanup for every relationship that currently has orphans."""
results: List[dict] = _cleanAllOrphans()
"""Run orphan cleanup for every relationship that currently has orphans.
Returns per-relationship results. Each entry contains either `deleted` (success),
`skipped` (safety guard triggered, no force), or `error` (other failure).
"""
force = bool(body.force) if body is not None else False
results: List[dict] = _cleanAllOrphans(force=force)
skipped = sum(1 for r in results if "skipped" in r)
errored = sum(1 for r in results if "error" in r)
deletedTotal = sum(int(r.get("deleted", 0)) for r in results)
logger.info(
"SysAdmin orphan clean-all: user=%s batches=%s",
"SysAdmin orphan clean-all: user=%s batches=%s deleted=%s skipped=%s errored=%s force=%s",
currentUser.username,
len(results),
deletedTotal,
skipped,
errored,
force,
)
return {"results": results}
return {"results": results, "skipped": skipped, "errored": errored, "deleted": deletedTotal}

View file

@@ -171,9 +171,16 @@ def get_my_feature_instances(
if mandate and not getattr(mandate, "enabled", True):
continue
if mandate:
mandateName = mandate.name if hasattr(mandate, 'name') else mandateId
mandateLabel = (
mandate.label
if hasattr(mandate, 'label') and mandate.label
else mandateName
)
mandatesMap[mandateId] = {
"id": mandateId,
"name": mandate.name if hasattr(mandate, 'name') else mandateId,
"name": mandateName,
"label": mandateLabel,
"code": mandate.code if hasattr(mandate, 'code') else None,
"features": []
}
@@ -181,6 +188,7 @@
mandatesMap[mandateId] = {
"id": mandateId,
"name": mandateId,
"label": mandateId,
"code": None,
"features": []
}
@@ -210,6 +218,7 @@
"featureCode": instance.featureCode,
"mandateId": mandateId,
"mandateName": mandatesMap[mandateId]["name"],
"mandateLabel": mandatesMap[mandateId]["label"],
"instanceLabel": instance.label,
"userRoles": userRoles,
"permissions": permissions

View file

@@ -305,8 +305,10 @@ async def getAuditLog(
async def getAuditStats(
request: Request,
context: RequestContext = Depends(getRequestContext),
timeRange: int = Query(30, ge=1, le=365, description="Days to aggregate"),
groupBy: str = Query("model", description="Grouping: model, user, feature, day"),
dateFrom: str = Query(..., description="ISO YYYY-MM-DD (inclusive)"),
dateTo: str = Query(..., description="ISO YYYY-MM-DD (inclusive)"),
groupBy: str = Query("model", pattern="^(model|user|feature|day)$",
description="Grouping: model, user, feature, day"),
):
_requireAuditAccess(context)
mandateId = str(context.mandateId) if context.mandateId else ""
@@ -314,7 +316,12 @@ async def getAuditStats(
raise HTTPException(status_code=400, detail=routeApiMsg("Mandanten-ID erforderlich"))
from modules.shared.aiAuditLogger import aiAuditLogger
return aiAuditLogger.getAiAuditStats(mandateId, timeRangeDays=timeRange, groupBy=groupBy)
from modules.shared.dateRange import isoDateRangeToLocalEpoch
fromTs, toTs = isoDateRangeToLocalEpoch(dateFrom, dateTo)
return aiAuditLogger.getAiAuditStats(
mandateId, fromTs=fromTs, toTs=toTs, groupBy=groupBy,
)
# ── Tab D: Neutralization Mappings ──

View file

@@ -258,8 +258,10 @@ class AccountSummary(BaseModel):
class UsageReportResponse(BaseModel):
"""Usage report for a period."""
period: str
"""Usage report for an explicit date range."""
dateFrom: str
dateTo: str
bucketSize: str
totalCost: float
transactionCount: int
costByProvider: Dict[str, float]
@@ -523,80 +525,69 @@ def getTransactions(
raise HTTPException(status_code=500, detail=str(e))
@router.get("/statistics/{period}", response_model=UsageReportResponse)
@router.get("/statistics", response_model=UsageReportResponse)
@limiter.limit("30/minute")
def getStatistics(
request: Request,
period: str = Path(..., description="Period: 'day', 'month', or 'year'"),
year: int = Query(..., description="Year"),
month: Optional[int] = Query(None, description="Month (1-12, required for 'day' period)"),
dateFrom: str = Query(..., description="ISO YYYY-MM-DD (inclusive)"),
dateTo: str = Query(..., description="ISO YYYY-MM-DD (inclusive)"),
bucketSize: str = Query(..., pattern="^(day|month|year)$",
description="Time-bucket granularity: day, month, or year"),
ctx: RequestContext = Depends(getRequestContext)
):
"""
Get usage statistics for a period.
Get usage statistics for an explicit date range.
`dateFrom`/`dateTo` are inclusive local-day boundaries.
`bucketSize` controls the time-series aggregation granularity and is
independent of the chosen range.
"""
from modules.shared.dateRange import parseIsoDateRange
try:
# Validate period
if period not in ["day", "month", "year"]:
raise HTTPException(status_code=400, detail=routeApiMsg("Invalid period. Use 'day', 'month', or 'year'"))
if period == "day" and not month:
raise HTTPException(status_code=400, detail=routeApiMsg("Month is required for 'day' period"))
startDate, toDateInclusive = parseIsoDateRange(dateFrom, dateTo)
# `calculateStatisticsFromTransactions` expects a half-open
# [startDate, endDate) interval, so widen the upper bound by one day.
from datetime import timedelta as _td
endDate = toDateInclusive + _td(days=1)
billingInterface = getBillingInterface(ctx.user, ctx.mandateId)
settings = billingInterface.getSettings(ctx.mandateId)
emptyResponse = UsageReportResponse(
dateFrom=dateFrom,
dateTo=dateTo,
bucketSize=bucketSize,
totalCost=0.0,
transactionCount=0,
costByProvider={},
costByFeature={},
)
if not settings:
return UsageReportResponse(
period=period,
totalCost=0.0,
transactionCount=0,
costByProvider={},
costByFeature={}
)
return emptyResponse
# Transactions are always on user accounts (audit trail)
account = billingInterface.getUserAccount(ctx.mandateId, ctx.user.id)
if not account:
return UsageReportResponse(
period=period,
totalCost=0.0,
transactionCount=0,
costByProvider={},
costByFeature={}
)
# Calculate date range
if period == "day":
startDate = date(year, month, 1)
if month == 12:
endDate = date(year + 1, 1, 1)
else:
endDate = date(year, month + 1, 1)
elif period == "month":
startDate = date(year, 1, 1)
endDate = date(year + 1, 1, 1)
else: # year
startDate = date(year, 1, 1)
endDate = date(year + 1, 1, 1)
# Get statistics from transactions
return emptyResponse
stats = billingInterface.calculateStatisticsFromTransactions(
account["id"],
startDate,
endDate
endDate,
)
return UsageReportResponse(
period=period,
dateFrom=dateFrom,
dateTo=dateTo,
bucketSize=bucketSize,
totalCost=stats.get("totalCostCHF", 0.0),
transactionCount=stats.get("transactionCount", 0),
costByProvider=stats.get("costByProvider", {}),
costByModel=stats.get("costByModel", {}),
costByFeature=stats.get("costByFeature", {})
costByFeature=stats.get("costByFeature", {}),
)
except HTTPException:
raise
except Exception as e:
@@ -778,6 +769,21 @@ def addCredit(
raise HTTPException(status_code=500, detail=str(e))
@router.get("/checkout/amounts", response_model=List[float])
@limiter.limit("60/minute")
def getCheckoutAmounts(
request: Request,
ctx: RequestContext = Depends(getRequestContext),
):
"""
Return the server-side allow-list of CHF top-up amounts for Stripe Checkout.
The frontend must populate its dropdown from this list; values not in
the list are rejected by `create_checkout_session` (server-side validation).
"""
from modules.serviceCenter.services.serviceBilling.stripeCheckout import ALLOWED_AMOUNTS_CHF
return [float(a) for a in ALLOWED_AMOUNTS_CHF]
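A minimal client sketch of the intended flow (base URL and token are hypothetical; the body fields match how checkoutRequest is read below):
import requests

BASE = "https://example.invalid/api/billing"  # hypothetical prefix
auth = {"Authorization": "Bearer <token>"}    # placeholder
amounts = requests.get(f"{BASE}/checkout/amounts", headers=auth).json()
# -> [10.0, 25.0, 50.0, 100.0, 250.0, 500.0]; anything else is rejected
resp = requests.post(
    f"{BASE}/checkout/create/<targetMandateId>",
    json={"userId": "<userId>", "amount": amounts[0],
          "returnUrl": "https://app.example.invalid/billing"},
    headers=auth,
)
print(resp.json()["redirectUrl"])  # hand the browser over to Stripe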
@router.post("/checkout/create/{targetMandateId}", response_model=CheckoutCreateResponse)
@limiter.limit("10/minute")
def createCheckoutSession(
@@ -800,12 +806,37 @@ def createCheckoutSession(
if not _isAdminOfMandate(ctx, targetMandateId):
raise HTTPException(status_code=403, detail=routeApiMsg("Mandate admin role required to load mandate credit"))
from modules.interfaces.interfaceDbApp import getInterface as getAppInterface
appInterface = getAppInterface(ctx.user, mandateId=targetMandateId)
mandateRecord = appInterface.getMandate(targetMandateId)
if mandateRecord is not None:
mandateLabel = getattr(mandateRecord, "label", None) or getattr(mandateRecord, "name", None) or targetMandateId
invoiceAddress = {
"companyName": getattr(mandateRecord, "invoiceCompanyName", None),
"contactName": getattr(mandateRecord, "invoiceContactName", None),
"email": getattr(mandateRecord, "invoiceEmail", None),
"line1": getattr(mandateRecord, "invoiceLine1", None),
"line2": getattr(mandateRecord, "invoiceLine2", None),
"postalCode": getattr(mandateRecord, "invoicePostalCode", None),
"city": getattr(mandateRecord, "invoiceCity", None),
"state": getattr(mandateRecord, "invoiceState", None),
"country": getattr(mandateRecord, "invoiceCountry", None) or "CH",
"vatNumber": getattr(mandateRecord, "invoiceVatNumber", None),
}
else:
mandateLabel = targetMandateId
invoiceAddress = None
from modules.serviceCenter.services.serviceBilling.stripeCheckout import create_checkout_session
redirect_url = create_checkout_session(
mandate_id=targetMandateId,
user_id=checkoutRequest.userId,
amount_chf=checkoutRequest.amount,
return_url=checkoutRequest.returnUrl
return_url=checkoutRequest.returnUrl,
mandate_label=mandateLabel,
invoice_address=invoiceAddress,
settings=settings,
billing_interface=billingInterface,
)
return CheckoutCreateResponse(redirectUrl=redirect_url)
@@ -1620,9 +1651,10 @@ class ViewStatisticsResponse(BaseModel):
@limiter.limit("30/minute")
def getUserViewStatistics(
request: Request,
period: str = Query(default="month", description="Period: 'day' or 'month'"),
year: int = Query(default=None, description="Year"),
month: Optional[int] = Query(None, description="Month (1-12, required for period='day')"),
dateFrom: str = Query(..., description="ISO YYYY-MM-DD (inclusive)"),
dateTo: str = Query(..., description="ISO YYYY-MM-DD (inclusive)"),
bucketSize: str = Query(..., pattern="^(day|month|year)$",
description="Time-bucket granularity: day, month, or year"),
scope: str = Query(default="all", description="Scope: 'personal' (own costs only), 'mandate' (filter by mandateId), 'all' (RBAC-filtered)"),
mandateId: Optional[str] = Query(None, description="Mandate ID filter (used with scope='mandate')"),
onlyMine: Optional[bool] = Query(None, description="Additional filter: restrict to current user's transactions within the selected scope"),
@@ -1630,24 +1662,23 @@ def getUserViewStatistics(
) -> ViewStatisticsResponse:
"""
Get aggregated usage statistics across all of the user's mandates.
Scope:
- personal: only the current user's own transactions (ignores admin role)
- mandate: transactions for a specific mandate (requires mandateId parameter)
- all: RBAC-filtered (SysAdmin sees everything, admin sees mandate, user sees own)
onlyMine: additional filter that restricts results to the current user's
transactions while keeping the scope-based mandate selection.
- period='month': returns monthly time series for the given year
- period='day': returns daily time series for the given month/year
"""
try:
if year is None:
year = datetime.now().year
if period == "day" and not month:
month = datetime.now().month
`dateFrom`/`dateTo` are inclusive local-day boundaries. `bucketSize`
controls the time-series aggregation granularity and is independent of
the chosen range.
"""
from modules.shared.dateRange import isoDateRangeToLocalEpoch
try:
startTs, endTs = isoDateRangeToLocalEpoch(dateFrom, dateTo)
billingInterface = getBillingInterface(ctx.user, ctx.mandateId)
@@ -1666,28 +1697,19 @@ def getUserViewStatistics(
personalUserId = str(ctx.user.id) if (scope == "personal" or onlyMine) else None
if period == "day":
startDate = date(year, month, 1)
endDate = date(year + 1, 1, 1) if month == 12 else date(year, month + 1, 1)
else:
startDate = date(year, 1, 1)
endDate = date(year + 1, 1, 1)
startTs = datetime.combine(startDate, datetime.min.time()).timestamp()
endTs = datetime.combine(endDate, datetime.min.time()).timestamp()
agg = billingInterface.getTransactionStatisticsAggregated(
mandateIds=loadMandateIds,
scope=scope,
userId=personalUserId,
startTs=startTs,
endTs=endTs,
period=period,
bucketSize=bucketSize,
)
logger.info(
f"View statistics (SQL-aggregated): totalCost={agg['totalCost']}, "
f"count={agg['transactionCount']}, period={period}, year={year}, month={month}"
f"count={agg['transactionCount']}, dateFrom={dateFrom}, dateTo={dateTo}, "
f"bucketSize={bucketSize}"
)
allAccounts = agg.get("_allAccounts", [])

View file

@@ -366,7 +366,19 @@ def create_mandate(
detail=f"Failed to create mandate: {str(e)}"
)
_MANDATE_ADMIN_EDITABLE_FIELDS = {"label"}
_MANDATE_ADMIN_EDITABLE_FIELDS = {
"label",
"invoiceCompanyName",
"invoiceContactName",
"invoiceEmail",
"invoiceLine1",
"invoiceLine2",
"invoicePostalCode",
"invoiceCity",
"invoiceState",
"invoiceCountry",
"invoiceVatNumber",
}
def _isUserAdminOfMandate(userId: str, targetMandateId: str) -> bool:
"""Check mandate-admin without RequestContext (avoids Header param conflicts)."""

View file

@@ -833,7 +833,7 @@ def _buildIntegrationsOverviewPayload(userId: str, user=None) -> Dict[str, Any]:
userId=userId,
startTs=startTs,
endTs=now,
period="month",
bucketSize="month",
)
liveStats["aiCallCount"] = stats.get("transactionCount", 0)
except Exception as e:

View file

@@ -71,6 +71,11 @@ def _registerDataSourceTools(registry: ToolRegistry, services):
subPath = args.get("subPath", "")
directConnId = args.get("connectionId", "")
directService = args.get("service", "")
rawLimit = args.get("limit")
try:
limit = int(rawLimit) if rawLimit is not None and str(rawLimit) != "" else None
except (TypeError, ValueError):
limit = None
if not dsId and not (directConnId and directService):
return ToolResult(toolCallId="", toolName="browseDataSource", success=False,
error="Provide either dataSourceId OR connectionId+service")
@@ -92,7 +97,7 @@ def _registerDataSourceTools(registry: ToolRegistry, services):
_buildResolverDbFromServices(services),
)
adapter = await resolver.resolveService(connectionId, service)
entries = await adapter.browse(browsePath, filter=args.get("filter"))
entries = await adapter.browse(browsePath, filter=args.get("filter"), limit=limit)
if not entries:
return ToolResult(toolCallId="", toolName="browseDataSource", success=True, data="Empty directory.")
lines = []
@@ -101,6 +106,11 @@ def _registerDataSourceTools(registry: ToolRegistry, services):
sizeInfo = f" ({e.size} bytes)" if e.size else ""
lines.append(f"- {prefix} {e.name}{sizeInfo} path: {e.path}")
result = "\n".join(lines)
countLine = f"\n\n({len(entries)} entries returned"
if limit is not None:
countLine += f", limit={limit}"
countLine += ")"
result += countLine
if service in _MAIL_SERVICES:
result += "\n\nIMPORTANT: These are email subjects only. To read the full email content, use downloadFromDataSource with the path, then readFile on the returned file ID."
return ToolResult(toolCallId="", toolName="browseDataSource", success=True, data=result)
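The limit coercion above degrades gracefully for every shape a tool argument can arrive in; extracted here for illustration:
def _coerceLimit(rawLimit):
    try:
        return int(rawLimit) if rawLimit is not None and str(rawLimit) != "" else None
    except (TypeError, ValueError):
        return None

assert _coerceLimit(None) is None    # omitted -> connector default
assert _coerceLimit("") is None      # empty string from the model
assert _coerceLimit("250") == 250    # stringly-typed tool args
assert _coerceLimit("lots") is None  # garbage falls back to the default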
@@ -112,6 +122,11 @@ def _registerDataSourceTools(registry: ToolRegistry, services):
directConnId = args.get("connectionId", "")
directService = args.get("service", "")
query = args.get("query", "")
rawLimit = args.get("limit")
try:
limit = int(rawLimit) if rawLimit is not None and str(rawLimit) != "" else None
except (TypeError, ValueError):
limit = None
if not query:
return ToolResult(toolCallId="", toolName="searchDataSource", success=False, error="query is required")
if not dsId and not (directConnId and directService):
@@ -128,11 +143,16 @@ def _registerDataSourceTools(registry: ToolRegistry, services):
_buildResolverDbFromServices(services),
)
adapter = await resolver.resolveService(connectionId, service)
entries = await adapter.search(query, path=basePath)
entries = await adapter.search(query, path=basePath, limit=limit)
if not entries:
return ToolResult(toolCallId="", toolName="searchDataSource", success=True, data="No results found.")
lines = [f"- {e.name} (path: {e.path})" for e in entries]
result = "\n".join(lines)
countLine = f"\n\n({len(entries)} entries returned"
if limit is not None:
countLine += f", limit={limit}"
countLine += ")"
result += countLine
if service in _MAIL_SERVICES:
result += "\n\nIMPORTANT: These are email subjects only. To read the full email content, use downloadFromDataSource with the path, then readFile on the returned file ID."
return ToolResult(toolCallId="", toolName="searchDataSource", success=True, data=result)
@@ -217,7 +237,9 @@ def _registerDataSourceTools(registry: ToolRegistry, services):
description=(
"Browse files and folders in a data source. Accepts either:\n"
"- dataSourceId (for attached data sources shown in the prompt), OR\n"
"- connectionId + service (for direct connection access via listConnections)."
"- connectionId + service (for direct connection access via listConnections).\n"
"Default page size is connector-specific (~100 entries). Use the `limit` parameter "
"to request more (e.g. when the user explicitly asks for ALL items in a folder)."
),
parameters={
"type": "object",
@@ -228,6 +250,15 @@ def _registerDataSourceTools(registry: ToolRegistry, services):
"path": {"type": "string", "description": "Root path (used with connectionId+service)"},
"subPath": {"type": "string", "description": "Sub-path within the data source to browse"},
"filter": {"type": "string", "description": "Filter pattern (e.g. '*.pdf')"},
"limit": {
"type": "integer",
"description": (
"Maximum number of entries to return (max 1000 for mail, "
"connector-specific elsewhere). Omit for the connector's default."
),
"minimum": 1,
"maximum": 1000,
},
},
},
readOnly=True,
@@ -236,7 +267,8 @@ def _registerDataSourceTools(registry: ToolRegistry, services):
registry.register(
"searchDataSource", _searchDataSource,
description=(
"Search for files within a data source. Accepts either dataSourceId OR connectionId+service."
"Search for files within a data source. Accepts either dataSourceId OR connectionId+service. "
"Use the `limit` parameter to control how many hits are returned."
),
parameters={
"type": "object",
@@ -246,6 +278,12 @@ def _registerDataSourceTools(registry: ToolRegistry, services):
"service": {"type": "string", "description": "Service name (alternative to dataSourceId)"},
"path": {"type": "string", "description": "Scope path (used with connectionId+service)"},
"query": {"type": "string", "description": "Search query"},
"limit": {
"type": "integer",
"description": "Maximum number of search results (default ~100, max 1000).",
"minimum": 1,
"maximum": 1000,
},
},
"required": ["query"],
},

View file

@@ -3,10 +3,22 @@
"""
Stripe Checkout service for billing credit top-ups.
Creates Checkout Sessions for redirect-based payment flow.
Swiss trustee compliance (Issue 2026-04-20):
- Every checkout creates/updates a Stripe Customer with the mandate's
invoice address (name, address, e-mail, optionally the UID/VAT number).
- `invoice_creation` is enabled on the checkout so Stripe automatically
issues an invoice with status `paid` (instead of a mere receipt). The
invoice carries the full recipient address and the footer note "bezahlt
via Kreditkarte am ..." (paid by credit card on ...).
- Swiss VAT of 8.1% is added via `automatic_tax: enabled=true`, provided
Stripe Tax is activated for the account (see wiki/d-guides/stripe-ch-vat.md).
Alternatively, a predefined tax rate (8.1%, inclusive=false) can be attached
via the APP_CONFIG key `STRIPE_TAX_RATE_ID_CH_VAT`.
"""
import logging
from typing import Optional
from typing import Any, Dict, List, Optional
from urllib.parse import parse_qsl, urlencode, urlsplit, urlunsplit
from modules.shared.configuration import APP_CONFIG
@@ -17,6 +29,155 @@ logger = logging.getLogger(__name__)
ALLOWED_AMOUNTS_CHF = [10, 25, 50, 100, 250, 500]
def _str(value: Any) -> Optional[str]:
"""Trim ``value`` to a non-empty string or return ``None``."""
if value is None:
return None
if not isinstance(value, str):
value = str(value)
trimmed = value.strip()
return trimmed or None
def _buildStripeAddress(invoiceAddress: Optional[Dict[str, Any]]) -> Optional[Dict[str, str]]:
"""Translate the structured Mandate invoice fields into a Stripe ``address`` object.
Returns ``None`` when the address is not complete enough for Stripe
(line1 + city are the practical minimum); the caller then falls back
to a guest checkout where Stripe collects the address from the user.
"""
if not invoiceAddress:
return None
address: Dict[str, Optional[str]] = {
"line1": _str(invoiceAddress.get("line1")),
"line2": _str(invoiceAddress.get("line2")),
"postal_code": _str(invoiceAddress.get("postalCode")),
"city": _str(invoiceAddress.get("city")),
"state": _str(invoiceAddress.get("state")),
"country": (_str(invoiceAddress.get("country")) or "CH").upper()[:2],
}
cleaned = {k: v for k, v in address.items() if v}
if not cleaned.get("line1") or not cleaned.get("city"):
return None
return cleaned
def _buildStripeTaxIdData(invoiceAddress: Optional[Dict[str, Any]]) -> List[Dict[str, str]]:
"""Translate UID/MWST-Nr into Stripe ``tax_id_data`` (CHE -> ``ch_vat``)."""
if not invoiceAddress:
return []
vat = _str(invoiceAddress.get("vatNumber"))
if not vat:
return []
upper = vat.upper()
if upper.startswith("CHE"):
return [{"type": "ch_vat", "value": vat[:50]}]
if upper.startswith("LI"):
return [{"type": "li_uid", "value": vat[:50]}]
if len(upper) >= 4 and upper[:2].isalpha() and any(c.isdigit() for c in upper):
return [{"type": "eu_vat", "value": vat[:50]}]
logger.info("Skipping unrecognized invoice VAT number format: %s", vat)
return []
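Worked examples of the mapping above, runnable against the helper as defined (format checks only, no Stripe-side validation):
assert _buildStripeTaxIdData({"vatNumber": "CHE-123.456.789 MWST"}) == [
    {"type": "ch_vat", "value": "CHE-123.456.789 MWST"}
]
assert _buildStripeTaxIdData({"vatNumber": "LI-0123.456.789-0"})[0]["type"] == "li_uid"
assert _buildStripeTaxIdData({"vatNumber": "DE123456789"})[0]["type"] == "eu_vat"
assert _buildStripeTaxIdData({"vatNumber": "123456"}) == []  # logged and skipped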
def _ensureStripeCustomer(
mandateId: str,
mandateLabel: str,
invoiceAddress: Optional[Dict[str, Any]],
settings: Optional[Dict[str, Any]],
billingInterface,
) -> Optional[str]:
"""Create or update the Stripe Customer for the given mandate.
Maps the structured invoice fields from ``Mandate`` to:
- ``customer.name`` (companyName, fallback mandateLabel)
- ``customer.email`` (if invoiceEmail is set)
- ``customer.address`` (line1/line2/postal_code/city/state/country)
- ``customer.shipping`` (mirrors address with contactName, when set)
- ``customer.tax_id_data`` (UID/MWST-Nr; only on create -- Stripe API
does not allow modifying tax_id_data via Customer.modify, the
existing tax IDs would have to be replaced via tax_ids.create).
Returns the Stripe customer id (or ``None`` if creation failed and we
should fall back to a guest checkout).
"""
from modules.shared.stripeClient import getStripeClient, stripeToDict
stripe = getStripeClient()
address = _buildStripeAddress(invoiceAddress)
name = _str((invoiceAddress or {}).get("companyName")) or mandateLabel
email = _str((invoiceAddress or {}).get("email"))
contactName = _str((invoiceAddress or {}).get("contactName"))
vatNumber = _str((invoiceAddress or {}).get("vatNumber"))
metadata = {
"mandateId": mandateId,
"mandateLabel": mandateLabel,
}
if vatNumber:
metadata["vatNumber"] = vatNumber
if contactName:
metadata["contactName"] = contactName
customerId = (settings or {}).get("stripeCustomerId") if settings else None
payload: Dict[str, Any] = {
"name": name,
"metadata": metadata,
}
if email:
payload["email"] = email
if address:
payload["address"] = address
if contactName:
payload["shipping"] = {
"name": f"{contactName} ({name})" if name else contactName,
"address": address,
}
try:
if customerId:
stripe.Customer.modify(customerId, **payload)
else:
taxIdData = _buildStripeTaxIdData(invoiceAddress)
createPayload = dict(payload)
if taxIdData:
createPayload["tax_id_data"] = taxIdData
customer = stripe.Customer.create(**createPayload)
customerId = stripeToDict(customer).get("id") or getattr(customer, "id", None)
if customerId and billingInterface is not None and settings is not None:
try:
billingInterface.updateSettings(settings["id"], {"stripeCustomerId": customerId})
except Exception as ex:
logger.warning("Failed to persist stripeCustomerId for mandate %s: %s", mandateId, ex)
return customerId
except Exception as ex:
logger.error("Stripe Customer create/update failed for mandate %s: %s", mandateId, ex)
return customerId # may be None, falls back to guest checkout
def _resolveCheckoutTaxRates() -> List[str]:
"""Return tax-rate IDs to apply manually if Stripe Tax is not enabled."""
raw = APP_CONFIG.get("STRIPE_TAX_RATE_ID_CH_VAT") or ""
return [r.strip() for r in str(raw).split(",") if r.strip()]
def _isAutomaticTaxEnabled() -> bool:
"""Read STRIPE_AUTOMATIC_TAX_ENABLED as a real boolean.
APP_CONFIG._loadEnv stores all .env values as raw strings, so a naive
``bool(APP_CONFIG.get(key, False))`` would return True for the strings
``"false"``, ``"0"`` or ``"no"`` (any non-empty string is truthy in
Python). We therefore parse the value explicitly.
"""
raw = APP_CONFIG.get("STRIPE_AUTOMATIC_TAX_ENABLED", False)
if isinstance(raw, bool):
return raw
if raw is None:
return False
return str(raw).strip().lower() in {"true", "1", "yes", "on"}
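Why the explicit parse matters, as a tiny self-contained demo of the rule above:
def _parse(raw):  # same rule as above, extracted for illustration
    return str(raw).strip().lower() in {"true", "1", "yes", "on"}

assert bool("false") is True  # the naive cast would silently enable the feature
assert _parse("false") is False and _parse(" FALSE ") is False
assert _parse("1") is True and _parse("on") is True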
def _normalizeReturnUrl(returnUrl: str) -> str:
"""
Validate and normalize an absolute frontend return URL.
@@ -55,77 +216,143 @@
mandate_id: str,
user_id: Optional[str],
amount_chf: float,
return_url: str
return_url: str,
mandate_label: Optional[str] = None,
invoice_address: Optional[Dict[str, Any]] = None,
settings: Optional[Dict[str, Any]] = None,
billing_interface=None,
) -> str:
"""
Create a Stripe Checkout Session for credit top-up.
Amount and currency are validated server-side. The client-provided amount
must match an allowed preset.
Swiss trustee compliance:
- Reuses or creates a Stripe Customer with the mandate's invoice address.
- Activates `invoice_creation` so Stripe issues a proper invoice (status
`paid`) carrying the full recipient address and VAT.
- Adds 8.1% Swiss VAT either via Stripe Tax (`automatic_tax`) or via a
manually configured `STRIPE_TAX_RATE_ID_CH_VAT` tax-rate.
Args:
mandate_id: Target mandate ID
user_id: Target user ID for audit trail (optional)
amount_chf: Amount in CHF (must be in ALLOWED_AMOUNTS_CHF)
return_url: Absolute frontend URL used for Stripe success/cancel redirects
mandate_label: Human-readable mandate name (used as Customer name fallback)
invoice_address: Dict assembled in routeBilling from the structured
``Mandate.invoice*`` fields. Recognised keys: companyName,
contactName, email, line1, line2, postalCode, city, state,
country, vatNumber. Pass ``None`` for guest checkout (Stripe
then collects line1/postal_code/city itself via
``billing_address_collection: required``).
settings: Mandate billing settings (carries `stripeCustomerId`, `id`)
billing_interface: Billing interface (for persisting stripeCustomerId)
Returns:
Stripe Checkout Session URL for redirect
Raises:
ValueError: If amount is invalid
"""
import stripe
# Validate amount server-side
if amount_chf not in ALLOWED_AMOUNTS_CHF:
raise ValueError(
f"Invalid amount {amount_chf} CHF. Allowed: {ALLOWED_AMOUNTS_CHF}"
)
from modules.shared.stripeClient import getStripeClient
stripe = getStripeClient()
base_return_url = _normalizeReturnUrl(return_url)
query_separator = "&" if "?" in base_return_url else "?"
success_url = f"{base_return_url}{query_separator}success=true&session_id={{CHECKOUT_SESSION_ID}}"
cancel_url = f"{base_return_url}{query_separator}canceled=true"
# Amount in cents for Stripe (CHF uses 2 decimal places)
amount_cents = int(round(amount_chf * 100))
metadata = {
"mandateId": mandate_id,
"amountChf": str(amount_chf),
}
if user_id:
metadata["userId"] = user_id
session = stripe.checkout.Session.create(
mode="payment",
line_items=[
{
"price_data": {
"currency": "chf",
"unit_amount": amount_cents,
"product_data": {
"name": "Guthaben aufladen",
"description": "AI Service Guthaben (CHF)",
},
},
"quantity": 1,
}
],
success_url=success_url,
cancel_url=cancel_url,
metadata=metadata,
customerId = _ensureStripeCustomer(
mandateId=mandate_id,
mandateLabel=mandate_label or mandate_id,
invoiceAddress=invoice_address,
settings=settings,
billingInterface=billing_interface,
)
taxRateIds = _resolveCheckoutTaxRates()
autoTaxEnabled = _isAutomaticTaxEnabled()
line_item: Dict[str, Any] = {
"price_data": {
"currency": "chf",
"unit_amount": amount_cents,
"product_data": {
"name": "Guthaben aufladen",
"description": "AI Service Guthaben (CHF) inkl. MWST 8.1%",
},
},
"quantity": 1,
}
if taxRateIds and not autoTaxEnabled:
line_item["tax_rates"] = taxRateIds
invoice_data: Dict[str, Any] = {
"description": f"Guthaben-Aufladung {amount_chf:.2f} CHF (Mandant: {mandate_label or mandate_id})",
"metadata": metadata,
"footer": (
"Diese Rechnung wurde bereits via Kreditkarte bezahlt. "
"MWST-Nr. PowerOn: siehe Stripe-Rechnungs-Template. "
"Bei Fragen: billing@poweron-center.net"
),
}
customFields: List[Dict[str, str]] = []
if invoice_address:
vat = _str(invoice_address.get("vatNumber"))
if vat:
customFields.append({"name": "UID-Nr. Empfaenger", "value": vat[:30]})
contactName = _str(invoice_address.get("contactName"))
if contactName:
customFields.append({"name": "z. H.", "value": contactName[:30]})
if customFields:
invoice_data["custom_fields"] = customFields[:4]
sessionPayload: Dict[str, Any] = {
"mode": "payment",
"line_items": [line_item],
"success_url": success_url,
"cancel_url": cancel_url,
"metadata": metadata,
"invoice_creation": {
"enabled": True,
"invoice_data": invoice_data,
},
}
if customerId:
sessionPayload["customer"] = customerId
sessionPayload["customer_update"] = {"address": "auto", "name": "auto", "shipping": "auto"}
else:
sessionPayload["billing_address_collection"] = "required"
if invoice_address and _str(invoice_address.get("email")):
sessionPayload["customer_email"] = _str(invoice_address.get("email"))
if autoTaxEnabled:
sessionPayload["automatic_tax"] = {"enabled": True}
session = stripe.checkout.Session.create(**sessionPayload)
if not session or not session.url:
raise ValueError("Stripe Checkout Session creation failed")
logger.info(
f"Created Stripe Checkout Session {session.id} for mandate {mandate_id}, "
f"amount {amount_chf} CHF"
f"amount {amount_chf} CHF, customer={customerId or 'guest'}, "
f"taxRates={taxRateIds}, autoTax={autoTaxEnabled}"
)
return session.url

View file

@@ -10,7 +10,7 @@ Usage:
import hashlib
import logging
from collections import defaultdict
from datetime import datetime, timezone, timedelta
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from modules.shared.timeUtils import getUtcTimestamp
@@ -196,22 +196,28 @@ class AiAuditLogger:
self,
mandateId: str,
*,
timeRangeDays: int = 30,
fromTs: float,
toTs: float,
groupBy: str = "model",
) -> Dict[str, Any]:
"""Aggregate statistics for Tab C."""
"""Aggregate statistics for Tab C over an explicit timestamp range.
`fromTs`/`toTs` are inclusive epoch-second boundaries (see
`dateRange.isoDateRangeToLocalEpoch`). Both are required.
"""
self._ensureInitialized()
if not self._db:
return {}
from modules.datamodels.datamodelAiAudit import AiAuditLogEntry
cutoff = (datetime.now(timezone.utc) - timedelta(days=max(1, timeRangeDays))).timestamp()
allRecords = self._db.getRecordset(
AiAuditLogEntry, recordFilter={"mandateId": mandateId}
)
records = [r for r in allRecords if (r.get("timestamp") or 0) >= cutoff]
records = [
r for r in allRecords
if fromTs <= (r.get("timestamp") or 0) <= toTs
]
callsByDay: Dict[str, int] = defaultdict(int)
callsByModel: Dict[str, int] = defaultdict(int)
@@ -237,10 +243,13 @@ class AiAuditLogger:
sortedDays = sorted(callsByDay.keys())
neutralizationPercent = round(100.0 * neutralizationCount / totalCalls, 1) if totalCalls else 0.0
days = max(1, int((toTs - fromTs) / 86400) + 1)
return {
"totalCalls": totalCalls,
"timeRangeDays": timeRangeDays,
"fromTs": fromTs,
"toTs": toTs,
"days": days,
"callsPerDay": [{"date": d, "calls": callsByDay[d]} for d in sortedDays],
"costPerDay": [{"date": d, "cost": round(costByDay[d], 4)} for d in sortedDays],
"callsByModel": dict(sorted(callsByModel.items(), key=lambda x: -x[1])),

View file

@@ -0,0 +1,80 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Date-range parsing for API endpoints that accept user-provided
`dateFrom`/`dateTo` query params (ISO `YYYY-MM-DD`).
Centralizes:
- Parsing (HTTPException 400 on invalid format)
- Validation (`dateFrom <= dateTo`)
- Conversion to inclusive epoch boundaries for downstream filters
(`dateFrom` -> `00:00:00.000` of that local day,
`dateTo` -> `23:59:59.999` of that local day).
Local-day semantics intentionally follow `datetime.combine(d, time.min).timestamp()`
which the legacy billing/audit code used; this matches the user's calendar
expectation ("01.04 - 03.04" = three full local days). Because the date string
carries no timezone, boundaries are always computed in the server's local
timezone, so every endpoint sharing this helper agrees on the same cut-offs.
"""
from datetime import date, datetime, time as dtTime
from typing import Tuple
from fastapi import HTTPException
def parseIsoDate(value: str, fieldName: str) -> date:
"""Parse an ISO `YYYY-MM-DD` string.
Raises HTTPException(400) on invalid input.
"""
if not isinstance(value, str) or not value:
raise HTTPException(
status_code=400,
detail=f"{fieldName} is required (ISO YYYY-MM-DD)",
)
try:
return date.fromisoformat(value)
except ValueError as e:
raise HTTPException(
status_code=400,
detail=f"{fieldName} is not a valid ISO date (YYYY-MM-DD): {value}",
) from e
def parseIsoDateRange(dateFrom: str, dateTo: str) -> Tuple[date, date]:
"""Parse and validate an inclusive ISO date range.
Raises HTTPException(400) on invalid format or `from > to`.
"""
fromDate = parseIsoDate(dateFrom, "dateFrom")
toDate = parseIsoDate(dateTo, "dateTo")
if fromDate > toDate:
raise HTTPException(
status_code=400,
detail=f"dateFrom must be <= dateTo (got {dateFrom} > {dateTo})",
)
return fromDate, toDate
def isoDateRangeToLocalEpoch(dateFrom: str, dateTo: str) -> Tuple[float, float]:
"""Convert an inclusive ISO date range to local-time epoch seconds.
`dateTo` boundary is end-of-day inclusive (23:59:59.999999), so a single-day
range `dateFrom == dateTo` covers the full 24h of that local day.
Returns:
(fromTs, toTs) - both float epoch seconds, suitable for `ts >= fromTs`
and `ts <= toTs` filters.
"""
fromDate, toDate = parseIsoDateRange(dateFrom, dateTo)
fromTs = datetime.combine(fromDate, dtTime.min).timestamp()
toTs = datetime.combine(toDate, dtTime.max).timestamp()
return fromTs, toTs
def daysInRange(dateFrom: str, dateTo: str) -> int:
"""Inclusive day count for a date range. `from == to` returns 1."""
fromDate, toDate = parseIsoDateRange(dateFrom, dateTo)
return (toDate - fromDate).days + 1
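A short worked example against the helpers above (all values in the server's local timezone):
fromTs, toTs = isoDateRangeToLocalEpoch("2026-04-01", "2026-04-03")
# fromTs -> 2026-04-01 00:00:00.000000, toTs -> 2026-04-03 23:59:59.999999
assert daysInRange("2026-04-01", "2026-04-03") == 3
assert daysInRange("2026-04-01", "2026-04-01") == 1  # single day = full 24h
# parseIsoDateRange("2026-04-03", "2026-04-01") raises HTTPException(400)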

View file

@@ -50,6 +50,42 @@ class OrphanResult:
targetTable: str
targetColumn: str
orphanCount: int
sourceRowCount: int = 0
targetRowCount: int = 0
targetEmpty: bool = False
wouldDeleteAll: bool = False
@dataclass
class OrphanRecord:
"""A single orphan source-row -- includes the unresolved FK value plus the full row data.
Used by the SysAdmin UI download button so the human can verify the orphan
list before pressing "clean".
"""
sourceDb: str
sourceTable: str
sourceColumn: str
targetDb: str
targetTable: str
targetColumn: str
orphanFkValue: str
rowId: Optional[str]
row: Dict
# ---------------------------------------------------------------------------
# Safety thresholds for cleanup
# ---------------------------------------------------------------------------
# If a single cleanup would delete more than this fraction of the source table,
# refuse without an explicit force=True. Protects against catastrophic wipes
# caused by misconfigured / empty target tables.
_MAX_CLEANUP_FRACTION = 0.5
class OrphanCleanupRefused(Exception):
"""Raised when a cleanup is refused for safety reasons (use force=True to override)."""
# ---------------------------------------------------------------------------
@@ -147,6 +183,60 @@ def _loadParentIds(conn, tableName: str, columnName: str) -> Set[str]:
return ids
def _loadPhysicalColumns(conn, tableName: str) -> Set[str]:
"""Return the set of physical (scalar) columns present on a table.
Used by the orphan scanner to skip FK relationships whose ``sourceColumn``
is annotated on the Pydantic model but does NOT exist as a physical column
-- e.g. virtual / computed fields, or fields that the database interface
decided to fold into a JSONB blob (List/Dict typed fields). Comparing a
JSONB array against a scalar via ``=`` always fails and would otherwise
flag every single source row as an orphan (the user-reported "false
positives").
"""
cols: Set[str] = set()
try:
with conn.cursor() as cur:
cur.execute(
"""
SELECT column_name
FROM information_schema.columns
WHERE table_schema = 'public' AND table_name = %s
""",
(tableName,),
)
for row in cur.fetchall():
cols.add(row["column_name"])
except Exception:
pass
return cols
def _countRows(conn, tableName: str) -> int:
"""Count physical rows in a table. Returns 0 on any error."""
try:
with conn.cursor() as cur:
cur.execute(f'SELECT COUNT(*) AS cnt FROM "{tableName}"')
return int(cur.fetchone()["cnt"])
except Exception:
return 0
def _countNonNullSource(conn, tableName: str, columnName: str) -> int:
"""Count source rows where the FK column is non-null/non-empty."""
try:
with conn.cursor() as cur:
cur.execute(f"""
SELECT COUNT(*) AS cnt
FROM "{tableName}"
WHERE "{columnName}" IS NOT NULL
AND "{columnName}" != ''
""")
return int(cur.fetchone()["cnt"])
except Exception:
return 0
def _countOrphansSameDb(
conn, sourceTable: str, sourceColumn: str,
targetTable: str, targetColumn: str,
@@ -213,6 +303,7 @@ def _scanOrphans(dbFilter: Optional[str] = None) -> List[dict]:
connCache: Dict[str, any] = {}
tableCache: Dict[str, Set[str]] = {}
columnCache: Dict[str, Set[str]] = {}
parentIdCache: Dict[str, Set[str]] = {}
results: List[dict] = []
@@ -236,6 +327,15 @@ def _scanOrphans(dbFilter: Optional[str] = None) -> List[dict]:
tableCache[dbName] = set()
return tableCache[dbName]
def _existingColumns(dbName: str, tableName: str) -> Set[str]:
cacheKey = f"{dbName}.{tableName}"
if cacheKey not in columnCache:
try:
columnCache[cacheKey] = _loadPhysicalColumns(_ensureConn(dbName), tableName)
except Exception:
columnCache[cacheKey] = set()
return columnCache[cacheKey]
try:
for rel in relationships:
try:
@@ -251,17 +351,39 @@ def _scanOrphans(dbFilter: Optional[str] = None) -> List[dict]:
if rel.targetTable not in targetTables:
continue
# Skip FK annotations whose source column is not a physical
# scalar column (virtual / JSONB-resident / computed field).
# See _loadPhysicalColumns docstring for why this matters.
sourceColumns = _existingColumns(rel.sourceDb, rel.sourceTable)
if rel.sourceColumn not in sourceColumns:
logger.debug(
"Skipping FK %s.%s.%s -- column not present as physical column",
rel.sourceDb, rel.sourceTable, rel.sourceColumn,
)
continue
targetColumns = _existingColumns(rel.targetDb, rel.targetTable)
if rel.targetColumn not in targetColumns:
logger.debug(
"Skipping FK %s.%s.%s -> %s.%s.%s -- target column not present",
rel.sourceDb, rel.sourceTable, rel.sourceColumn,
rel.targetDb, rel.targetTable, rel.targetColumn,
)
continue
sourceConn = _ensureConn(rel.sourceDb)
if rel.sourceDb == rel.targetDb:
targetRowCount = _countRows(sourceConn, rel.targetTable)
count = _countOrphansSameDb(
sourceConn, rel.sourceTable, rel.sourceColumn,
rel.targetTable, rel.targetColumn,
)
else:
targetConn = _ensureConn(rel.targetDb)
targetRowCount = _countRows(targetConn, rel.targetTable)
parentKey = f"{rel.targetDb}.{rel.targetTable}.{rel.targetColumn}"
if parentKey not in parentIdCache:
targetConn = _ensureConn(rel.targetDb)
parentIdCache[parentKey] = _loadParentIds(
targetConn, rel.targetTable, rel.targetColumn,
)
@@ -271,6 +393,12 @@ def _scanOrphans(dbFilter: Optional[str] = None) -> List[dict]:
parentIdCache[parentKey],
)
sourceRowCount = _countNonNullSource(
sourceConn, rel.sourceTable, rel.sourceColumn,
)
wouldDeleteAll = (count > 0 and count >= sourceRowCount)
targetEmpty = (targetRowCount == 0)
results.append(asdict(OrphanResult(
sourceDb=rel.sourceDb,
sourceTable=rel.sourceTable,
@@ -279,6 +407,10 @@ def _scanOrphans(dbFilter: Optional[str] = None) -> List[dict]:
targetTable=rel.targetTable,
targetColumn=rel.targetColumn,
orphanCount=count,
sourceRowCount=sourceRowCount,
targetRowCount=targetRowCount,
targetEmpty=targetEmpty,
wouldDeleteAll=wouldDeleteAll,
)))
except Exception as e:
@@ -308,8 +440,16 @@ def _scanOrphans(dbFilter: Optional[str] = None) -> List[dict]:
# Orphan cleanup
# ---------------------------------------------------------------------------
def _cleanOrphans(db: str, table: str, column: str) -> int:
"""Delete orphaned records for a single FK relationship. Returns count deleted."""
def _cleanOrphans(db: str, table: str, column: str, force: bool = False) -> int:
"""Delete orphaned records for a single FK relationship. Returns count deleted.
Safety guards (require force=True to override):
- Refuses if the target table is empty (likely misconfiguration / lazy table).
- Refuses if the cleanup would delete >= _MAX_CLEANUP_FRACTION of the source rows.
These guards prevent catastrophic wipes (e.g. emptying FeatureInstance because
the User table happened to be empty in the wrong DB at scan time).
"""
relationships = _getFkRelationships()
rel = next(
(r for r in relationships
@@ -320,7 +460,65 @@ def _cleanOrphans(db: str, table: str, column: str) -> int:
raise ValueError(f"No FK relationship found for {db}.{table}.{column}")
conn = _getConnection(rel.sourceDb)
targetConn = None
try:
if rel.sourceDb == rel.targetDb:
targetRowCount = _countRows(conn, rel.targetTable)
parentIds: Optional[Set[str]] = None
else:
targetConn = _getConnection(rel.targetDb)
targetRowCount = _countRows(targetConn, rel.targetTable)
parentIds = _loadParentIds(targetConn, rel.targetTable, rel.targetColumn)
sourceRowCount = _countNonNullSource(conn, rel.sourceTable, rel.sourceColumn)
if not force:
if targetRowCount == 0 and sourceRowCount > 0:
raise OrphanCleanupRefused(
f"Refusing cleanup: target table '{rel.targetDb}.{rel.targetTable}' "
f"is empty but source '{rel.sourceDb}.{rel.sourceTable}' has "
f"{sourceRowCount} rows with non-null '{rel.sourceColumn}'. "
f"This likely indicates a misconfiguration. Use force=True to override."
)
if rel.sourceDb == rel.targetDb:
with conn.cursor() as cur:
cur.execute(f"""
SELECT COUNT(*) AS cnt
FROM "{rel.sourceTable}" s
WHERE s."{rel.sourceColumn}" IS NOT NULL
AND s."{rel.sourceColumn}" != ''
AND NOT EXISTS (
SELECT 1 FROM "{rel.targetTable}" t
WHERE t."{rel.targetColumn}" = s."{rel.sourceColumn}"
)
""")
wouldDelete = int(cur.fetchone()["cnt"])
else:
if not parentIds:
wouldDelete = sourceRowCount
else:
with conn.cursor() as cur:
cur.execute(f"""
SELECT COUNT(*) AS cnt
FROM "{rel.sourceTable}"
WHERE "{rel.sourceColumn}" IS NOT NULL
AND "{rel.sourceColumn}" != ''
AND "{rel.sourceColumn}" NOT IN (
SELECT unnest(%(ids)s::text[])
)
""", {"ids": list(parentIds)})
wouldDelete = int(cur.fetchone()["cnt"])
if not force and sourceRowCount > 0:
fraction = wouldDelete / sourceRowCount
if fraction >= _MAX_CLEANUP_FRACTION:
raise OrphanCleanupRefused(
f"Refusing cleanup: would delete {wouldDelete} of {sourceRowCount} "
f"non-null rows ({fraction:.0%}) from '{rel.sourceDb}.{rel.sourceTable}'. "
f"Threshold is {_MAX_CLEANUP_FRACTION:.0%}. Use force=True to override."
)
if rel.sourceDb == rel.targetDb:
with conn.cursor() as cur:
cur.execute(f"""
@@ -335,12 +533,6 @@ def _cleanOrphans(db: str, table: str, column: str) -> int:
deleted = cur.rowcount
conn.commit()
else:
targetConn = _getConnection(rel.targetDb)
try:
parentIds = _loadParentIds(targetConn, rel.targetTable, rel.targetColumn)
finally:
targetConn.close()
if not parentIds:
with conn.cursor() as cur:
cur.execute(f"""
@@ -365,26 +557,56 @@ def _cleanOrphans(db: str, table: str, column: str) -> int:
conn.rollback()
raise
finally:
if targetConn is not None:
try:
targetConn.close()
except Exception:
pass
conn.close()
_invalidateOrphanCache()
logger.info(f"Cleaned {deleted} orphans from {db}.{table}.{column}")
logger.info(
f"Cleaned {deleted} orphans from {db}.{table}.{column} (force={force})"
)
return deleted
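# Caller sketch (illustrative, not part of this commit): an API handler can map
# the safety refusal onto HTTP 409 rather than a generic 500. Assumes
# fastapi.HTTPException is available in the route module; the wiring of db,
# table, column, and force is hypothetical.
#
#   try:
#       deleted = _cleanOrphans(db, table, column, force=force)
#   except OrphanCleanupRefused as e:
#       raise HTTPException(status_code=409, detail=str(e))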
def _cleanAllOrphans() -> List[dict]:
"""Clean all detected orphans. Returns list of {db, table, column, deleted}."""
def _cleanAllOrphans(force: bool = False) -> List[dict]:
"""Clean all detected orphans. Returns list of {db, table, column, deleted, [error|skipped]}.
Safety: each individual cleanup re-validates target row counts at delete-time
to avoid cascading wipes (e.g. one delete emptying a target table that the
next iteration depends on). Without force=True, dangerous cleanups are skipped.
"""
orphans = _scanOrphans()
results = []
for orphan in orphans:
if orphan.get("orphanCount", 0) <= 0:
continue
try:
deleted = _cleanOrphans(orphan["sourceDb"], orphan["sourceTable"], orphan["sourceColumn"])
deleted = _cleanOrphans(
orphan["sourceDb"],
orphan["sourceTable"],
orphan["sourceColumn"],
force=force,
)
results.append({
"db": orphan["sourceDb"],
"table": orphan["sourceTable"],
"column": orphan["sourceColumn"],
"deleted": deleted,
})
except OrphanCleanupRefused as e:
logger.warning(
f"Skipping orphan cleanup for {orphan['sourceDb']}.{orphan['sourceTable']}.{orphan['sourceColumn']}: {e}"
)
results.append({
"db": orphan["sourceDb"],
"table": orphan["sourceTable"],
"column": orphan["sourceColumn"],
"deleted": 0,
"skipped": str(e),
})
except Exception as e:
logger.error(
f"Failed to clean orphans for {orphan['sourceDb']}.{orphan['sourceTable']}.{orphan['sourceColumn']}: {e}"
@@ -403,3 +625,132 @@ def _invalidateOrphanCache() -> None:
global _orphanCache
with _orphanCacheLock:
_orphanCache = None
# ---------------------------------------------------------------------------
# Listing orphans (for SysAdmin "download / inspect" workflow)
# ---------------------------------------------------------------------------
def _listOrphans(
db: str,
table: str,
column: str,
limit: int = 1000,
) -> List[dict]:
"""Return up to ``limit`` actual orphan source-rows for one FK relationship.
Each entry is ``{"orphanFkValue": str, "rowId": str|None, "row": dict}`` so
the SysAdmin UI can present them as a download (CSV/JSON) for review before
the destructive cleanup is triggered.
"""
relationships = _getFkRelationships()
rel = next(
(r for r in relationships
if r.sourceDb == db and r.sourceTable == table and r.sourceColumn == column),
None,
)
if rel is None:
raise ValueError(f"No FK relationship found for {db}.{table}.{column}")
safeLimit = max(1, min(int(limit), 10000))
sourceConn = _getConnection(rel.sourceDb)
targetConn = None
try:
sourceColumns = _loadPhysicalColumns(sourceConn, rel.sourceTable)
if rel.sourceColumn not in sourceColumns:
return []
if rel.sourceDb == rel.targetDb:
targetColumns = _loadPhysicalColumns(sourceConn, rel.targetTable)
if rel.targetColumn not in targetColumns:
return []
with sourceConn.cursor() as cur:
cur.execute(f"""
SELECT s.*
FROM "{rel.sourceTable}" s
WHERE s."{rel.sourceColumn}" IS NOT NULL
AND s."{rel.sourceColumn}" != ''
AND NOT EXISTS (
SELECT 1 FROM "{rel.targetTable}" t
WHERE t."{rel.targetColumn}" = s."{rel.sourceColumn}"
)
LIMIT %s
""", (safeLimit,))
rows = cur.fetchall()
else:
targetConn = _getConnection(rel.targetDb)
targetColumns = _loadPhysicalColumns(targetConn, rel.targetTable)
if rel.targetColumn not in targetColumns:
return []
parentIds = _loadParentIds(targetConn, rel.targetTable, rel.targetColumn)
with sourceConn.cursor() as cur:
if not parentIds:
cur.execute(f"""
SELECT *
FROM "{rel.sourceTable}"
WHERE "{rel.sourceColumn}" IS NOT NULL
AND "{rel.sourceColumn}" != ''
LIMIT %s
""", (safeLimit,))
else:
cur.execute(f"""
SELECT *
FROM "{rel.sourceTable}"
WHERE "{rel.sourceColumn}" IS NOT NULL
AND "{rel.sourceColumn}" != ''
AND "{rel.sourceColumn}" NOT IN (
SELECT unnest(%(ids)s::text[])
)
LIMIT %(lim)s
""", {"ids": list(parentIds), "lim": safeLimit})
rows = cur.fetchall()
finally:
if targetConn is not None:
try:
targetConn.close()
except Exception:
pass
sourceConn.close()
out: List[dict] = []
for row in rows:
rowDict = {k: _jsonSafe(v) for k, v in dict(row).items()}
out.append(asdict(OrphanRecord(
sourceDb=rel.sourceDb,
sourceTable=rel.sourceTable,
sourceColumn=rel.sourceColumn,
targetDb=rel.targetDb,
targetTable=rel.targetTable,
targetColumn=rel.targetColumn,
orphanFkValue=str(rowDict.get(rel.sourceColumn, "")),
rowId=str(rowDict.get("id")) if rowDict.get("id") is not None else None,
row=rowDict,
)))
return out
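# Download sketch (illustrative, not part of this commit): serialising the
# orphan list to CSV for the SysAdmin "inspect before clean" workflow. The
# helper name and the chosen columns are hypothetical.
#
#   import csv, io
#   def orphansToCsv(records: List[dict]) -> str:
#       buf = io.StringIO()
#       writer = csv.writer(buf)
#       writer.writerow(["sourceTable", "sourceColumn", "orphanFkValue", "rowId"])
#       for rec in records:
#           writer.writerow([rec["sourceTable"], rec["sourceColumn"],
#                            rec["orphanFkValue"], rec["rowId"]])
#       return buf.getvalue()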
def _jsonSafe(v):
"""Coerce psycopg2 row values into JSON-serialisable primitives."""
import datetime
import decimal
import uuid
if v is None or isinstance(v, (str, int, float, bool)):
return v
if isinstance(v, (datetime.datetime, datetime.date, datetime.time)):
return v.isoformat()
if isinstance(v, decimal.Decimal):
return float(v)
if isinstance(v, uuid.UUID):
return str(v)
if isinstance(v, (list, tuple)):
return [_jsonSafe(x) for x in v]
if isinstance(v, dict):
return {str(k): _jsonSafe(val) for k, val in v.items()}
if isinstance(v, (bytes, bytearray, memoryview)):
try:
return bytes(v).decode("utf-8", errors="replace")
except Exception:
return repr(v)
return str(v)
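# Examples of the coercions above:
#   _jsonSafe(datetime.date(2026, 4, 21))  -> "2026-04-21"
#   _jsonSafe(decimal.Decimal("9.50"))     -> 9.5
#   _jsonSafe(uuid.UUID(int=0))            -> "00000000-0000-0000-0000-000000000000"
#   _jsonSafe(memoryview(b"abc"))          -> "abc"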

View file

@@ -192,3 +192,69 @@ def registerAllFeaturesInCatalog(catalogService) -> Dict[str, bool]:
results[featureName] = False
return results
def syncCatalogFeaturesToDb(catalogService) -> Dict[str, str]:
"""Persist all in-memory feature definitions into the ``Feature`` DB table.
PowerOn discovers Features as Python modules at boot time and registers
them only in the in-memory ``RbacCatalog._featureDefinitions`` dict (see
``registerAllFeaturesInCatalog`` above). However, the ``FeatureInstance``
Pydantic model declares ``featureCode`` as an FK into the ``Feature`` DB
table. If the ``Feature`` table is not kept in sync with the code-side
registry, every ``FeatureInstance`` row appears as a foreign-key orphan
in the SysAdmin DB-health scan -- even though the feature very much
exists at runtime.
This function bridges that gap: after the catalog has been built, it walks
every registered feature definition and idempotently upserts it into the
``Feature`` table (insert-if-missing, update label/icon if drifted).
Returns:
Dict ``{featureCode: action}`` with action in
``{"created", "updated", "unchanged", "error"}``.
"""
actions: Dict[str, str] = {}
try:
from modules.security.rootAccess import getRootDbAppConnector
from modules.interfaces.interfaceFeatures import getFeatureInterface
except Exception as e:
logger.error(f"syncCatalogFeaturesToDb: dependency import failed: {e}")
return actions
try:
rootDb = getRootDbAppConnector()
featuresIf = getFeatureInterface(rootDb)
except Exception as e:
logger.error(f"syncCatalogFeaturesToDb: cannot obtain feature interface: {e}")
return actions
try:
definitions = catalogService.getFeatureDefinitions() or []
except Exception as e:
logger.error(f"syncCatalogFeaturesToDb: cannot list feature definitions: {e}")
return actions
for defn in definitions:
code = (defn or {}).get("code")
if not code:
continue
try:
action = featuresIf.upsertFeature(
code=code,
label=defn.get("label") or code,
icon=defn.get("icon") or "",
)
actions[code] = action
except Exception as e:
logger.error(f"syncCatalogFeaturesToDb: failed to upsert {code}: {e}")
actions[code] = "error"
created = sum(1 for v in actions.values() if v == "created")
updated = sum(1 for v in actions.values() if v == "updated")
errors = sum(1 for v in actions.values() if v == "error")
logger.info(
f"Feature DB sync: {len(actions)} definitions processed, "
f"{created} created, {updated} updated, {errors} errors"
)
return actions
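# Contract sketch (an assumption, not the real interfaceFeatures code): the
# insert-if-missing / update-if-drifted semantics this sync relies on from
# upsertFeature, modelled on an in-memory dict for illustration only.
#
#   def upsertFeatureSketch(store: Dict[str, Dict], code: str, label: str, icon: str) -> str:
#       desired = {"label": label, "icon": icon}
#       if code not in store:            # insert-if-missing
#           store[code] = desired
#           return "created"
#       if store[code] != desired:       # update label/icon if drifted
#           store[code] = desired
#           return "updated"
#       return "unchanged"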

94
tests/test_dateRange.py Normal file
View file

@@ -0,0 +1,94 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Unit tests for `modules.shared.dateRange`.
Pure-Python, no DB / no API.
"""
from datetime import date, datetime, time as dtTime
import pytest
from fastapi import HTTPException
from modules.shared.dateRange import (
daysInRange,
isoDateRangeToLocalEpoch,
parseIsoDate,
parseIsoDateRange,
)
class TestParseIsoDate:
def test_validIsoDate(self):
assert parseIsoDate("2026-04-15", "dateFrom") == date(2026, 4, 15)
def test_emptyStringRaises400(self):
with pytest.raises(HTTPException) as exc:
parseIsoDate("", "dateFrom")
assert exc.value.status_code == 400
assert "dateFrom" in exc.value.detail
def test_invalidFormatRaises400(self):
with pytest.raises(HTTPException) as exc:
parseIsoDate("15.04.2026", "dateTo")
assert exc.value.status_code == 400
assert "dateTo" in exc.value.detail
assert "15.04.2026" in exc.value.detail
def test_nonStringRaises400(self):
with pytest.raises(HTTPException) as exc:
parseIsoDate(None, "dateFrom") # type: ignore[arg-type]
assert exc.value.status_code == 400
class TestParseIsoDateRange:
def test_validRange(self):
f, t = parseIsoDateRange("2026-04-01", "2026-04-15")
assert f == date(2026, 4, 1)
assert t == date(2026, 4, 15)
def test_sameDayIsValid(self):
f, t = parseIsoDateRange("2026-04-15", "2026-04-15")
assert f == t
def test_invertedRangeRaises400(self):
with pytest.raises(HTTPException) as exc:
parseIsoDateRange("2026-04-15", "2026-04-01")
assert exc.value.status_code == 400
assert "dateFrom must be <= dateTo" in exc.value.detail
class TestIsoDateRangeToLocalEpoch:
def test_inclusiveEndOfDay(self):
"""`dateTo` boundary covers full last day (23:59:59.999999 local)."""
fromTs, toTs = isoDateRangeToLocalEpoch("2026-04-15", "2026-04-15")
startOfDay = datetime.combine(date(2026, 4, 15), dtTime.min).timestamp()
endOfDay = datetime.combine(date(2026, 4, 15), dtTime.max).timestamp()
assert fromTs == startOfDay
assert toTs == endOfDay
# Single-day range covers ~24h - 1 microsecond.
assert (toTs - fromTs) > (24 * 3600 - 1)
def test_multiDayRange(self):
fromTs, toTs = isoDateRangeToLocalEpoch("2026-04-01", "2026-04-03")
# Three local days, end-inclusive: ~3 * 86400 seconds (- 1us).
assert 3 * 86400 - 1 < (toTs - fromTs) < 3 * 86400 + 1
def test_invalidRaises400(self):
with pytest.raises(HTTPException):
isoDateRangeToLocalEpoch("not-a-date", "2026-04-03")
class TestDaysInRange:
def test_singleDayIsOne(self):
assert daysInRange("2026-04-15", "2026-04-15") == 1
def test_threeDaysInclusive(self):
assert daysInRange("2026-04-01", "2026-04-03") == 3
def test_yearSpan(self):
assert daysInRange("2026-01-01", "2026-12-31") == 365
def test_invertedRangeRaises400(self):
with pytest.raises(HTTPException):
daysInRange("2026-04-15", "2026-04-01")