Merge pull request #109 from valueonag/feat/code-editor

Feat/code editor
This commit is contained in:
Patrick Motsch 2026-03-18 23:41:43 +01:00 committed by GitHub
commit eefb37050e
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
20 changed files with 2561 additions and 330 deletions

View file

@ -4,7 +4,7 @@ import json
import logging
import httpx
import os
from typing import Dict, Any, List, AsyncGenerator, Union
from typing import Dict, Any, List, AsyncGenerator, Optional, Union
from fastapi import HTTPException
from modules.shared.configuration import APP_CONFIG
from .aicoreBase import BaseConnectorAi, RateLimitExceededException
@ -295,6 +295,7 @@ class AiAnthropic(BaseConnectorAi):
fullContent = ""
toolUseBlocks: Dict[int, Dict[str, Any]] = {}
currentToolIdx = -1
stopReason: Optional[str] = None
async with self.httpClient.stream("POST", model.apiUrl, json=payload) as response:
if response.status_code != 200:
@ -316,7 +317,16 @@ class AiAnthropic(BaseConnectorAi):
eventType = event.get("type", "")
if eventType == "content_block_start":
if eventType == "error":
errDetail = event.get("error", {})
errMsg = errDetail.get("message", str(errDetail))
errType = errDetail.get("type", "unknown")
logger.error(f"Anthropic stream error event: type={errType}, message={errMsg}")
if "overloaded" in errMsg.lower() or "overloaded" in errType.lower():
raise HTTPException(status_code=500, detail=f"Anthropic API is currently overloaded. Please try again in a few minutes.")
raise HTTPException(status_code=500, detail=f"Anthropic stream error: [{errType}] {errMsg}")
elif eventType == "content_block_start":
block = event.get("content_block", {})
idx = event.get("index", 0)
if block.get("type") == "tool_use":
@ -338,10 +348,22 @@ class AiAnthropic(BaseConnectorAi):
if idx in toolUseBlocks:
toolUseBlocks[idx]["arguments"] += delta.get("partial_json", "")
elif eventType == "message_delta":
delta = event.get("delta", {})
stopReason = delta.get("stop_reason", stopReason)
elif eventType == "message_stop":
break
if not fullContent and not toolUseBlocks:
logger.warning(
f"Anthropic stream returned empty response: model={model.name}, "
f"stopReason={stopReason}"
)
metadata: Dict[str, Any] = {}
if stopReason:
metadata["stopReason"] = stopReason
if toolUseBlocks:
metadata["toolCalls"] = [
{

View file

@ -174,7 +174,11 @@ class AiMistral(BaseConnectorAi):
"temperature": temperature,
"max_tokens": maxTokens
}
if modelCall.tools:
payload["tools"] = modelCall.tools
payload["tool_choice"] = modelCall.toolChoice or "auto"
response = await self.httpClient.post(
model.apiUrl,
json=payload
@ -214,15 +218,20 @@ class AiMistral(BaseConnectorAi):
raise HTTPException(status_code=500, detail=error_message)
responseJson = response.json()
content = responseJson["choices"][0]["message"]["content"]
choiceMessage = responseJson["choices"][0]["message"]
content = choiceMessage.get("content") or ""
metadata = {"response_id": responseJson.get("id", "")}
if choiceMessage.get("tool_calls"):
metadata["toolCalls"] = choiceMessage["tool_calls"]
return AiModelResponse(
content=content,
success=True,
modelId=model.name,
metadata={"response_id": responseJson.get("id", "")}
metadata=metadata,
)
except ContextLengthExceededException:
# Re-raise context length exceptions without wrapping
raise
@ -250,7 +259,12 @@ class AiMistral(BaseConnectorAi):
"stream": True,
}
if modelCall.tools:
payload["tools"] = modelCall.tools
payload["tool_choice"] = modelCall.toolChoice or "auto"
fullContent = ""
toolCallsAccum: Dict[int, Dict[str, Any]] = {}
async with self.httpClient.stream("POST", model.apiUrl, json=payload) as response:
if response.status_code != 200:
@ -280,11 +294,31 @@ class AiMistral(BaseConnectorAi):
fullContent += delta["content"]
yield delta["content"]
for tcDelta in delta.get("tool_calls", []):
idx = tcDelta.get("index", 0)
if idx not in toolCallsAccum:
toolCallsAccum[idx] = {
"id": tcDelta.get("id", ""),
"type": "function",
"function": {"name": "", "arguments": ""},
}
if tcDelta.get("id"):
toolCallsAccum[idx]["id"] = tcDelta["id"]
fn = tcDelta.get("function", {})
if fn.get("name"):
toolCallsAccum[idx]["function"]["name"] = fn["name"]
if fn.get("arguments"):
toolCallsAccum[idx]["function"]["arguments"] += fn["arguments"]
metadata: Dict[str, Any] = {}
if toolCallsAccum:
metadata["toolCalls"] = [toolCallsAccum[i] for i in sorted(toolCallsAccum)]
yield AiModelResponse(
content=fullContent,
success=True,
modelId=model.name,
metadata={},
metadata=metadata,
)
except (RateLimitExceededException, ContextLengthExceededException, HTTPException):

View file

@ -0,0 +1,45 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""FeatureDataSource model for exposing feature instance data to the AI workspace.
A FeatureDataSource links a FeatureInstance table (DATA_OBJECT) to a workspace
so the agent can query structured feature data (e.g. TrusteePosition rows).
"""
from typing import Optional
from pydantic import BaseModel, Field
from modules.shared.attributeUtils import registerModelLabels
from modules.shared.timeUtils import getUtcTimestamp
import uuid
class FeatureDataSource(BaseModel):
    """A feature-instance table attached as data source in the AI workspace.

    Each record links one FeatureInstance table (identified by feature code,
    table name and RBAC object key) to a workspace instance so the agent can
    query its structured rows.
    """

    # Primary key generated client-side, so records exist before any DB round-trip.
    id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key")
    featureInstanceId: str = Field(description="FK to FeatureInstance")
    featureCode: str = Field(description="Feature code (e.g. trustee, commcoach)")
    tableName: str = Field(description="Table name from DATA_OBJECTS meta (e.g. TrusteePosition)")
    objectKey: str = Field(description="RBAC object key (e.g. data.feature.trustee.TrusteePosition)")
    label: str = Field(description="User-visible label")
    # Empty string (not None) marks "unscoped" for mandate/user.
    mandateId: str = Field(default="", description="Mandate scope")
    userId: str = Field(default="", description="Owner user ID")
    workspaceInstanceId: str = Field(description="Workspace instance where this source is used")
    createdAt: float = Field(default_factory=getUtcTimestamp, description="Creation timestamp")
registerModelLabels(
"FeatureDataSource",
{"en": "Feature Data Source", "de": "Feature-Datenquelle", "fr": "Source de données fonctionnalité"},
{
"id": {"en": "ID", "de": "ID", "fr": "ID"},
"featureInstanceId": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance"},
"featureCode": {"en": "Feature", "de": "Feature", "fr": "Fonctionnalité"},
"tableName": {"en": "Table", "de": "Tabelle", "fr": "Table"},
"objectKey": {"en": "Object Key", "de": "Objekt-Schlüssel", "fr": "Clé objet"},
"label": {"en": "Label", "de": "Bezeichnung", "fr": "Libellé"},
"mandateId": {"en": "Mandate", "de": "Mandant", "fr": "Mandat"},
"userId": {"en": "User", "de": "Benutzer", "fr": "Utilisateur"},
"workspaceInstanceId": {"en": "Workspace", "de": "Workspace", "fr": "Espace de travail"},
"createdAt": {"en": "Created At", "de": "Erstellt am", "fr": "Créé le"},
},
)

View file

@ -118,6 +118,13 @@ class BaseAccountingConnector(ABC):
"""Load the vendor list. Override in connectors that support it."""
return []
async def getJournalEntries(self, config: Dict[str, Any], dateFrom: Optional[str] = None, dateTo: Optional[str] = None, accountNumbers: Optional[List[str]] = None) -> List[Dict[str, Any]]:
    """Read journal entries from the external system.

    Base implementation returns an empty list; connectors that support
    journal export override it. Each returned entry should contain:
    externalId, bookingDate, reference, description, currency, totalAmount,
    and lines — a list of {accountNumber, debitAmount, creditAmount,
    currency, taxCode, costCenter, description} dicts.

    accountNumbers carries pre-fetched account numbers so an overriding
    connector can skip a redundant chart-of-accounts API call.
    """
    return []
async def uploadDocument(
self,
config: Dict[str, Any],

View file

@ -0,0 +1,306 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Orchestrates importing accounting data from external systems into TrusteeData* tables.
Flow: load config → resolve connector → fetch data → clear old records → write new records → compute balances.
"""
import logging
import time
from collections import defaultdict
from typing import Dict, Any, Optional
from .accountingConnectorBase import BaseAccountingConnector
from .accountingRegistry import _getAccountingRegistry
logger = logging.getLogger(__name__)
class AccountingDataSync:
    """Imports accounting data (read-only) from an external system into local TrusteeData* tables."""

    def __init__(self, trusteeInterface):
        # trusteeInterface exposes .db (getRecordset/recordCreate/recordModify/recordDelete).
        self._if = trusteeInterface
        self._registry = _getAccountingRegistry()

    async def importData(
        self,
        featureInstanceId: str,
        mandateId: str,
        dateFrom: Optional[str] = None,
        dateTo: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Run a full data import for a feature instance.

        Steps: load + decrypt the active TrusteeAccountingConfig, resolve the
        connector, then import accounts, journal entries/lines and contacts
        (each step clears the old records first), and finally recompute the
        account balances. Each step is isolated: a failure is recorded in the
        summary and the remaining steps still run.

        Returns a summary dict with counts per entity and any errors.
        """
        # Local imports avoid a circular dependency with the trustee feature module.
        from modules.features.trustee.datamodelFeatureTrustee import (
            TrusteeAccountingConfig,
            TrusteeDataAccount,
            TrusteeDataJournalEntry,
            TrusteeDataJournalLine,
            TrusteeDataContact,
            TrusteeDataAccountBalance,
        )
        from modules.shared.configuration import decryptValue
        import json
        import uuid  # hoisted: previously imported once per journal entry inside the loop
        summary: Dict[str, Any] = {
            "accounts": 0,
            "journalEntries": 0,
            "journalLines": 0,
            "contacts": 0,
            "accountBalances": 0,
            "errors": [],
            "startedAt": time.time(),
        }
        cfgRecords = self._if.db.getRecordset(
            TrusteeAccountingConfig,
            recordFilter={"featureInstanceId": featureInstanceId, "isActive": True},
        )
        if not cfgRecords:
            summary["errors"].append("No active accounting configuration found")
            return summary
        cfgRecord = cfgRecords[0]
        connectorType = cfgRecord.get("connectorType", "")
        encryptedConfig = cfgRecord.get("encryptedConfig", "")
        try:
            plainJson = decryptValue(encryptedConfig)
            connConfig = json.loads(plainJson) if plainJson else {}
        except Exception as e:
            summary["errors"].append(f"Failed to decrypt config: {e}")
            return summary
        connector = self._registry.getConnector(connectorType)
        if not connector:
            summary["errors"].append(f"Unknown connector type: {connectorType}")
            return summary
        # Common scope columns written on every synced record.
        scope = {"featureInstanceId": featureInstanceId, "mandateId": mandateId}
        logger.info(f"AccountingDataSync starting for {featureInstanceId}, connector={connectorType}, dateFrom={dateFrom}, dateTo={dateTo}")
        fetchedAccountNumbers: list = []
        # 1) Chart of accounts
        try:
            charts = await connector.getChartOfAccounts(connConfig)
            fetchedAccountNumbers = [acc.accountNumber for acc in charts if acc.accountNumber]
            self._clearTable(TrusteeDataAccount, featureInstanceId)
            for acc in charts:
                self._if.db.recordCreate(TrusteeDataAccount, {
                    "accountNumber": acc.accountNumber,
                    "label": acc.label,
                    "accountType": acc.accountType or "",
                    "currency": "CHF",
                    "isActive": True,
                    **scope,
                })
            summary["accounts"] = len(charts)
        except Exception as e:
            logger.error(f"Import accounts failed: {e}", exc_info=True)
            summary["errors"].append(f"Accounts: {e}")
        # 2) Journal entries + lines (pass already-fetched chart to avoid redundant API call)
        try:
            rawEntries = await connector.getJournalEntries(connConfig, dateFrom=dateFrom, dateTo=dateTo, accountNumbers=fetchedAccountNumbers or None)
            self._clearTable(TrusteeDataJournalEntry, featureInstanceId)
            self._clearTable(TrusteeDataJournalLine, featureInstanceId)
            lineCount = 0
            for raw in rawEntries:
                # Generate the entry id locally so lines can reference it immediately.
                entryId = str(uuid.uuid4())
                self._if.db.recordCreate(TrusteeDataJournalEntry, {
                    "id": entryId,
                    "externalId": raw.get("externalId"),
                    "bookingDate": raw.get("bookingDate"),
                    "reference": raw.get("reference"),
                    "description": raw.get("description", ""),
                    "currency": raw.get("currency", "CHF"),
                    "totalAmount": float(raw.get("totalAmount", 0)),
                    **scope,
                })
                for line in (raw.get("lines") or []):
                    self._if.db.recordCreate(TrusteeDataJournalLine, {
                        "journalEntryId": entryId,
                        "accountNumber": line.get("accountNumber", ""),
                        "debitAmount": float(line.get("debitAmount", 0)),
                        "creditAmount": float(line.get("creditAmount", 0)),
                        "currency": line.get("currency", "CHF"),
                        "taxCode": line.get("taxCode"),
                        "costCenter": line.get("costCenter"),
                        "description": line.get("description", ""),
                        **scope,
                    })
                    lineCount += 1
            summary["journalEntries"] = len(rawEntries)
            summary["journalLines"] = lineCount
        except Exception as e:
            logger.error(f"Import journal entries failed: {e}", exc_info=True)
            summary["errors"].append(f"Journal entries: {e}")
        # 3) Contacts (customers + vendors)
        try:
            self._clearTable(TrusteeDataContact, featureInstanceId)
            contactCount = 0
            customers = await connector.getCustomers(connConfig)
            for c in customers:
                self._if.db.recordCreate(TrusteeDataContact, self._mapContact(c, "customer", scope))
                contactCount += 1
            vendors = await connector.getVendors(connConfig)
            for v in vendors:
                self._if.db.recordCreate(TrusteeDataContact, self._mapContact(v, "vendor", scope))
                contactCount += 1
            summary["contacts"] = contactCount
        except Exception as e:
            logger.error(f"Import contacts failed: {e}", exc_info=True)
            summary["errors"].append(f"Contacts: {e}")
        # 4) Compute account balances from journal lines
        try:
            self._clearTable(TrusteeDataAccountBalance, featureInstanceId)
            balanceCount = self._computeBalances(featureInstanceId, mandateId)
            summary["accountBalances"] = balanceCount
        except Exception as e:
            logger.error(f"Compute balances failed: {e}", exc_info=True)
            summary["errors"].append(f"Balances: {e}")
        # Update config with last import timestamp. Best-effort: a failure here
        # must not invalidate an otherwise successful import, but log it.
        try:
            cfgId = cfgRecord.get("id")
            if cfgId:
                self._if.db.recordModify(TrusteeAccountingConfig, cfgId, {
                    "lastSyncAt": time.time(),
                    "lastSyncStatus": "success" if not summary["errors"] else "partial",
                    "lastSyncErrorMessage": "; ".join(summary["errors"])[:500] if summary["errors"] else None,
                })
        except Exception as e:
            logger.debug(f"Could not persist last-sync status: {e}")
        summary["finishedAt"] = time.time()
        summary["durationSeconds"] = round(summary["finishedAt"] - summary["startedAt"], 1)
        logger.info(
            f"AccountingDataSync completed for {featureInstanceId}: "
            f"{summary['accounts']} accounts, {summary['journalEntries']} entries, "
            f"{summary['journalLines']} lines, {summary['contacts']} contacts, "
            f"{summary['accountBalances']} balances, {len(summary['errors'])} errors, "
            f"{summary['durationSeconds']}s"
        )
        return summary

    @staticmethod
    def _safeStr(val: Any) -> str:
        """Convert a value to a safe string for DB storage, collapsing nested dicts/lists."""
        if val is None:
            return ""
        # Nested structures (e.g. address sub-objects from some APIs) are dropped, not stringified.
        if isinstance(val, (dict, list)):
            return ""
        return str(val)

    def _mapContact(self, raw: Dict[str, Any], contactType: str, scope: Dict[str, Any]) -> Dict[str, Any]:
        """Extract contact fields from a raw API dict, handling varying field names across connectors."""
        s = self._safeStr
        return {
            "externalId": s(raw.get("id") or raw.get("Id") or raw.get("customer_nr") or raw.get("vendor_nr") or ""),
            "contactType": contactType,
            "contactNumber": s(
                raw.get("customernumber") or raw.get("customer_nr")
                or raw.get("vendornumber") or raw.get("vendor_nr")
                or raw.get("nr") or raw.get("ContactNumber")
                or raw.get("id") or ""
            ),
            "name": s(raw.get("name") or raw.get("Name") or raw.get("name_1") or ""),
            "address": s(raw.get("addr1") or raw.get("address") or raw.get("Address") or ""),
            "zip": s(raw.get("zipcode") or raw.get("postcode") or raw.get("Zip") or raw.get("zip") or ""),
            "city": s(raw.get("city") or raw.get("City") or ""),
            "country": s(raw.get("country") or raw.get("country_id") or raw.get("Country") or ""),
            "email": s(raw.get("email") or raw.get("mail") or raw.get("Email") or ""),
            "phone": s(raw.get("phone") or raw.get("phone_fixed") or raw.get("Phone") or ""),
            "vatNumber": s(raw.get("vat_identifier") or raw.get("vatNumber") or ""),
            **scope,
        }

    def _clearTable(self, model, featureInstanceId: str):
        """Delete all records for this feature instance from a TrusteeData* table."""
        records = self._if.db.getRecordset(model, recordFilter={"featureInstanceId": featureInstanceId})
        for r in (records or []):
            rid = r.get("id") if isinstance(r, dict) else getattr(r, "id", None)
            if rid:
                try:
                    self._if.db.recordDelete(model, rid)
                except Exception:
                    # Best-effort cleanup; a stray undeletable record must not abort the import.
                    pass

    def _computeBalances(self, featureInstanceId: str, mandateId: str) -> int:
        """Aggregate journal lines into monthly + annual account balances.

        Monthly buckets are keyed (accountNumber, year, month); the annual
        total uses month=0. Returns the number of balance records written.
        """
        from modules.features.trustee.datamodelFeatureTrustee import (
            TrusteeDataJournalEntry,
            TrusteeDataJournalLine,
            TrusteeDataAccountBalance,
        )
        entries = self._if.db.getRecordset(
            TrusteeDataJournalEntry,
            recordFilter={"featureInstanceId": featureInstanceId},
        ) or []
        # Map entry id -> booking date so each line can be dated via its header.
        entryDates = {}
        for e in entries:
            eid = e.get("id") if isinstance(e, dict) else getattr(e, "id", None)
            bdate = e.get("bookingDate") if isinstance(e, dict) else getattr(e, "bookingDate", None)
            if eid and bdate:
                entryDates[eid] = bdate
        lines = self._if.db.getRecordset(
            TrusteeDataJournalLine,
            recordFilter={"featureInstanceId": featureInstanceId},
        ) or []
        # key: (accountNumber, year, month); month 0 accumulates the annual total
        buckets: Dict[tuple, Dict[str, float]] = defaultdict(lambda: {"debit": 0.0, "credit": 0.0})
        for ln in lines:
            if isinstance(ln, dict):
                jeid = ln.get("journalEntryId", "")
                accNo = ln.get("accountNumber", "")
                debit = float(ln.get("debitAmount", 0))
                credit = float(ln.get("creditAmount", 0))
            else:
                jeid = getattr(ln, "journalEntryId", "")
                accNo = getattr(ln, "accountNumber", "")
                debit = float(getattr(ln, "debitAmount", 0))
                credit = float(getattr(ln, "creditAmount", 0))
            bdate = entryDates.get(jeid, "")
            if not accNo or not bdate:
                continue
            parts = bdate.split("-")
            if len(parts) < 2:
                continue
            try:
                year = int(parts[0])
                month = int(parts[1])
            except ValueError:
                # Malformed booking date: skip the line instead of aborting all balances.
                continue
            buckets[(accNo, year, month)]["debit"] += debit
            buckets[(accNo, year, month)]["credit"] += credit
            buckets[(accNo, year, 0)]["debit"] += debit
            buckets[(accNo, year, 0)]["credit"] += credit
        count = 0
        scope = {"featureInstanceId": featureInstanceId, "mandateId": mandateId}
        for (accNo, year, month), totals in buckets.items():
            # closingBalance is debit minus credit; openingBalance is not tracked here.
            closing = totals["debit"] - totals["credit"]
            self._if.db.recordCreate(TrusteeDataAccountBalance, {
                "accountNumber": accNo,
                "periodYear": year,
                "periodMonth": month,
                "openingBalance": 0.0,
                "debitTotal": round(totals["debit"], 2),
                "creditTotal": round(totals["credit"], 2),
                "closingBalance": round(closing, 2),
                "currency": "CHF",
                **scope,
            })
            count += 1
        return count

View file

@ -255,6 +255,60 @@ class AccountingConnectorAbacus(BaseAccountingConnector):
except Exception as e:
return SyncResult(success=False, errorMessage=str(e))
async def getJournalEntries(self, config: Dict[str, Any], dateFrom: Optional[str] = None, dateTo: Optional[str] = None, accountNumbers: Optional[List[str]] = None) -> List[Dict[str, Any]]:
    """Read GeneralJournalEntries from Abacus (OData V4, paginated).

    Builds an optional $filter on JournalDate from dateFrom/dateTo and
    follows @odata.nextLink until exhausted. accountNumbers is accepted
    for interface compatibility but not used by this connector.
    Returns whatever pages were collected before any failure.
    """
    headers = await self._buildAuthHeaders(config)
    if not headers:
        # No auth available -> nothing we can fetch.
        return []
    filterParts = []
    if dateFrom:
        filterParts.append(f"JournalDate ge {dateFrom}")
    if dateTo:
        filterParts.append(f"JournalDate le {dateTo}")
    queryParams = ""
    if filterParts:
        queryParams = "?$filter=" + " and ".join(filterParts)
    entries: List[Dict[str, Any]] = []
    url: Optional[str] = self._buildEntityUrl(config, f"GeneralJournalEntries{queryParams}")
    try:
        async with aiohttp.ClientSession() as session:
            while url:  # follow OData pagination until no nextLink remains
                async with session.get(url, headers=headers, timeout=aiohttp.ClientTimeout(total=60)) as resp:
                    if resp.status != 200:
                        # Keep the pages already collected rather than failing the whole read.
                        break
                    data = await resp.json()
                    for item in data.get("value", []):
                        lines = []
                        totalAmt = 0.0
                        for line in (item.get("Lines") or []):
                            debit = float(line.get("DebitAmount", 0))
                            credit = float(line.get("CreditAmount", 0))
                            lines.append({
                                "accountNumber": str(line.get("AccountId", "")),
                                "debitAmount": debit,
                                "creditAmount": credit,
                                "description": line.get("Text", ""),
                                "taxCode": line.get("TaxCode"),
                                "costCenter": line.get("CostCenterId"),
                            })
                            # Entry total = sum of the larger side of each line
                            # (a line is either a debit or a credit).
                            totalAmt += max(debit, credit)
                        entries.append({
                            "externalId": str(item.get("Id", "")),
                            # Keep only the date part of an ISO timestamp.
                            "bookingDate": str(item.get("JournalDate", "")).split("T")[0],
                            "reference": item.get("Reference", ""),
                            "description": item.get("Text", ""),
                            # NOTE(review): currency is hard-coded CHF — confirm against the Abacus payload.
                            "currency": "CHF",
                            "totalAmount": totalAmt,
                            "lines": lines,
                        })
                    url = data.get("@odata.nextLink")
    except Exception as e:
        logger.error(f"Abacus getJournalEntries error: {e}")
    return entries
async def getCustomers(self, config: Dict[str, Any]) -> List[Dict[str, Any]]:
headers = await self._buildAuthHeaders(config)
if not headers:

View file

@ -193,6 +193,62 @@ class AccountingConnectorBexio(BaseAccountingConnector):
except Exception as e:
return SyncResult(success=False, errorMessage=str(e))
async def getJournalEntries(self, config: Dict[str, Any], dateFrom: Optional[str] = None, dateTo: Optional[str] = None, accountNumbers: Optional[List[str]] = None) -> List[Dict[str, Any]]:
    """Read manual entries from Bexio. API: GET 3.0/accounting/manual-entries

    A Bexio entry line carries one amount plus a debit and a credit account
    id; each is expanded into a debit line and (when a distinct credit
    account exists) a credit line. Account ids are mapped to account
    numbers via the chart of accounts. accountNumbers is accepted for
    interface compatibility but unused. Returns [] on any error.
    """
    try:
        accounts = await self._loadRawAccounts(config)
        # Map internal Bexio account id -> human-readable account number.
        accMap = {acc.get("id"): str(acc.get("account_no", "")) for acc in accounts}
        async with aiohttp.ClientSession() as session:
            url = self._buildUrl(config, "3.0/accounting/manual-entries")
            params: Dict[str, str] = {}
            if dateFrom:
                params["date_from"] = dateFrom
            if dateTo:
                params["date_to"] = dateTo
            async with session.get(url, headers=self._buildHeaders(config), params=params, timeout=aiohttp.ClientTimeout(total=60)) as resp:
                if resp.status != 200:
                    logger.error(f"Bexio getJournalEntries failed: HTTP {resp.status}")
                    return []
                items = await resp.json()
                entries = []
                for item in (items if isinstance(items, list) else []):
                    lines = []
                    totalAmt = 0.0
                    for e in (item.get("entries") or []):
                        amt = float(e.get("amount", 0))
                        debitAccId = e.get("debit_account_id")
                        creditAccId = e.get("credit_account_id")
                        # Debit leg; falls back to the raw id string if the
                        # account is not in the chart map.
                        lines.append({
                            "accountNumber": accMap.get(debitAccId, str(debitAccId or "")),
                            "debitAmount": amt,
                            "creditAmount": 0.0,
                            "description": e.get("description", ""),
                            "taxCode": str(e.get("tax_id", "")) if e.get("tax_id") else None,
                        })
                        # Credit leg only when a distinct credit account is given.
                        if creditAccId and creditAccId != debitAccId:
                            lines.append({
                                "accountNumber": accMap.get(creditAccId, str(creditAccId or "")),
                                "debitAmount": 0.0,
                                "creditAmount": amt,
                                "description": e.get("description", ""),
                            })
                        totalAmt += amt
                    entries.append({
                        "externalId": str(item.get("id", "")),
                        "bookingDate": item.get("date", ""),
                        "reference": item.get("reference_nr", ""),
                        "description": item.get("text", ""),
                        # NOTE(review): currency hard-coded CHF — confirm against Bexio payload.
                        "currency": "CHF",
                        "totalAmount": totalAmt,
                        "lines": lines,
                    })
                return entries
    except Exception as e:
        logger.error(f"Bexio getJournalEntries error: {e}")
        return []
async def getCustomers(self, config: Dict[str, Any]) -> List[Dict[str, Any]]:
try:
async with aiohttp.ClientSession() as session:

View file

@ -150,11 +150,11 @@ class AccountingConnectorRma(BaseAccountingConnector):
charts = []
items = data if isinstance(data, list) else data.get("chart", data.get("row", []))
if not isinstance(items, list):
items = []
items = [items] if isinstance(items, dict) else []
for item in items:
if isinstance(item, dict):
accNo = str(item.get("accno", item.get("account_number", "")))
label = str(item.get("description", item.get("label", "")))
accNo = str(item.get("accno") or item.get("account_number") or item.get("number") or item.get("@accno") or "")
label = str(item.get("description") or item.get("label") or item.get("@description") or "")
rmaLink = item.get("link") or ""
chartType = item.get("charttype") or item.get("category") or ""
if not chartType and rmaLink:
@ -338,6 +338,169 @@ class AccountingConnectorRma(BaseAccountingConnector):
logger.debug("RMA isBookingSynced error: %s trust local", e)
return SyncResult(success=True)
async def getJournalEntries(self, config: Dict[str, Any], dateFrom: Optional[str] = None, dateTo: Optional[str] = None, accountNumbers: Optional[List[str]] = None) -> List[Dict[str, Any]]:
    """Read GL entries from RMA.

    Strategy: first try GET /gl (bulk), then fall back to iterating
    account transactions. Uses pre-fetched accountNumbers if provided.

    Returns journal-entry dicts ({externalId, bookingDate, reference,
    description, currency, totalAmount, lines}); [] on unrecoverable error.
    """
    try:
        params: Dict[str, str] = {}
        if dateFrom:
            params["from_date"] = dateFrom
        if dateTo:
            params["to_date"] = dateTo
        # Try bulk GL endpoint first (one request; not all RMA versions support it)
        bulkEntries = await self._fetchGlBulk(config, params)
        if bulkEntries:
            return bulkEntries
        # Fallback: iterate accounts and fetch transactions per account
        if accountNumbers:
            accNums = accountNumbers
        else:
            chart = await self.getChartOfAccounts(config)
            accNums = [acc.accountNumber for acc in chart if acc.accountNumber]
        if not accNums:
            return []
        # Transactions seen on different accounts that share a reference are
        # merged back into a single journal entry keyed by that reference.
        entriesByRef: Dict[str, Dict[str, Any]] = {}
        fetchedCount = 0
        emptyCount = 0
        errorCount = 0
        async with aiohttp.ClientSession() as session:
            for accNo in accNums:
                url = self._buildUrl(config, f"charts/{accNo}/transactions")
                try:
                    async with session.get(url, headers=self._buildHeaders(config), params=params, timeout=aiohttp.ClientTimeout(total=10)) as resp:
                        if resp.status != 200:
                            emptyCount += 1
                            continue
                        body = await resp.text()
                        if not body.strip():
                            emptyCount += 1
                            continue
                        try:
                            data = json.loads(body)
                        except Exception:
                            errorCount += 1
                            continue
                except Exception:
                    # Broad on purpose: per-account failures (including timeouts —
                    # asyncio.TimeoutError is an Exception subclass) are tolerated,
                    # the remaining accounts are still fetched.
                    errorCount += 1
                    continue
                fetchedCount += 1
                # RMA may return {"transaction": [...]}, a single dict, or a bare list.
                if isinstance(data, dict):
                    transactions = data.get("transaction") or data.get("@transaction")
                else:
                    transactions = data
                if isinstance(transactions, dict):
                    transactions = [transactions]
                if not isinstance(transactions, list):
                    continue
                for t in transactions:
                    if not isinstance(t, dict):
                        continue
                    ref = t.get("reference") or t.get("@reference") or t.get("batch_number") or str(t.get("id") or "")
                    transDate = str(t.get("transdate") or t.get("@transdate") or "").split("T")[0]
                    desc = t.get("description") or t.get("memo") or t.get("@description") or ""
                    # RMA reports a signed amount: positive = debit, negative = credit.
                    rawAmount = float(t.get("amount") or t.get("@amount") or 0)
                    debit = rawAmount if rawAmount > 0 else 0.0
                    credit = abs(rawAmount) if rawAmount < 0 else 0.0
                    if ref not in entriesByRef:
                        entriesByRef[ref] = {
                            "externalId": str(t.get("id") or t.get("@id") or ref),
                            "bookingDate": transDate,
                            "reference": ref,
                            "description": desc,
                            "currency": "CHF",
                            "totalAmount": 0.0,
                            "lines": [],
                        }
                    entry = entriesByRef[ref]
                    entry["lines"].append({
                        "accountNumber": accNo,
                        "debitAmount": debit,
                        "creditAmount": credit,
                        "description": desc,
                    })
                    entry["totalAmount"] += max(debit, credit)
        # Surface the per-account counters (previously computed but never reported).
        logger.debug(f"RMA getJournalEntries fallback: {fetchedCount} accounts fetched, {emptyCount} empty, {errorCount} errors")
        return list(entriesByRef.values())
    except Exception as e:
        logger.error(f"RMA getJournalEntries error: {e}", exc_info=True)
        return []
async def _fetchGlBulk(self, config: Dict[str, Any], params: Dict[str, str]) -> List[Dict[str, Any]]:
    """Try GET /gl to fetch journal entries in bulk (not all RMA versions support this).

    Returns normalized journal-entry dicts, or [] when the endpoint is
    unavailable, returns non-200, or yields an unparseable/unknown shape —
    the caller then falls back to per-account fetching.
    """
    try:
        async with aiohttp.ClientSession() as session:
            url = self._buildUrl(config, "gl")
            async with session.get(url, headers=self._buildHeaders(config), params=params, timeout=aiohttp.ClientTimeout(total=60)) as resp:
                if resp.status != 200:
                    return []
                body = await resp.text()
                if not body.strip():
                    return []
                try:
                    data = json.loads(body)
                except Exception:
                    # Non-JSON body (endpoint unsupported) -> signal fallback.
                    return []
                # Accept a bare list or one of several known wrapper keys.
                items = data if isinstance(data, list) else (data.get("gl_batch") or data.get("gl") or data.get("items") or [])
                if isinstance(items, dict):
                    items = [items]
                if not isinstance(items, list):
                    return []
                entries = []
                for batch in items:
                    if not isinstance(batch, dict):
                        continue
                    # Keep only the date part of an ISO timestamp.
                    transDate = str(batch.get("transdate") or batch.get("date") or "").split("T")[0]
                    ref = batch.get("batch_number") or batch.get("reference") or str(batch.get("id", ""))
                    desc = batch.get("description") or batch.get("notes") or ""
                    # Transactions may be nested under gl_transactions.gl_transaction,
                    # as a single dict, or as a plain list.
                    rawTxns = batch.get("gl_transactions", {})
                    txnList = rawTxns.get("gl_transaction") if isinstance(rawTxns, dict) else rawTxns
                    if isinstance(txnList, dict):
                        txnList = [txnList]
                    if not isinstance(txnList, list):
                        txnList = []
                    lines = []
                    totalAmt = 0.0
                    for t in txnList:
                        if not isinstance(t, dict):
                            continue
                        debit = float(t.get("debit_amount") or 0)
                        credit = float(t.get("credit_amount") or 0)
                        lines.append({
                            "accountNumber": str(t.get("accno", "")),
                            "debitAmount": debit,
                            "creditAmount": credit,
                            "description": t.get("memo", ""),
                        })
                        # Entry total = sum of the larger side of each line.
                        totalAmt += max(debit, credit)
                    entries.append({
                        "externalId": str(batch.get("id", ref)),
                        "bookingDate": transDate,
                        "reference": ref,
                        "description": desc,
                        "currency": batch.get("currency", "CHF"),
                        "totalAmount": totalAmt,
                        "lines": lines,
                    })
                return entries
    except Exception as e:
        # Debug level: absence of /gl is an expected condition, not an error.
        logger.debug(f"RMA _fetchGlBulk not available: {e}")
        return []
async def pushInvoice(self, config: Dict[str, Any], invoice: Dict[str, Any]) -> SyncResult:
try:
async with aiohttp.ClientSession() as session:
@ -357,8 +520,8 @@ class AccountingConnectorRma(BaseAccountingConnector):
async with session.get(url, headers=self._buildHeaders(config), timeout=aiohttp.ClientTimeout(total=30)) as resp:
if resp.status != 200:
return []
data = await resp.json()
return data if isinstance(data, list) else data.get("customer", [])
data = await self._parseJsonOrXmlList(resp, "customer")
return data
except Exception as e:
logger.error(f"RMA getCustomers error: {e}")
return []
@ -370,12 +533,39 @@ class AccountingConnectorRma(BaseAccountingConnector):
async with session.get(url, headers=self._buildHeaders(config), timeout=aiohttp.ClientTimeout(total=30)) as resp:
if resp.status != 200:
return []
data = await resp.json()
return data if isinstance(data, list) else data.get("vendor", [])
data = await self._parseJsonOrXmlList(resp, "vendor")
return data
except Exception as e:
logger.error(f"RMA getVendors error: {e}")
return []
async def _parseJsonOrXmlList(self, resp: aiohttp.ClientResponse, itemKey: str) -> List[Dict[str, Any]]:
    """Parse RMA response that may be JSON or XML. Returns list of dicts.

    JSON path: accepts a bare list, or a dict wrapping the list under
    itemKey / "items" / "row" (a single dict is wrapped into a list).
    XML path: minimal regex extraction of <id>/<name> pairs — pairing is
    positional, which assumes ids and names appear in the same order.
    """
    body = await resp.text()
    if not body or not body.strip():
        return []
    try:
        data = json.loads(body)
        if isinstance(data, list):
            return data
        if isinstance(data, dict):
            items = data.get(itemKey) or data.get("items") or data.get("row") or []
            if isinstance(items, dict):
                return [items]
            return items if isinstance(items, list) else []
        # JSON scalar (string/number) is not a usable list shape.
        return []
    except (json.JSONDecodeError, ValueError):
        # Not JSON — fall through to the XML heuristic below.
        pass
    result: List[Dict[str, Any]] = []
    ids = re.findall(r"<id>([^<]+)</id>", body)
    names = re.findall(r"<name>([^<]+)</name>", body)
    for i, rid in enumerate(ids):
        entry: Dict[str, Any] = {"id": rid.strip()}
        if i < len(names):
            entry["name"] = names[i].strip()
        result.append(entry)
    return result
async def _findBelegByFilename(self, config: Dict[str, Any], session: aiohttp.ClientSession, fileName: str) -> Optional[str]:
"""Try GET /belege (undocumented) to find an existing beleg by filename."""
try:

View file

@ -736,6 +736,177 @@ registerModelLabels(
)
# ── TrusteeData* tables (synced from external accounting apps for analysis) ──
class TrusteeDataAccount(BaseModel):
    """Chart of accounts synced from external accounting system.

    Rows are scoped by mandateId/featureInstanceId so multiple synced
    systems can coexist in one database.
    """

    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    accountNumber: str = Field(description="Account number (e.g. '1020')")
    label: str = Field(default="", description="Account name")
    accountType: Optional[str] = Field(default=None, description="asset / liability / equity / revenue / expense")
    accountGroup: Optional[str] = Field(default=None, description="Account group/category")
    currency: str = Field(default="CHF", description="Account currency")
    isActive: bool = Field(default=True)
    mandateId: Optional[str] = Field(default=None)
    featureInstanceId: Optional[str] = Field(default=None)
registerModelLabels(
"TrusteeDataAccount",
{"en": "Account (Synced)", "de": "Konto (Sync)", "fr": "Compte (Sync)"},
{
"id": {"en": "ID", "de": "ID", "fr": "ID"},
"accountNumber": {"en": "Account Number", "de": "Kontonummer", "fr": "Numéro de compte"},
"label": {"en": "Name", "de": "Bezeichnung", "fr": "Libellé"},
"accountType": {"en": "Type", "de": "Typ", "fr": "Type"},
"accountGroup": {"en": "Group", "de": "Gruppe", "fr": "Groupe"},
"currency": {"en": "Currency", "de": "Währung", "fr": "Devise"},
"isActive": {"en": "Active", "de": "Aktiv", "fr": "Actif"},
"mandateId": {"en": "Mandate", "de": "Mandat", "fr": "Mandat"},
"featureInstanceId": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance"},
},
)
class TrusteeDataJournalEntry(BaseModel):
    """Journal entry header synced from external accounting system."""
    # Surrogate key; the source system's id is kept separately in externalId.
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    externalId: Optional[str] = Field(default=None, description="ID in the source system")
    bookingDate: Optional[str] = Field(default=None, description="Booking date (YYYY-MM-DD)")
    reference: Optional[str] = Field(default=None, description="Booking reference / voucher number")
    description: str = Field(default="", description="Booking text")
    currency: str = Field(default="CHF")
    totalAmount: float = Field(default=0.0, description="Total amount of entry")
    # Scoping: which mandate / feature instance this synced row belongs to.
    mandateId: Optional[str] = Field(default=None)
    featureInstanceId: Optional[str] = Field(default=None)


# Multilingual (en/de/fr) labels for the table and each field, consumed by the UI.
registerModelLabels(
    "TrusteeDataJournalEntry",
    {"en": "Journal Entry (Synced)", "de": "Buchung (Sync)", "fr": "Écriture (Sync)"},
    {
        "id": {"en": "ID", "de": "ID", "fr": "ID"},
        "externalId": {"en": "External ID", "de": "Externe ID", "fr": "ID externe"},
        "bookingDate": {"en": "Date", "de": "Datum", "fr": "Date"},
        "reference": {"en": "Reference", "de": "Referenz", "fr": "Référence"},
        "description": {"en": "Description", "de": "Beschreibung", "fr": "Description"},
        "currency": {"en": "Currency", "de": "Währung", "fr": "Devise"},
        "totalAmount": {"en": "Amount", "de": "Betrag", "fr": "Montant"},
        "mandateId": {"en": "Mandate", "de": "Mandat", "fr": "Mandat"},
        "featureInstanceId": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance"},
    },
)
class TrusteeDataJournalLine(BaseModel):
    """Journal entry line (debit/credit) synced from external accounting system."""
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    # Parent link: lines are grouped under a TrusteeDataJournalEntry header.
    journalEntryId: str = Field(description="FK → TrusteeDataJournalEntry.id")
    accountNumber: str = Field(description="Account number")
    # Debit and credit are kept as separate non-negative amounts rather than one signed value.
    debitAmount: float = Field(default=0.0)
    creditAmount: float = Field(default=0.0)
    currency: str = Field(default="CHF")
    taxCode: Optional[str] = Field(default=None)
    costCenter: Optional[str] = Field(default=None)
    description: str = Field(default="")
    # Scoping: which mandate / feature instance this synced row belongs to.
    mandateId: Optional[str] = Field(default=None)
    featureInstanceId: Optional[str] = Field(default=None)


# Multilingual (en/de/fr) labels for the table and each field, consumed by the UI.
registerModelLabels(
    "TrusteeDataJournalLine",
    {"en": "Journal Line (Synced)", "de": "Buchungszeile (Sync)", "fr": "Ligne écriture (Sync)"},
    {
        "id": {"en": "ID", "de": "ID", "fr": "ID"},
        "journalEntryId": {"en": "Journal Entry", "de": "Buchung", "fr": "Écriture"},
        "accountNumber": {"en": "Account", "de": "Konto", "fr": "Compte"},
        "debitAmount": {"en": "Debit", "de": "Soll", "fr": "Débit"},
        "creditAmount": {"en": "Credit", "de": "Haben", "fr": "Crédit"},
        "currency": {"en": "Currency", "de": "Währung", "fr": "Devise"},
        "taxCode": {"en": "Tax Code", "de": "Steuercode", "fr": "Code TVA"},
        "costCenter": {"en": "Cost Center", "de": "Kostenstelle", "fr": "Centre de coûts"},
        "description": {"en": "Description", "de": "Beschreibung", "fr": "Description"},
        "mandateId": {"en": "Mandate", "de": "Mandat", "fr": "Mandat"},
        "featureInstanceId": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance"},
    },
)
class TrusteeDataContact(BaseModel):
    """Customer or vendor synced from external accounting system."""
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    externalId: Optional[str] = Field(default=None, description="ID in the source system")
    contactType: str = Field(default="customer", description="customer / vendor / both")
    contactNumber: Optional[str] = Field(default=None, description="Customer/vendor number")
    name: str = Field(default="", description="Name / company")
    # Address and communication fields are all optional — source systems vary in completeness.
    address: Optional[str] = Field(default=None)
    zip: Optional[str] = Field(default=None)
    city: Optional[str] = Field(default=None)
    country: Optional[str] = Field(default=None)
    email: Optional[str] = Field(default=None)
    phone: Optional[str] = Field(default=None)
    vatNumber: Optional[str] = Field(default=None)
    # Scoping: which mandate / feature instance this synced row belongs to.
    mandateId: Optional[str] = Field(default=None)
    featureInstanceId: Optional[str] = Field(default=None)


# Multilingual (en/de/fr) labels for the table and each field, consumed by the UI.
registerModelLabels(
    "TrusteeDataContact",
    {"en": "Contact (Synced)", "de": "Kontakt (Sync)", "fr": "Contact (Sync)"},
    {
        "id": {"en": "ID", "de": "ID", "fr": "ID"},
        "externalId": {"en": "External ID", "de": "Externe ID", "fr": "ID externe"},
        "contactType": {"en": "Type", "de": "Typ", "fr": "Type"},
        "contactNumber": {"en": "Number", "de": "Nummer", "fr": "Numéro"},
        "name": {"en": "Name", "de": "Name", "fr": "Nom"},
        "address": {"en": "Address", "de": "Adresse", "fr": "Adresse"},
        "zip": {"en": "ZIP", "de": "PLZ", "fr": "NPA"},
        "city": {"en": "City", "de": "Ort", "fr": "Ville"},
        "country": {"en": "Country", "de": "Land", "fr": "Pays"},
        "email": {"en": "Email", "de": "E-Mail", "fr": "E-mail"},
        "phone": {"en": "Phone", "de": "Telefon", "fr": "Téléphone"},
        "vatNumber": {"en": "VAT Number", "de": "MWST-Nr.", "fr": "N° TVA"},
        "mandateId": {"en": "Mandate", "de": "Mandat", "fr": "Mandat"},
        "featureInstanceId": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance"},
    },
)
class TrusteeDataAccountBalance(BaseModel):
    """Account balance per period, derived from journal lines or directly from accounting system."""
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    accountNumber: str = Field(description="Account number")
    periodYear: int = Field(description="Fiscal year")
    # periodMonth == 0 is the sentinel for the annual total row.
    periodMonth: int = Field(default=0, description="Month (1-12); 0 = annual total")
    openingBalance: float = Field(default=0.0)
    debitTotal: float = Field(default=0.0)
    creditTotal: float = Field(default=0.0)
    closingBalance: float = Field(default=0.0)
    currency: str = Field(default="CHF")
    # Scoping: which mandate / feature instance this synced row belongs to.
    mandateId: Optional[str] = Field(default=None)
    featureInstanceId: Optional[str] = Field(default=None)


# Multilingual (en/de/fr) labels for the table and each field, consumed by the UI.
registerModelLabels(
    "TrusteeDataAccountBalance",
    {"en": "Account Balance (Synced)", "de": "Kontosaldo (Sync)", "fr": "Solde compte (Sync)"},
    {
        "id": {"en": "ID", "de": "ID", "fr": "ID"},
        "accountNumber": {"en": "Account", "de": "Konto", "fr": "Compte"},
        "periodYear": {"en": "Year", "de": "Jahr", "fr": "Année"},
        "periodMonth": {"en": "Month", "de": "Monat", "fr": "Mois"},
        "openingBalance": {"en": "Opening Balance", "de": "Eröffnungssaldo", "fr": "Solde d'ouverture"},
        "debitTotal": {"en": "Debit Total", "de": "Soll-Umsatz", "fr": "Total débit"},
        "creditTotal": {"en": "Credit Total", "de": "Haben-Umsatz", "fr": "Total crédit"},
        "closingBalance": {"en": "Closing Balance", "de": "Schlusssaldo", "fr": "Solde de clôture"},
        "currency": {"en": "Currency", "de": "Währung", "fr": "Devise"},
        "mandateId": {"en": "Mandate", "de": "Mandat", "fr": "Mandat"},
        "featureInstanceId": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance"},
    },
)
class TrusteeAccountingConfig(BaseModel):
"""Per-instance accounting system configuration with encrypted credentials.

View file

@ -78,6 +78,31 @@ DATA_OBJECTS = [
"label": {"en": "Accounting Sync", "de": "Buchhaltungs-Synchronisation", "fr": "Sync. comptable"},
"meta": {"table": "TrusteeAccountingSync", "fields": ["id", "positionId", "syncStatus", "externalId"]}
},
{
"objectKey": "data.feature.trustee.TrusteeDataAccount",
"label": {"en": "Accounts (Synced)", "de": "Kontenplan (Sync)", "fr": "Plan comptable (Sync)"},
"meta": {"table": "TrusteeDataAccount", "fields": ["id", "accountNumber", "label", "accountType", "accountGroup", "currency", "isActive"]}
},
{
"objectKey": "data.feature.trustee.TrusteeDataJournalEntry",
"label": {"en": "Journal Entries (Synced)", "de": "Buchungen (Sync)", "fr": "Écritures (Sync)"},
"meta": {"table": "TrusteeDataJournalEntry", "fields": ["id", "externalId", "bookingDate", "reference", "description", "currency", "totalAmount"]}
},
{
"objectKey": "data.feature.trustee.TrusteeDataJournalLine",
"label": {"en": "Journal Lines (Synced)", "de": "Buchungszeilen (Sync)", "fr": "Lignes écriture (Sync)"},
"meta": {"table": "TrusteeDataJournalLine", "fields": ["id", "journalEntryId", "accountNumber", "debitAmount", "creditAmount", "currency", "taxCode", "costCenter", "description"]}
},
{
"objectKey": "data.feature.trustee.TrusteeDataContact",
"label": {"en": "Contacts (Synced)", "de": "Kontakte (Sync)", "fr": "Contacts (Sync)"},
"meta": {"table": "TrusteeDataContact", "fields": ["id", "externalId", "contactType", "contactNumber", "name", "address", "zip", "city", "country", "email", "phone", "vatNumber"]}
},
{
"objectKey": "data.feature.trustee.TrusteeDataAccountBalance",
"label": {"en": "Account Balances (Synced)", "de": "Kontosalden (Sync)", "fr": "Soldes comptes (Sync)"},
"meta": {"table": "TrusteeDataAccountBalance", "fields": ["id", "accountNumber", "periodYear", "periodMonth", "openingBalance", "debitTotal", "creditTotal", "closingBalance", "currency"]}
},
{
"objectKey": "data.feature.trustee.*",
"label": {"en": "All Trustee Data", "de": "Alle Treuhand-Daten", "fr": "Toutes les données fiduciaires"},

View file

@ -1481,6 +1481,63 @@ def get_position_sync_status(
return {"items": items}
# ===== Accounting Data Import =====
@router.post("/{instanceId}/accounting/import-data")
@limiter.limit("3/minute")
async def import_accounting_data(
    request: Request,
    instanceId: str = Path(..., description="Feature Instance ID"),
    data: Dict[str, Any] = Body(default={}),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """Import accounting data (chart, journal entries, contacts) from the external system into TrusteeData* tables."""
    # Authorisation first: resolves the mandate this instance belongs to.
    mandateId = _validateInstanceAccess(instanceId, context)
    iface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)

    from .accounting.accountingDataSync import AccountingDataSync

    # Both date bounds are optional; absent bounds mean an unbounded import window.
    return await AccountingDataSync(iface).importData(
        featureInstanceId=instanceId,
        mandateId=mandateId,
        dateFrom=data.get("dateFrom"),
        dateTo=data.get("dateTo"),
    )
@router.get("/{instanceId}/accounting/import-status")
@limiter.limit("30/minute")
def get_import_status(
    request: Request,
    instanceId: str = Path(..., description="Feature Instance ID"),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """Get counts of imported TrusteeData* records for this instance."""
    mandateId = _validateInstanceAccess(instanceId, context)
    interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
    from .datamodelFeatureTrustee import (
        TrusteeDataAccount, TrusteeDataJournalEntry, TrusteeDataJournalLine,
        TrusteeDataContact, TrusteeDataAccountBalance, TrusteeAccountingConfig,
    )

    filt = {"featureInstanceId": instanceId}
    # Response keys mapped to the synced tables they count.
    modelsByKey = {
        "accounts": TrusteeDataAccount,
        "journalEntries": TrusteeDataJournalEntry,
        "journalLines": TrusteeDataJournalLine,
        "contacts": TrusteeDataContact,
        "accountBalances": TrusteeDataAccountBalance,
    }
    counts: Dict[str, Any] = {
        key: len(interface.db.getRecordset(model, recordFilter=filt) or [])
        for key, model in modelsByKey.items()
    }

    # Surface the most recent sync outcome from the active config, if one exists.
    cfgRecords = interface.db.getRecordset(
        TrusteeAccountingConfig,
        recordFilter={"featureInstanceId": instanceId, "isActive": True},
    )
    if cfgRecords:
        cfg = cfgRecords[0]
        for fieldName in ("lastSyncAt", "lastSyncStatus", "lastSyncErrorMessage"):
            counts[fieldName] = cfg.get(fieldName)
    return counts
# ===== Position-Document Query =====
@router.get("/{instanceId}/positions/document/{documentId}", response_model=List[TrusteePosition])

View file

@ -72,6 +72,7 @@ class WorkspaceInputRequest(BaseModel):
fileIds: List[str] = Field(default_factory=list, description="Referenced file IDs")
uploadedFiles: List[str] = Field(default_factory=list, description="Newly uploaded file IDs")
dataSourceIds: List[str] = Field(default_factory=list, description="Active DataSource IDs")
featureDataSourceIds: List[str] = Field(default_factory=list, description="Attached FeatureDataSource IDs")
voiceMode: bool = Field(default=False, description="Enable voice response")
workflowId: Optional[str] = Field(default=None, description="Continue existing workflow")
userLanguage: str = Field(default="en", description="User language code")
@ -184,6 +185,63 @@ def _buildDataSourceContext(chatService, dataSourceIds: List[str]) -> str:
return "\n".join(parts) if found else ""
def _buildFeatureDataSourceContext(featureDataSourceIds: List[str]) -> str:
    """Build a description of attached feature data sources for the agent prompt."""
    from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource
    from modules.security.rbacCatalog import getCatalogService
    from modules.interfaces.interfaceDbApp import getRootInterface

    lines = [
        "The user has attached data from the following feature instances.",
        "Use queryFeatureInstance(featureInstanceId, question) to query this data.",
        "",
    ]
    anyResolved = False
    catalogSvc = getCatalogService()
    rootInterface = getRootInterface()
    # Resolve each referenced feature instance at most once.
    instancesById: Dict[str, Any] = {}

    for fdsId in featureDataSourceIds:
        try:
            rows = rootInterface.db.getRecordset(FeatureDataSource, recordFilter={"id": fdsId})
            if not rows:
                logger.warning(f"FeatureDataSource {fdsId} not found")
                continue
            source = rows[0]
            anyResolved = True

            fiId = source.get("featureInstanceId", "")
            featureCode = source.get("featureCode", "")
            tableName = source.get("tableName", "")
            label = source.get("label", tableName)

            if fiId not in instancesById:
                instancesById[fiId] = rootInterface.getFeatureInstance(fiId)
            instance = instancesById.get(fiId)
            instanceLabel = getattr(instance, "label", fiId) if instance else fiId

            # Look up the field list for this table in the feature's DATA objects.
            tableFields: List[str] = []
            for dataObject in catalogSvc.getDataObjects(featureCode):
                if dataObject.get("meta", {}).get("table") == tableName:
                    tableFields = dataObject.get("meta", {}).get("fields", [])
                    break

            lines.append(
                f"- featureInstanceId: {fiId}\n"
                f" feature: {featureCode}\n"
                f" instance: \"{instanceLabel}\"\n"
                f" table: {tableName} ({label})\n"
                f" fields: {', '.join(tableFields) if tableFields else 'all'}"
            )
        except Exception as e:
            logger.warning(f"Error loading FeatureDataSource {fdsId}: {e}")
    # Empty string signals "nothing attached" to the caller.
    return "\n".join(lines) if anyResolved else ""
def _loadConversationHistory(chatInterface, workflowId: str, currentPrompt: str) -> List[Dict[str, str]]:
"""Load prior messages from DB for follow-up context, excluding the current prompt."""
try:
@ -248,7 +306,7 @@ async def _deriveWorkflowName(prompt: str, aiService) -> str:
# ---------------------------------------------------------------------------
@router.post("/{instanceId}/start/stream")
@limiter.limit("60/minute")
@limiter.limit("300/minute")
async def streamWorkspaceStart(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
@ -264,7 +322,13 @@ async def streamWorkspaceStart(
if userInput.workflowId:
workflow = chatInterface.getWorkflow(userInput.workflowId)
if not workflow:
raise HTTPException(status_code=404, detail=f"Workflow {userInput.workflowId} not found")
logger.warning(f"Workflow {userInput.workflowId} not found, creating new one")
workflow = chatInterface.createWorkflow({
"featureInstanceId": instanceId,
"status": "active",
"name": "",
"workflowMode": "Dynamic",
})
else:
workflow = chatInterface.createWorkflow({
"featureInstanceId": instanceId,
@ -290,6 +354,7 @@ async def streamWorkspaceStart(
prompt=userInput.prompt,
fileIds=userInput.fileIds,
dataSourceIds=userInput.dataSourceIds,
featureDataSourceIds=userInput.featureDataSourceIds,
voiceMode=userInput.voiceMode,
instanceId=instanceId,
user=context.user,
@ -344,13 +409,14 @@ async def _runWorkspaceAgent(
prompt: str,
fileIds: List[str],
dataSourceIds: List[str],
voiceMode: bool,
instanceId: str,
user,
mandateId: str,
aiObjects,
chatInterface,
eventManager,
featureDataSourceIds: List[str] = None,
voiceMode: bool = False,
instanceId: str = "",
user=None,
mandateId: str = "",
aiObjects=None,
chatInterface=None,
eventManager=None,
userLanguage: str = "en",
instanceConfig: Dict[str, Any] = None,
allowedProviders: List[str] = None,
@ -396,6 +462,11 @@ async def _runWorkspaceAgent(
if dsInfo:
enrichedPrompt = f"{prompt}\n\n[Active Data Sources]\n{dsInfo}"
if featureDataSourceIds:
fdsInfo = _buildFeatureDataSourceContext(featureDataSourceIds)
if fdsInfo:
enrichedPrompt = f"{enrichedPrompt}\n\n[Attached Feature Data Sources]\n{fdsInfo}"
conversationHistory = _loadConversationHistory(chatInterface, workflowId, prompt)
accumulatedText = ""
@ -525,7 +596,7 @@ async def _runWorkspaceAgent(
# ---------------------------------------------------------------------------
@router.post("/{instanceId}/{workflowId}/stop")
@limiter.limit("30/minute")
@limiter.limit("120/minute")
async def stopWorkspace(
request: Request,
instanceId: str = Path(...),
@ -549,7 +620,7 @@ async def stopWorkspace(
# ---------------------------------------------------------------------------
@router.get("/{instanceId}/workflows")
@limiter.limit("60/minute")
@limiter.limit("300/minute")
async def listWorkspaceWorkflows(
request: Request,
instanceId: str = Path(...),
@ -585,7 +656,7 @@ class UpdateWorkflowRequest(BaseModel):
@router.patch("/{instanceId}/workflows/{workflowId}")
@limiter.limit("60/minute")
@limiter.limit("300/minute")
async def patchWorkspaceWorkflow(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
@ -620,7 +691,7 @@ async def patchWorkspaceWorkflow(
@router.delete("/{instanceId}/workflows/{workflowId}")
@limiter.limit("30/minute")
@limiter.limit("120/minute")
async def deleteWorkspaceWorkflow(
request: Request,
instanceId: str = Path(...),
@ -638,7 +709,7 @@ async def deleteWorkspaceWorkflow(
@router.post("/{instanceId}/workflows")
@limiter.limit("30/minute")
@limiter.limit("120/minute")
async def createWorkspaceWorkflow(
request: Request,
instanceId: str = Path(...),
@ -661,7 +732,7 @@ async def createWorkspaceWorkflow(
@router.get("/{instanceId}/workflows/{workflowId}/messages")
@limiter.limit("60/minute")
@limiter.limit("300/minute")
async def getWorkspaceMessages(
request: Request,
instanceId: str = Path(...),
@ -691,7 +762,7 @@ async def getWorkspaceMessages(
# ---------------------------------------------------------------------------
@router.get("/{instanceId}/files")
@limiter.limit("60/minute")
@limiter.limit("300/minute")
async def listWorkspaceFiles(
request: Request,
instanceId: str = Path(...),
@ -723,7 +794,7 @@ async def listWorkspaceFiles(
@router.get("/{instanceId}/files/{fileId}/content")
@limiter.limit("60/minute")
@limiter.limit("300/minute")
async def getFileContent(
request: Request,
instanceId: str = Path(...),
@ -751,7 +822,7 @@ async def getFileContent(
@router.get("/{instanceId}/folders")
@limiter.limit("60/minute")
@limiter.limit("300/minute")
async def listWorkspaceFolders(
request: Request,
instanceId: str = Path(...),
@ -775,7 +846,7 @@ async def listWorkspaceFolders(
@router.get("/{instanceId}/datasources")
@limiter.limit("60/minute")
@limiter.limit("300/minute")
async def listWorkspaceDataSources(
request: Request,
instanceId: str = Path(...),
@ -798,7 +869,7 @@ async def listWorkspaceDataSources(
@router.get("/{instanceId}/connections")
@limiter.limit("60/minute")
@limiter.limit("300/minute")
async def listWorkspaceConnections(
request: Request,
instanceId: str = Path(...),
@ -843,7 +914,7 @@ class CreateDataSourceRequest(BaseModel):
@router.post("/{instanceId}/datasources")
@limiter.limit("60/minute")
@limiter.limit("300/minute")
async def createWorkspaceDataSource(
request: Request,
instanceId: str = Path(...),
@ -871,7 +942,7 @@ async def createWorkspaceDataSource(
@router.delete("/{instanceId}/datasources/{dataSourceId}")
@limiter.limit("60/minute")
@limiter.limit("300/minute")
async def deleteWorkspaceDataSource(
request: Request,
instanceId: str = Path(...),
@ -892,8 +963,204 @@ async def deleteWorkspaceDataSource(
return JSONResponse({"success": True})
# ---- Feature Connections & Feature Data Sources ----
@router.get("/{instanceId}/feature-connections")
@limiter.limit("120/minute")
async def listFeatureConnections(
    request: Request,
    instanceId: str = Path(...),
    context: RequestContext = Depends(getRequestContext),
):
    """List feature instances the user has access to across ALL mandates."""
    _validateInstanceAccess(instanceId, context)
    from modules.interfaces.interfaceDbApp import getRootInterface
    from modules.security.rbacCatalog import getCatalogService
    from modules.datamodels.datamodelUam import Mandate

    rootIf = getRootInterface()
    userId = str(context.user.id)
    catalog = getCatalogService()
    featureCodesWithData = catalog.getFeaturesWithDataObjects()

    userMandates = rootIf.getUserMandates(userId)
    if not userMandates:
        return JSONResponse({"featureConnections": []})

    # Resolve a display label per mandate up front; the id itself is the fallback.
    mandateLabels: dict = {}
    for membership in userMandates:
        try:
            rows = rootIf.db.getRecordset(Mandate, recordFilter={"id": membership.mandateId})
            if rows:
                row = rows[0]
                mandateLabels[membership.mandateId] = row.get("label") or row.get("name") or membership.mandateId
        except Exception:
            mandateLabels[membership.mandateId] = membership.mandateId

    connections = []
    visited: set = set()
    for membership in userMandates:
        for inst in rootIf.getFeatureInstancesByMandate(membership.mandateId):
            # De-duplicate instances that show up under several mandates.
            if inst.id in visited:
                continue
            visited.add(inst.id)
            # Only enabled instances of features that actually expose DATA objects.
            if not inst.enabled:
                continue
            if inst.featureCode not in featureCodesWithData:
                continue
            # RBAC: the user needs an enabled access grant on this instance.
            featureAccess = rootIf.getFeatureAccess(userId, inst.id)
            if not featureAccess or not featureAccess.enabled:
                continue
            featureDef = catalog.getFeatureDefinition(inst.featureCode) or {}
            dataObjects = catalog.getDataObjects(inst.featureCode)
            mandateLabel = mandateLabels.get(inst.mandateId, "")
            displayLabel = inst.label or inst.featureCode
            if mandateLabel:
                displayLabel = f"{displayLabel} ({mandateLabel})"
            connections.append({
                "featureInstanceId": inst.id,
                "featureCode": inst.featureCode,
                "mandateId": inst.mandateId,
                "label": displayLabel,
                "icon": featureDef.get("icon", "mdi-database"),
                "tableCount": len(dataObjects),
            })
    return JSONResponse({"featureConnections": connections})
@router.get("/{instanceId}/feature-connections/{fiId}/tables")
@limiter.limit("120/minute")
async def listFeatureConnectionTables(
    request: Request,
    instanceId: str = Path(...),
    fiId: str = Path(..., description="Feature instance ID"),
    context: RequestContext = Depends(getRequestContext),
):
    """List data tables (DATA_OBJECTS) for a feature instance, filtered by RBAC."""
    _validateInstanceAccess(instanceId, context)
    from modules.interfaces.interfaceDbApp import getRootInterface
    from modules.security.rbacCatalog import getCatalogService

    inst = getRootInterface().getFeatureInstance(fiId)
    if not inst:
        raise HTTPException(status_code=404, detail="Feature instance not found")
    mandateId = str(inst.mandateId) if inst.mandateId else None

    catalog = getCatalogService()
    try:
        # Filter by the user's read permission; if RBAC evaluation is
        # unavailable for any reason, fall back to the unfiltered list.
        from modules.security.rbac import RbacClass
        from modules.security.rootAccess import getRootDbAppConnector
        dbApp = getRootDbAppConnector()
        accessible = catalog.getAccessibleDataObjects(
            featureCode=inst.featureCode,
            rbacInstance=RbacClass(dbApp, dbApp=dbApp),
            user=context.user,
            mandateId=mandateId or "",
            featureInstanceId=fiId,
        )
    except Exception:
        accessible = catalog.getDataObjects(inst.featureCode)

    tables = [
        {
            "objectKey": obj.get("objectKey", ""),
            "tableName": obj.get("meta", {}).get("table", ""),
            "label": obj.get("label", {}),
            "fields": obj.get("meta", {}).get("fields", []),
        }
        for obj in accessible
    ]
    return JSONResponse({"tables": tables})
class CreateFeatureDataSourceRequest(BaseModel):
    """Request body for adding a feature table as data source."""
    # The feature instance whose data table is being attached to the workspace.
    featureInstanceId: str = Field(description="Feature instance ID")
    featureCode: str = Field(description="Feature code")
    # Must match a table registered in the feature's DATA_OBJECTS catalog.
    tableName: str = Field(description="Table name from DATA_OBJECTS")
    objectKey: str = Field(description="RBAC object key")
    label: str = Field(description="User-visible label")
@router.post("/{instanceId}/feature-datasources")
@limiter.limit("300/minute")
async def createFeatureDataSource(
    request: Request,
    instanceId: str = Path(...),
    body: CreateFeatureDataSourceRequest = Body(...),
    context: RequestContext = Depends(getRequestContext),
):
    """Create a FeatureDataSource for this workspace instance."""
    _validateInstanceAccess(instanceId, context)
    from modules.interfaces.interfaceDbApp import getRootInterface
    from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource

    rootIf = getRootInterface()
    # Prefer the mandate of the source instance; fall back to the request context.
    inst = rootIf.getFeatureInstance(body.featureInstanceId)
    if inst:
        mandateId = str(inst.mandateId)
    else:
        mandateId = str(context.mandateId) if context.mandateId else ""

    record = FeatureDataSource(
        featureInstanceId=body.featureInstanceId,
        featureCode=body.featureCode,
        tableName=body.tableName,
        objectKey=body.objectKey,
        label=body.label,
        mandateId=mandateId,
        userId=str(context.user.id),
        workspaceInstanceId=instanceId,
    )
    created = rootIf.db.recordCreate(FeatureDataSource, record.model_dump())
    # Some DB backends return the created row; otherwise echo the model.
    return JSONResponse(created if isinstance(created, dict) else record.model_dump())
@router.get("/{instanceId}/feature-datasources")
@limiter.limit("300/minute")
async def listFeatureDataSources(
    request: Request,
    instanceId: str = Path(...),
    context: RequestContext = Depends(getRequestContext),
):
    """List FeatureDataSources attached to this workspace instance."""
    _validateInstanceAccess(instanceId, context)
    from modules.interfaces.interfaceDbApp import getRootInterface
    from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource

    rows = getRootInterface().db.getRecordset(
        FeatureDataSource,
        recordFilter={"workspaceInstanceId": instanceId},
    )
    # Normalise a falsy recordset to an empty list for the JSON payload.
    return JSONResponse({"featureDataSources": rows or []})
@router.delete("/{instanceId}/feature-datasources/{featureDataSourceId}")
@limiter.limit("300/minute")
async def deleteFeatureDataSource(
    request: Request,
    instanceId: str = Path(...),
    featureDataSourceId: str = Path(...),
    context: RequestContext = Depends(getRequestContext),
):
    """Delete a FeatureDataSource belonging to this workspace instance.

    Raises:
        HTTPException: 404 if no FeatureDataSource with this id is attached
            to the given workspace instance.
    """
    _validateInstanceAccess(instanceId, context)
    from modules.interfaces.interfaceDbApp import getRootInterface
    from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource

    rootIf = getRootInterface()
    # Scope check: without this, any user authorised for ONE workspace instance
    # could delete data sources of ANOTHER instance by guessing ids (IDOR).
    records = rootIf.db.getRecordset(
        FeatureDataSource,
        recordFilter={"id": featureDataSourceId, "workspaceInstanceId": instanceId},
    )
    if not records:
        raise HTTPException(status_code=404, detail="FeatureDataSource not found")
    rootIf.db.recordDelete(FeatureDataSource, featureDataSourceId)
    return JSONResponse({"success": True})
@router.get("/{instanceId}/connections/{connectionId}/services")
@limiter.limit("30/minute")
@limiter.limit("120/minute")
async def listConnectionServices(
request: Request,
instanceId: str = Path(...),
@ -950,7 +1217,7 @@ async def listConnectionServices(
@router.get("/{instanceId}/connections/{connectionId}/browse")
@limiter.limit("60/minute")
@limiter.limit("300/minute")
async def browseConnectionService(
request: Request,
instanceId: str = Path(...),
@ -997,7 +1264,7 @@ async def browseConnectionService(
# ---------------------------------------------------------------------------
@router.post("/{instanceId}/voice/transcribe")
@limiter.limit("30/minute")
@limiter.limit("120/minute")
async def transcribeVoice(
request: Request,
instanceId: str = Path(...),
@ -1026,7 +1293,7 @@ async def transcribeVoice(
@router.post("/{instanceId}/voice/synthesize")
@limiter.limit("30/minute")
@limiter.limit("120/minute")
async def synthesizeVoice(
request: Request,
instanceId: str = Path(...),
@ -1046,7 +1313,7 @@ async def synthesizeVoice(
# =========================================================================
@router.get("/{instanceId}/settings/voice")
@limiter.limit("30/minute")
@limiter.limit("120/minute")
async def getVoiceSettings(
request: Request,
instanceId: str = Path(...),
@ -1071,7 +1338,7 @@ async def getVoiceSettings(
@router.put("/{instanceId}/settings/voice")
@limiter.limit("30/minute")
@limiter.limit("120/minute")
async def updateVoiceSettings(
request: Request,
instanceId: str = Path(...),
@ -1109,7 +1376,7 @@ async def updateVoiceSettings(
@router.get("/{instanceId}/voice/languages")
@limiter.limit("30/minute")
@limiter.limit("120/minute")
async def getVoiceLanguages(
request: Request,
instanceId: str = Path(...),
@ -1125,7 +1392,7 @@ async def getVoiceLanguages(
@router.get("/{instanceId}/voice/voices")
@limiter.limit("30/minute")
@limiter.limit("120/minute")
async def getVoiceVoices(
request: Request,
instanceId: str = Path(...),
@ -1142,7 +1409,7 @@ async def getVoiceVoices(
@router.post("/{instanceId}/voice/test")
@limiter.limit("10/minute")
@limiter.limit("30/minute")
async def testVoice(
request: Request,
instanceId: str = Path(...),
@ -1180,7 +1447,7 @@ async def testVoice(
@router.get("/{instanceId}/pending-edits")
@limiter.limit("30/minute")
@limiter.limit("120/minute")
async def getPendingEdits(
request: Request,
instanceId: str = Path(...),
@ -1193,7 +1460,7 @@ async def getPendingEdits(
@router.post("/{instanceId}/edit/{editId}/accept")
@limiter.limit("30/minute")
@limiter.limit("120/minute")
async def acceptEdit(
request: Request,
instanceId: str = Path(...),
@ -1230,7 +1497,7 @@ async def acceptEdit(
@router.post("/{instanceId}/edit/{editId}/reject")
@limiter.limit("30/minute")
@limiter.limit("120/minute")
async def rejectEdit(
request: Request,
instanceId: str = Path(...),
@ -1256,7 +1523,7 @@ async def rejectEdit(
@router.post("/{instanceId}/edit/accept-all")
@limiter.limit("10/minute")
@limiter.limit("30/minute")
async def acceptAllEdits(
request: Request,
instanceId: str = Path(...),
@ -1287,7 +1554,7 @@ async def acceptAllEdits(
@router.post("/{instanceId}/edit/reject-all")
@limiter.limit("10/minute")
@limiter.limit("30/minute")
async def rejectAllEdits(
request: Request,
instanceId: str = Path(...),

View file

@ -441,6 +441,72 @@ def move_folder(
raise HTTPException(status_code=500, detail=str(e))
@router.get("/folders/{folderId}/download")
@limiter.limit("10/minute")
def download_folder(
    request: Request,
    folderId: str = Path(..., description="ID of the folder to download as ZIP"),
    currentUser: User = Depends(getCurrentUser),
    context: RequestContext = Depends(getRequestContext)
) -> Response:
    """Download a folder (including subfolders) as a ZIP archive.

    Returns:
        A ``Response`` with ``application/zip`` content; the filename uses the
        RFC 5987 ``filename*`` form so non-ASCII folder names survive.

    Raises:
        HTTPException: 404 if the folder is missing or empty, 500 on
            unexpected errors.
    """
    import io
    import zipfile
    import urllib.parse
    try:
        mgmt = interfaceDbManagement.getInterface(
            currentUser,
            mandateId=str(context.mandateId) if context.mandateId else None,
            featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None,
        )
        folder = mgmt.getFolder(folderId)
        if not folder:
            raise HTTPException(status_code=404, detail=f"Folder {folderId} not found")
        folderName = folder.get("name", "download")

        visited: set = set()  # folder ids already walked

        def _collectFiles(parentId: str, pathPrefix: str):
            """Recursively collect (zipPath, fileId) tuples."""
            # Cycle guard: a corrupted parent chain in the DB (folder reachable
            # from one of its own descendants) would otherwise recurse forever.
            if parentId in visited:
                return []
            visited.add(parentId)
            entries = []
            for f in mgmt._getFilesByCurrentUser(recordFilter={"folderId": parentId}):
                fname = f.get("fileName") or f.get("name") or f.get("id", "file")
                entries.append((f"{pathPrefix}{fname}", f["id"]))
            for sub in mgmt.listFolders(parentId=parentId):
                subName = sub.get("name", sub["id"])
                entries.extend(_collectFiles(sub["id"], f"{pathPrefix}{subName}/"))
            return entries

        fileEntries = _collectFiles(folderId, "")
        if not fileEntries:
            raise HTTPException(status_code=404, detail="Folder is empty")

        buf = io.BytesIO()
        with zipfile.ZipFile(buf, "w", zipfile.ZIP_DEFLATED) as zf:
            for zipPath, fileId in fileEntries:
                data = mgmt.getFileData(fileId)
                if data:  # skip entries whose payload is missing
                    zf.writestr(zipPath, data)
        zipBytes = buf.getvalue()

        encodedName = urllib.parse.quote(f"{folderName}.zip")
        return Response(
            content=zipBytes,
            media_type="application/zip",
            headers={
                "Content-Disposition": f"attachment; filename*=UTF-8''{encodedName}"
            }
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error downloading folder as ZIP: {e}")
        raise HTTPException(status_code=500, detail=f"Error downloading folder: {str(e)}")
@router.post("/batch-delete")
@limiter.limit("10/minute")
def batch_delete_items(

View file

@ -119,7 +119,50 @@ class RbacCatalogService:
if featureCode:
return [obj for obj in self._dataObjects.values() if obj["featureCode"] == featureCode]
return list(self._dataObjects.values())
def getAccessibleDataObjects(
    self,
    featureCode: str,
    rbacInstance,
    user,
    mandateId: str,
    featureInstanceId: str,
) -> List[Dict[str, Any]]:
    """Get DATA objects filtered by RBAC read permission for the user.

    Args:
        featureCode: Feature code to filter by
        rbacInstance: RbacClass instance for permission checks
        user: User object
        mandateId: Mandate scope
        featureInstanceId: Feature instance scope

    Returns:
        The subset of DATA objects for which the user has a view grant or a
        non-"n" read level. Objects whose permission check raises are treated
        as inaccessible (fail closed), but the failure is logged instead of
        being silently discarded.
    """
    import logging
    from modules.datamodels.datamodelRbac import AccessRuleContext

    log = logging.getLogger(__name__)
    accessible = []
    for obj in self.getDataObjects(featureCode):
        objectKey = obj.get("objectKey", "")
        try:
            perms = rbacInstance.getUserPermissions(
                user=user,
                context=AccessRuleContext.DATA,
                item=objectKey,
                mandateId=mandateId,
                featureInstanceId=featureInstanceId,
            )
            # Either an explicit view grant or any read level beyond "n" counts.
            if perms.view or perms.read.value != "n":
                accessible.append(obj)
        except Exception as e:
            # Fail closed for this object, but leave a trace for debugging
            # rather than swallowing the error with a bare `pass`.
            log.debug(f"RBAC permission check failed for {objectKey}: {e}")
    return accessible
def getFeaturesWithDataObjects(self) -> List[str]:
    """Get feature codes that have at least one registered DATA object."""
    return list({entry["featureCode"] for entry in self._dataObjects.values()})
def getAllObjects(self, featureCode: Optional[str] = None) -> List[Dict[str, Any]]:
    """Get all RBAC objects (UI + RESOURCE + DATA), optionally filtered by feature."""
    combined: List[Dict[str, Any]] = []
    for fetch in (self.getUiObjects, self.getResourceObjects, self.getDataObjects):
        combined.extend(fetch(featureCode))
    return combined

View file

@ -21,6 +21,7 @@ from modules.serviceCenter.services.serviceAgent.conversationManager import (
ConversationManager, buildSystemPrompt
)
from modules.shared.timeUtils import getUtcTimestamp
from modules.shared.jsonUtils import closeJsonStructures
logger = logging.getLogger(__name__)
@ -64,7 +65,12 @@ async def runAgentLoop(
tools = toolRegistry.getTools()
toolDefinitions = toolRegistry.formatToolsForFunctionCalling()
toolsText = toolRegistry.formatToolsForPrompt()
# Text-based tool descriptions are ONLY used as fallback when native function
# calling is unavailable. Including both creates conflicting instructions
# (text ```tool_call format vs native tool_use blocks) and can cause the model
# to respond with plain text instead of actual tool calls.
toolsText = "" if toolDefinitions else toolRegistry.formatToolsForPrompt()
systemPrompt = buildSystemPrompt(tools, toolsText, userLanguage=userLanguage)
conversation = ConversationManager(systemPrompt)
@ -192,6 +198,29 @@ async def runAgentLoop(
toolCalls = _parseToolCalls(aiResponse)
textContent = _extractTextContent(aiResponse)
logger.debug(
f"Round {state.currentRound} AI response: model={aiResponse.modelName}, "
f"toolCalls={len(toolCalls)}, nativeToolCalls={'yes' if aiResponse.toolCalls else 'no'}, "
f"contentLen={len(aiResponse.content)}, streamedLen={len(streamedText)}"
)
# Empty response (no content, no tool calls) = model returned nothing useful.
# Burn the round but let the loop continue so the next iteration can retry
# (the failover mechanism in the AI layer will try alternative models).
if not toolCalls and not textContent and not streamedText:
logger.warning(
f"Round {state.currentRound}: AI returned empty response "
f"(model={aiResponse.modelName}). Retrying next round."
)
conversation.addUserMessage(
"Your previous response was empty. Please use the available tools "
"to accomplish the task. Start by planning the steps, then call the "
"appropriate tools."
)
roundLog.durationMs = int((time.time() - roundStartTime) * 1000)
trace.rounds.append(roundLog)
continue
if textContent and not streamedText:
yield AgentEvent(type=AgentEventTypeEnum.MESSAGE, content=textContent)
@ -228,7 +257,8 @@ async def runAgentLoop(
args=next((tc.args for tc in toolCalls if tc.id == result.toolCallId), {}),
success=result.success,
durationMs=result.durationMs,
error=result.error
error=result.error,
resultData=result.data[:300] if result.data else "",
))
if not result.success:
logger.warning(f"Tool '{result.toolName}' failed: {result.error}")
@ -282,6 +312,8 @@ async def runAgentLoop(
trace.totalCostCHF = state.totalCostCHF
trace.abortReason = state.abortReason
artifactSummary = _buildArtifactSummary(trace.rounds)
yield AgentEvent(
type=AgentEventTypeEnum.AGENT_SUMMARY,
data={
@ -291,7 +323,8 @@ async def runAgentLoop(
"costCHF": round(state.totalCostCHF, 4),
"processingTime": round(state.totalProcessingTime, 2),
"status": state.status.value,
"abortReason": state.abortReason
"abortReason": state.abortReason,
"artifacts": artifactSummary,
}
)
@ -351,46 +384,19 @@ async def _executeToolCalls(toolCalls: List[ToolCallRequest],
def _repairTruncatedJson(raw: str) -> Optional[Dict[str, Any]]:
"""Try to repair truncated JSON from LLM output by closing open brackets/braces.
"""Repair truncated JSON using the shared jsonUtils toolbox.
Uses closeJsonStructures which handles open strings, brackets, braces,
and trailing commas with stack-based structure tracking.
Returns parsed dict on success, None if unrecoverable.
"""
if not raw or not raw.strip().startswith("{"):
return None
openBraces = raw.count("{") - raw.count("}")
openBrackets = raw.count("[") - raw.count("]")
inString = False
lastQuoteEscaped = False
quoteCount = 0
for ch in raw:
if ch == '"' and not lastQuoteEscaped:
quoteCount += 1
inString = not inString
lastQuoteEscaped = (ch == '\\')
candidate = raw
if quoteCount % 2 != 0:
candidate += '"'
candidate += "]" * max(0, openBrackets)
candidate += "}" * max(0, openBraces)
try:
return json.loads(candidate)
except json.JSONDecodeError:
pass
lastComma = candidate.rfind(",")
if lastComma > 0:
trimmed = candidate[:lastComma] + candidate[lastComma + 1:]
try:
return json.loads(trimmed)
except json.JSONDecodeError:
pass
return None
closed = closeJsonStructures(raw)
return json.loads(closed)
except (json.JSONDecodeError, Exception):
return None
def _parseToolCalls(aiResponse: AiCallResponse) -> List[ToolCallRequest]:
@ -409,7 +415,14 @@ def _parseToolCalls(aiResponse: AiCallResponse) -> List[ToolCallRequest]:
parsedArgs = _repairTruncatedJson(rawArgs)
if parsedArgs is None:
logger.warning(f"Unrecoverable truncated JSON for '{tc['function']['name']}': {rawArgs[:200]}")
parsedArgs = {"_parseError": f"Truncated JSON arguments model output was cut off. Raw start: {rawArgs[:120]}"}
parsedArgs = {"_parseError": (
"Your tool call arguments were truncated (output cut off by token limit). "
"The content is too large for a single tool call. Strategies:\n"
"1. For new files: use writeFile(mode='create') with the first part, "
"then writeFile(fileId=..., mode='append') for subsequent parts (~8000 chars each).\n"
"2. For editing existing files: use replaceInFile to change only the specific parts.\n"
"3. For documentation: split into multiple smaller files."
)}
else:
logger.info(f"Repaired truncated JSON for '{tc['function']['name']}'")
else:
@ -471,3 +484,24 @@ def _buildProgressSummary(state: AgentState, reason: str) -> str:
f"- Cost: {state.totalCostCHF:.4f} CHF\n"
f"- Processing time: {state.totalProcessingTime:.1f}s"
)
# Tool names whose successful calls produce persistent artifacts (file/folder
# mutations, rendered documents, generated images); used by
# _buildArtifactSummary to pick which tool calls appear in the run summary.
_ARTIFACT_TOOLS = {"writeFile", "replaceInFile", "deleteFile", "renameFile", "copyFile",
                   "createFolder", "deleteFolder", "renderDocument", "generateImage"}
def _buildArtifactSummary(roundLogs: List[AgentRoundLog]) -> str:
    """Extract file operations and key results from all agent rounds.

    Produces a concise summary persisted as _workflowArtifacts so
    follow-up rounds have immediate context (file IDs, names, actions).
    """
    entries = [
        f"- {call.resultData}" if call.resultData else f"- {call.toolName}"
        for log in roundLogs
        for call in log.toolCalls
        if call.toolName in _ARTIFACT_TOOLS and call.success
    ]
    if not entries:
        return ""
    return "File operations in this run:\n" + "\n".join(entries)

View file

@ -296,6 +296,29 @@ def buildSystemPrompt(
"Think step by step. Call tools when you need information or need to perform actions. "
"When you have enough information to answer, respond directly without calling tools.\n\n"
)
prompt += (
"## Working Guidelines\n\n"
"### Workflow Context\n"
"When continuing a workflow (follow-up message), the Relevant Knowledge section contains "
"artifacts from previous rounds (file IDs, operations). Use this context instead of "
"re-searching or re-listing files.\n\n"
"### Efficient File Editing\n"
"- Use readFile with offset/limit to read specific line ranges of large files.\n"
"- Use searchInFileContent to find text before editing.\n"
"- Use replaceInFile for targeted edits (preferred over rewriting entire files).\n"
"- Use writeFile(mode='overwrite') only when the entire content must change.\n\n"
"### Large Content Strategy\n"
"- For content larger than ~8000 characters: use writeFile(mode='create') for the first "
"part, then writeFile(fileId=..., mode='append') for subsequent parts.\n"
"- Split large documentation into multiple focused files rather than one huge document.\n"
"- Structure outputs so files reference each other (e.g. index.md linking to sections).\n\n"
"### Code Generation\n"
"- Prefer modular file structures over monolithic files.\n"
"- When generating applications, create separate files for logical components.\n"
"- Always plan the structure before writing code.\n\n"
)
if toolsFormatted:
prompt += f"Available Tools:\n{toolsFormatted}\n\n"
prompt += (

View file

@ -111,6 +111,7 @@ class ToolCallLog(BaseModel):
success: bool = True
durationMs: int = 0
error: Optional[str] = None
resultData: str = Field(default="", description="Short result summary for artifact tracking")
class AgentRoundLog(BaseModel):

View file

@ -0,0 +1,253 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Feature Data Sub-Agent.
Specialized mini-agent that queries feature-instance data tables. Receives
schema context (fields, descriptions) for the selected tables and has two
tools: browseTable and queryTable. Runs its own agent loop (max 5 rounds,
low budget) and returns structured results back to the main agent.
"""
import json
import logging
from typing import Any, Callable, Awaitable, Dict, List, Optional
from modules.datamodels.datamodelAi import (
AiCallRequest, AiCallOptions, AiCallResponse, OperationTypeEnum,
)
from modules.serviceCenter.services.serviceAgent.agentLoop import runAgentLoop
from modules.serviceCenter.services.serviceAgent.datamodelAgent import (
AgentConfig, AgentEvent, AgentEventTypeEnum, ToolResult,
)
from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
from modules.serviceCenter.services.serviceAgent.featureDataProvider import FeatureDataProvider
logger = logging.getLogger(__name__)
_MAX_ROUNDS = 5
_MAX_COST_CHF = 0.10
async def runFeatureDataAgent(
    question: str,
    featureInstanceId: str,
    featureCode: str,
    selectedTables: List[Dict[str, Any]],
    mandateId: str,
    userId: str,
    aiCallFn: Callable[[AiCallRequest], Awaitable[AiCallResponse]],
    dbConnector,
    instanceLabel: str = "",
) -> str:
    """Run the feature data sub-agent and return the textual result.

    Args:
        question: The user/main-agent question to answer using feature data.
        featureInstanceId: Feature instance to scope queries.
        featureCode: Feature code (trustee, commcoach, ...).
        selectedTables: List of DATA_OBJECT dicts the user selected.
        mandateId: Mandate scope.
        userId: Calling user ID.
        aiCallFn: AI call function (with billing).
        dbConnector: DatabaseConnector for queries.
        instanceLabel: Human-readable instance name for context.

    Returns:
        Plain-text answer produced by the sub-agent.
    """
    dataProvider = FeatureDataProvider(dbConnector)
    subAgentTools = _buildSubAgentTools(dataProvider, featureInstanceId, mandateId)

    # Replace catalog-declared field lists with the real DB columns so the
    # model sees an accurate schema (mutates the meta dicts in place).
    for tableObj in selectedTables:
        meta = tableObj.get("meta", {})
        tableName = meta.get("table", "")
        if not tableName:
            continue
        actualColumns = dataProvider.getActualColumns(tableName)
        if actualColumns:
            meta["fields"] = actualColumns

    schemaContext = _buildSchemaContext(featureCode, instanceLabel, selectedTables)
    agentPrompt = f"{schemaContext}\n\nUser question:\n{question}"
    agentConfig = AgentConfig(maxRounds=_MAX_ROUNDS, maxCostCHF=_MAX_COST_CHF)

    async def _zeroWorkflowCost() -> float:
        # Sub-agent budget is enforced via its own AgentConfig, not the workflow.
        return 0.0

    answer = ""
    async for agentEvent in runAgentLoop(
        prompt=agentPrompt,
        toolRegistry=subAgentTools,
        config=agentConfig,
        aiCallFn=aiCallFn,
        getWorkflowCostFn=_zeroWorkflowCost,
        workflowId=f"fda-{featureInstanceId[:8]}",
        userId=userId,
        featureInstanceId=featureInstanceId,
        mandateId=mandateId,
    ):
        # FINAL replaces anything streamed so far; MESSAGE chunks accumulate.
        if agentEvent.type == AgentEventTypeEnum.FINAL and agentEvent.content:
            answer = agentEvent.content
        elif agentEvent.type == AgentEventTypeEnum.MESSAGE and agentEvent.content:
            answer += agentEvent.content
    return answer or "(no data returned by feature agent)"
# ------------------------------------------------------------------
# tool registration
# ------------------------------------------------------------------
def _buildSubAgentTools(
    provider: FeatureDataProvider,
    featureInstanceId: str,
    mandateId: str,
) -> ToolRegistry:
    """Register browseTable and queryTable as sub-agent tools.

    Args:
        provider: FeatureDataProvider that executes the scoped queries.
        featureInstanceId: Feature instance every tool call is bound to.
        mandateId: Mandate scope every tool call is bound to.

    Returns:
        A ToolRegistry containing the two read-only tools.
    """
    registry = ToolRegistry()

    async def _browseTable(args: Dict[str, Any], context: Dict[str, Any]):
        # Handler for the browseTable tool: paginated row listing.
        tableName = args.get("tableName", "")
        limit = args.get("limit", 50)
        offset = args.get("offset", 0)
        fields = args.get("fields")
        if not tableName:
            return ToolResult(toolCallId="", toolName="browseTable", success=False, error="tableName required")
        result = provider.browseTable(
            tableName=tableName,
            featureInstanceId=featureInstanceId,
            mandateId=mandateId,
            fields=fields,
            # hard server-side cap, whatever limit the model requested
            limit=min(limit, 200),
            offset=offset,
        )
        return ToolResult(
            toolCallId="", toolName="browseTable",
            # the provider reports failures via an "error" key instead of raising
            success="error" not in result,
            # default=str serializes dates/decimals; the slice caps context size
            data=json.dumps(result, default=str, ensure_ascii=False)[:30000],
            error=result.get("error"),
        )

    async def _queryTable(args: Dict[str, Any], context: Dict[str, Any]):
        # Handler for the queryTable tool: filtered/ordered queries.
        tableName = args.get("tableName", "")
        filters = args.get("filters", [])
        fields = args.get("fields")
        orderBy = args.get("orderBy")
        limit = args.get("limit", 50)
        offset = args.get("offset", 0)
        if not tableName:
            return ToolResult(toolCallId="", toolName="queryTable", success=False, error="tableName required")
        result = provider.queryTable(
            tableName=tableName,
            featureInstanceId=featureInstanceId,
            mandateId=mandateId,
            filters=filters,
            fields=fields,
            orderBy=orderBy,
            # hard server-side cap, same as browseTable
            limit=min(limit, 200),
            offset=offset,
        )
        return ToolResult(
            toolCallId="", toolName="queryTable",
            success="error" not in result,
            data=json.dumps(result, default=str, ensure_ascii=False)[:30000],
            error=result.get("error"),
        )

    registry.register(
        "browseTable", _browseTable,
        description="List rows from a feature data table with pagination.",
        parameters={
            "type": "object",
            "properties": {
                "tableName": {"type": "string", "description": "Name of the table to browse"},
                "fields": {
                    "type": "array", "items": {"type": "string"},
                    "description": "Optional list of fields to return (default: all)",
                },
                "limit": {"type": "integer", "description": "Max rows to return (default 50, max 200)"},
                "offset": {"type": "integer", "description": "Row offset for pagination"},
            },
            "required": ["tableName"],
        },
        readOnly=True,
    )
    registry.register(
        "queryTable", _queryTable,
        description=(
            "Query a feature data table with filters, field selection, and ordering. "
            "Filters: [{\"field\": \"status\", \"op\": \"=\", \"value\": \"active\"}]. "
            "Operators: =, !=, >, <, >=, <=, LIKE, ILIKE, IS NULL, IS NOT NULL."
        ),
        parameters={
            "type": "object",
            "properties": {
                "tableName": {"type": "string", "description": "Name of the table to query"},
                "filters": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "field": {"type": "string"},
                            "op": {"type": "string"},
                            "value": {},
                        },
                    },
                    "description": "Filter conditions",
                },
                "fields": {
                    "type": "array", "items": {"type": "string"},
                    "description": "Optional list of fields to return",
                },
                "orderBy": {"type": "string", "description": "Field name to order by"},
                "limit": {"type": "integer", "description": "Max rows (default 50, max 200)"},
                "offset": {"type": "integer", "description": "Row offset"},
            },
            "required": ["tableName"],
        },
        readOnly=True,
    )
    return registry
# ------------------------------------------------------------------
# context building
# ------------------------------------------------------------------
def _buildSchemaContext(
featureCode: str,
instanceLabel: str,
selectedTables: List[Dict[str, Any]],
) -> str:
"""Build a system-level context block describing available tables."""
parts = [
f"You are a data query assistant for the '{featureCode}' feature",
]
if instanceLabel:
parts[0] += f' (instance: "{instanceLabel}")'
parts[0] += "."
parts.append(
"You have access to the following data tables. "
"Use browseTable to list rows and queryTable to filter/search."
)
parts.append("")
for obj in selectedTables:
meta = obj.get("meta", {})
tbl = meta.get("table", "?")
fields = meta.get("fields", [])
label = obj.get("label", {})
labelStr = label.get("en") or label.get("de") or tbl
parts.append(f"Table: {tbl} ({labelStr})")
if fields:
parts.append(f" Fields: {', '.join(fields)}")
parts.append("")
parts.append(
"Answer the user's question using the data from these tables. "
"Be precise, cite row counts, and format data clearly."
)
return "\n".join(parts)

View file

@ -0,0 +1,215 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Generic data provider for querying feature-instance tables.
Uses the RBAC catalog's DATA_OBJECTS metadata (table name, fields) and the
DB connector to execute scoped, read-only queries against any registered
feature table. All queries are automatically filtered by featureInstanceId
and mandateId so data isolation is guaranteed.
"""
import logging
import json
from typing import Any, Dict, List, Optional
logger = logging.getLogger(__name__)
# Whitelist of SQL comparison operators accepted in agent-supplied filters
# (checked in _buildFilterClauses); anything else is silently dropped.
_ALLOWED_OPERATORS = {"=", "!=", ">", "<", ">=", "<=", "LIKE", "ILIKE", "IS NULL", "IS NOT NULL"}
class FeatureDataProvider:
    """Reads feature-instance data from the DB using DATA_OBJECTS metadata.

    Every query is scoped by featureInstanceId (plus mandateId when given).
    All identifiers interpolated into SQL are validated first; row values
    are always passed as bind parameters.
    """

    def __init__(self, dbConnector):
        """
        Args:
            dbConnector: A connectorDbPostgre.DatabaseConnector with an open connection.
        """
        self._db = dbConnector

    # ------------------------------------------------------------------
    # public API (called by FeatureDataAgent tools)
    # ------------------------------------------------------------------

    def getAvailableTables(self, featureCode: str) -> List[Dict[str, Any]]:
        """Return DATA_OBJECTS registered for *featureCode*."""
        from modules.security.rbacCatalog import getCatalogService
        catalog = getCatalogService()
        return catalog.getDataObjects(featureCode)

    def getTableSchema(self, featureCode: str, tableName: str) -> Optional[Dict[str, Any]]:
        """Return the DATA_OBJECT entry for a specific table, or None."""
        for obj in self.getAvailableTables(featureCode):
            if obj.get("meta", {}).get("table") == tableName:
                return obj
        return None

    def getActualColumns(self, tableName: str) -> List[str]:
        """Read real column names from PostgreSQL information_schema.

        Internal columns (leading underscore) are filtered out; returns []
        on any error.
        """
        try:
            conn = self._db.connection
            with conn.cursor() as cur:
                cur.execute(
                    "SELECT column_name FROM information_schema.columns "
                    "WHERE table_schema = 'public' AND LOWER(table_name) = LOWER(%s) "
                    "ORDER BY ordinal_position",
                    [tableName],
                )
                cols = [row["column_name"] for row in cur.fetchall()]
                return [c for c in cols if not c.startswith("_")]
        except Exception as e:
            logger.warning(f"getActualColumns({tableName}) failed: {e}")
            return []

    def browseTable(
        self,
        tableName: str,
        featureInstanceId: str,
        mandateId: str,
        fields: Optional[List[str]] = None,
        limit: int = 50,
        offset: int = 0,
    ) -> Dict[str, Any]:
        """List rows from a feature table with pagination.

        Returns ``{"rows": [...], "total": N, "limit": L, "offset": O}``;
        on failure an ``"error"`` key is added and ``rows`` is empty.
        """
        _validateTableName(tableName)
        # Validate agent-supplied field names before interpolating them into
        # SQL (previously unchecked -> injection via a crafted field name).
        fields = self._safeFieldList(fields)
        scopeFilter = _buildScopeFilter(tableName, featureInstanceId, mandateId)
        try:
            conn = self._db.connection
            with conn.cursor() as cur:
                countSql = f'SELECT COUNT(*) FROM "{tableName}" WHERE {scopeFilter["where"]}'
                cur.execute(countSql, scopeFilter["params"])
                # NOTE(review): relies on dict-style cursor rows; PostgreSQL
                # exposes the COUNT(*) column as "count".
                total = cur.fetchone()["count"] if cur.rowcount else 0
                selectCols = ", ".join(f'"{f}"' for f in fields) if fields else "*"
                # assumes every feature table has an "id" column — TODO confirm
                dataSql = (
                    f'SELECT {selectCols} FROM "{tableName}" '
                    f'WHERE {scopeFilter["where"]} '
                    f'ORDER BY "id" LIMIT %s OFFSET %s'
                )
                cur.execute(dataSql, scopeFilter["params"] + [limit, offset])
                rows = [_serializeRow(dict(r)) for r in cur.fetchall()]
                return {"rows": rows, "total": total, "limit": limit, "offset": offset}
        except Exception as e:
            logger.error(f"browseTable({tableName}) failed: {e}")
            return {"rows": [], "total": 0, "limit": limit, "offset": offset, "error": str(e)}

    def queryTable(
        self,
        tableName: str,
        featureInstanceId: str,
        mandateId: str,
        filters: Optional[List[Dict[str, Any]]] = None,
        fields: Optional[List[str]] = None,
        orderBy: Optional[str] = None,
        limit: int = 50,
        offset: int = 0,
    ) -> Dict[str, Any]:
        """Query a feature table with optional filters.

        ``filters`` is a list of ``{"field": "x", "op": "=", "value": "y"}``;
        invalid fields/operators are skipped by _buildFilterClauses.
        """
        _validateTableName(tableName)
        # Same injection guard as browseTable for the projected columns.
        fields = self._safeFieldList(fields)
        scopeFilter = _buildScopeFilter(tableName, featureInstanceId, mandateId)
        extraWhere, extraParams = _buildFilterClauses(filters)
        fullWhere = scopeFilter["where"]
        allParams = list(scopeFilter["params"])
        if extraWhere:
            fullWhere += " AND " + extraWhere
            allParams.extend(extraParams)
        try:
            conn = self._db.connection
            with conn.cursor() as cur:
                countSql = f'SELECT COUNT(*) FROM "{tableName}" WHERE {fullWhere}'
                cur.execute(countSql, allParams)
                total = cur.fetchone()["count"] if cur.rowcount else 0
                selectCols = ", ".join(f'"{f}"' for f in fields) if fields else "*"
                # orderBy is validated like any identifier; fall back to "id".
                orderClause = f'ORDER BY "{orderBy}"' if orderBy and _isValidIdentifier(orderBy) else 'ORDER BY "id"'
                dataSql = (
                    f'SELECT {selectCols} FROM "{tableName}" '
                    f'WHERE {fullWhere} {orderClause} LIMIT %s OFFSET %s'
                )
                cur.execute(dataSql, allParams + [limit, offset])
                rows = [_serializeRow(dict(r)) for r in cur.fetchall()]
                return {"rows": rows, "total": total, "limit": limit, "offset": offset}
        except Exception as e:
            logger.error(f"queryTable({tableName}) failed: {e}")
            return {"rows": [], "total": 0, "limit": limit, "offset": offset, "error": str(e)}

    # ------------------------------------------------------------------
    # internal helpers
    # ------------------------------------------------------------------

    @staticmethod
    def _safeFieldList(fields: Optional[List[str]]) -> Optional[List[str]]:
        """Drop field names that are not valid SQL identifiers.

        Field names come from the LLM agent and are interpolated (quoted)
        into SQL, so they must be validated like table names. Returns None
        when nothing valid remains, which callers treat as "select *".
        """
        if not fields:
            return None
        valid = [f for f in fields if _isValidIdentifier(f)]
        if len(valid) != len(fields):
            logger.warning(f"Dropping invalid field names: {[f for f in fields if f not in valid]}")
        return valid or None
# ------------------------------------------------------------------
# helpers
# ------------------------------------------------------------------
def _validateTableName(tableName: str):
    """Raise ValueError unless *tableName* is a non-empty, safe SQL identifier."""
    if tableName and _isValidIdentifier(tableName):
        return
    raise ValueError(f"Invalid table name: {tableName}")
def _isValidIdentifier(name: str) -> bool:
"""Only allow alphanumeric + underscore to prevent SQL injection."""
return name.isidentifier()
def _buildScopeFilter(tableName: str, featureInstanceId: str, mandateId: str) -> Dict[str, Any]:
"""Build the mandatory WHERE clause that scopes rows to the feature instance.
Feature tables usually have either ``featureInstanceId`` or a combination
of ``mandateId`` + an org/context FK. We try ``featureInstanceId`` first,
then fall back to ``mandateId``.
"""
conditions = []
params = []
conditions.append('"featureInstanceId" = %s')
params.append(featureInstanceId)
if mandateId:
conditions.append('"mandateId" = %s')
params.append(mandateId)
return {"where": " AND ".join(conditions), "params": params}
def _buildFilterClauses(filters: Optional[List[Dict[str, Any]]]) -> tuple:
"""Convert agent-provided filter dicts into safe SQL."""
if not filters:
return "", []
parts = []
params = []
for f in filters:
field = f.get("field", "")
op = (f.get("op") or "=").upper()
value = f.get("value")
if not field or not _isValidIdentifier(field):
continue
if op not in _ALLOWED_OPERATORS:
continue
if op in ("IS NULL", "IS NOT NULL"):
parts.append(f'"{field}" {op}')
else:
parts.append(f'"{field}" {op} %s')
params.append(value)
return " AND ".join(parts), params
def _serializeRow(row: Dict[str, Any]) -> Dict[str, Any]:
"""Ensure all values are JSON-serializable."""
for k, v in row.items():
if isinstance(v, (bytes, bytearray)):
row[k] = f"<binary {len(v)} bytes>"
elif hasattr(v, "isoformat"):
row[k] = v.isoformat()
return row