Cleaned backend and enhanced AI call failover modes

This commit is contained in:
ValueOn AG 2026-03-17 22:51:05 +01:00
parent 1d4148e8b5
commit 4e843761a9
42 changed files with 811 additions and 3093 deletions

View file

@ -11,11 +11,36 @@ IMPORTANT: Model Registration Requirements
- If duplicate displayNames are detected during registration, an error will be raised
"""
import re as _re
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional, AsyncGenerator, Union
from modules.datamodels.datamodelAi import AiModel, AiModelCall, AiModelResponse
_RETRY_AFTER_PATTERN = _re.compile(r"try again in (\d+(?:\.\d+)?)\s*s", _re.IGNORECASE)
def _parseRetryAfterSeconds(message: str) -> float:
"""Extract retry-after seconds from provider error messages like 'Please try again in 6.558s'."""
match = _RETRY_AFTER_PATTERN.search(message)
return float(match.group(1)) if match else 0.0
class RateLimitExceededException(Exception):
    """Raised when a provider's rate limit (TPM / RPM) is exceeded."""

    def __init__(self, message: str = "Rate limit exceeded", retryAfterSeconds: float = 0.0):
        super().__init__(message)
        # When the caller gives no explicit hint, try to recover one from
        # the provider's error text (e.g. '... try again in 6.558s').
        self.retryAfterSeconds = (
            retryAfterSeconds if retryAfterSeconds > 0 else _parseRetryAfterSeconds(message)
        )
class ContextLengthExceededException(Exception):
    """Raised when the input exceeds a model's context window."""
class BaseConnectorAi(ABC):
"""
Base class for all AI connectors.

View file

@ -19,26 +19,29 @@ class ModelSelector:
"""Model selector with priority scoring and recent-failure cooldown."""
def __init__(self):
self._failureLog: Dict[str, float] = {}
self._failureLog: Dict[str, Tuple[float, float]] = {}
logger.info("ModelSelector initialized with failure cooldown support")
def reportFailure(self, modelName: str):
def reportFailure(self, modelName: str, cooldownSeconds: float = 0.0):
"""Record that a model just failed (rate limit, error, etc.).
The model will be deprioritized for COOLDOWN_DURATION seconds."""
self._failureLog[modelName] = time.time()
logger.info(f"ModelSelector: Recorded failure for {modelName}, cooldown {_COOLDOWN_DURATION}s")
The model will be deprioritized for *cooldownSeconds* (default: _COOLDOWN_DURATION)."""
if cooldownSeconds <= 0:
cooldownSeconds = _COOLDOWN_DURATION
self._failureLog[modelName] = (time.time(), cooldownSeconds)
logger.info(f"ModelSelector: Recorded failure for {modelName}, cooldown {cooldownSeconds:.1f}s")
def _getCooldownPenalty(self, modelName: str) -> float:
"""Return a score penalty (0.0 = no penalty, large negative = recently failed)."""
failedAt = self._failureLog.get(modelName)
if failedAt is None:
entry = self._failureLog.get(modelName)
if entry is None:
return 0.0
failedAt, cooldown = entry
elapsed = time.time() - failedAt
if elapsed > _COOLDOWN_DURATION:
if elapsed > cooldown:
del self._failureLog[modelName]
return 0.0
remaining = _COOLDOWN_DURATION - elapsed
return -(remaining / _COOLDOWN_DURATION) * 5000.0
remaining = cooldown - elapsed
return -(remaining / cooldown) * 5000.0
def selectModel(self,
prompt: str,

View file

@ -7,7 +7,7 @@ import os
from typing import Dict, Any, List, AsyncGenerator, Union
from fastapi import HTTPException
from modules.shared.configuration import APP_CONFIG
from .aicoreBase import BaseConnectorAi
from .aicoreBase import BaseConnectorAi, RateLimitExceededException
from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings
# Configure logger
@ -203,11 +203,12 @@ class AiAnthropic(BaseConnectorAi):
error_detail = f"Anthropic API error: {response.status_code} - {response.text}"
logger.error(error_detail)
# Provide more specific error messages based on status code
if response.status_code == 429:
raise RateLimitExceededException(
f"Rate limit exceeded for {model.name}: {response.text}"
)
if response.status_code == 529:
error_message = "Anthropic API is currently overloaded. Please try again in a few minutes."
elif response.status_code == 429:
error_message = "Rate limit exceeded. Please wait before making another request."
elif response.status_code == 401:
error_message = "Invalid API key. Please check your Anthropic API configuration."
elif response.status_code == 400:
@ -255,6 +256,8 @@ class AiAnthropic(BaseConnectorAi):
metadata=metadata
)
except (RateLimitExceededException, HTTPException):
raise
except Exception as e:
error_msg = str(e) if str(e) else f"{type(e).__name__}"
error_detail = f"Error calling Anthropic API: {error_msg}"
@ -296,7 +299,12 @@ class AiAnthropic(BaseConnectorAi):
async with self.httpClient.stream("POST", model.apiUrl, json=payload) as response:
if response.status_code != 200:
body = await response.aread()
raise HTTPException(status_code=500, detail=f"Anthropic stream error: {response.status_code} - {body.decode()}")
bodyStr = body.decode()
if response.status_code == 429:
raise RateLimitExceededException(
f"Rate limit exceeded for {model.name}: {bodyStr}"
)
raise HTTPException(status_code=500, detail=f"Anthropic stream error: {response.status_code} - {bodyStr}")
async for line in response.aiter_lines():
if not line.startswith("data: "):
@ -354,7 +362,7 @@ class AiAnthropic(BaseConnectorAi):
metadata=metadata,
)
except HTTPException:
except (RateLimitExceededException, HTTPException):
raise
except Exception as e:
logger.error(f"Error streaming Anthropic API: {e}", exc_info=True)

View file

@ -6,20 +6,11 @@ import httpx
from typing import List, Dict, Any, AsyncGenerator, Union
from fastapi import HTTPException
from modules.shared.configuration import APP_CONFIG
from .aicoreBase import BaseConnectorAi
from .aicoreBase import BaseConnectorAi, RateLimitExceededException, ContextLengthExceededException
from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings
# Configure logger
logger = logging.getLogger(__name__)
class ContextLengthExceededException(Exception):
"""Exception raised when the context length exceeds the model's limit"""
pass
class RateLimitExceededException(Exception):
"""Exception raised when the provider's rate limit (TPM) is exceeded"""
pass
def loadConfigData():
"""Load configuration data for Mistral connector"""
return {
@ -264,7 +255,14 @@ class AiMistral(BaseConnectorAi):
async with self.httpClient.stream("POST", model.apiUrl, json=payload) as response:
if response.status_code != 200:
body = await response.aread()
raise HTTPException(status_code=500, detail=f"Mistral stream error: {response.status_code} - {body.decode()}")
bodyStr = body.decode()
if response.status_code == 429:
try:
errorMsg = _json.loads(bodyStr).get("error", {}).get("message", "Rate limit exceeded")
except (ValueError, KeyError):
errorMsg = f"Rate limit exceeded for {model.name}"
raise RateLimitExceededException(f"Rate limit exceeded for {model.name}: {errorMsg}")
raise HTTPException(status_code=500, detail=f"Mistral stream error: {response.status_code} - {bodyStr}")
async for line in response.aiter_lines():
if not line.startswith("data: "):
@ -289,7 +287,7 @@ class AiMistral(BaseConnectorAi):
metadata={},
)
except HTTPException:
except (RateLimitExceededException, ContextLengthExceededException, HTTPException):
raise
except Exception as e:
logger.error(f"Error streaming Mistral API: {e}")
@ -317,6 +315,17 @@ class AiMistral(BaseConnectorAi):
logger.error(errorMessage)
if response.status_code == 429:
raise RateLimitExceededException(f"Rate limit exceeded for {model.name}")
if response.status_code == 400:
try:
errorData = response.json()
errMsg = errorData.get("error", {}).get("message", "").lower()
errCode = errorData.get("error", {}).get("code", "")
if errCode == "context_length_exceeded" or "too many tokens" in errMsg or "maximum context length" in errMsg:
raise ContextLengthExceededException(
f"Embedding context length exceeded for {model.name}: {errorData.get('error', {}).get('message', '')}"
)
except (ValueError, KeyError):
pass
raise HTTPException(status_code=500, detail=errorMessage)
responseJson = response.json()
@ -334,7 +343,7 @@ class AiMistral(BaseConnectorAi):
},
metadata={"embeddings": embeddings},
)
except RateLimitExceededException:
except (RateLimitExceededException, ContextLengthExceededException):
raise
except Exception as e:
logger.error(f"Error calling Mistral Embedding API: {str(e)}")

View file

@ -6,20 +6,11 @@ import httpx
from typing import List, Dict, Any, AsyncGenerator, Union
from fastapi import HTTPException
from modules.shared.configuration import APP_CONFIG
from .aicoreBase import BaseConnectorAi
from .aicoreBase import BaseConnectorAi, RateLimitExceededException, ContextLengthExceededException
from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings, AiCallPromptImage
# Configure logger
logger = logging.getLogger(__name__)
class ContextLengthExceededException(Exception):
"""Exception raised when the context length exceeds the model's limit"""
pass
class RateLimitExceededException(Exception):
"""Exception raised when the provider's rate limit (TPM) is exceeded"""
pass
def loadConfigData():
"""Load configuration data for OpenAI connector"""
return {
@ -316,7 +307,14 @@ class AiOpenai(BaseConnectorAi):
async with self.httpClient.stream("POST", model.apiUrl, json=payload) as response:
if response.status_code != 200:
body = await response.aread()
raise HTTPException(status_code=500, detail=f"OpenAI stream error: {response.status_code} - {body.decode()}")
bodyStr = body.decode()
if response.status_code == 429:
try:
errorMsg = _json.loads(bodyStr).get("error", {}).get("message", "Rate limit exceeded")
except (ValueError, KeyError):
errorMsg = f"Rate limit exceeded for {model.name}"
raise RateLimitExceededException(f"Rate limit exceeded for {model.name}: {errorMsg}")
raise HTTPException(status_code=500, detail=f"OpenAI stream error: {response.status_code} - {bodyStr}")
async for line in response.aiter_lines():
if not line.startswith("data: "):
@ -362,7 +360,7 @@ class AiOpenai(BaseConnectorAi):
metadata=metadata,
)
except HTTPException:
except (RateLimitExceededException, ContextLengthExceededException, HTTPException):
raise
except Exception as e:
logger.error(f"Error streaming OpenAI API: {e}")
@ -390,6 +388,17 @@ class AiOpenai(BaseConnectorAi):
logger.error(errorMessage)
if response.status_code == 429:
raise RateLimitExceededException(f"Rate limit exceeded for {model.name}")
if response.status_code == 400:
try:
errorData = response.json()
errMsg = errorData.get("error", {}).get("message", "").lower()
errCode = errorData.get("error", {}).get("code", "")
if errCode == "context_length_exceeded" or "too many tokens" in errMsg or "maximum context length" in errMsg:
raise ContextLengthExceededException(
f"Embedding context length exceeded for {model.name}: {errorData.get('error', {}).get('message', '')}"
)
except (ValueError, KeyError):
pass
raise HTTPException(status_code=500, detail=errorMessage)
responseJson = response.json()

View file

@ -8,7 +8,16 @@ All ServiceAdapters share the same access token from the UserConnection.
"""
from abc import ABC, abstractmethod
from typing import List, Optional
from dataclasses import dataclass, field
from typing import List, Optional, Union
@dataclass
class DownloadResult:
    """Rich return type for ServiceAdapter.download() when metadata is available."""
    # Raw file content; repr=False keeps large payloads out of debug/log output.
    data: bytes = field(default=b"", repr=False)
    # Suggested file name (including extension) for saving the content.
    fileName: str = ""
    # MIME type of the content, e.g. "message/rfc822"; empty when unknown.
    mimeType: str = ""
class ServiceAdapter(ABC):
@ -20,8 +29,8 @@ class ServiceAdapter(ABC):
...
@abstractmethod
async def download(self, path: str) -> bytes:
"""Download a file and return its content bytes."""
async def download(self, path: str) -> Union[bytes, DownloadResult]:
"""Download a file. Return bytes or DownloadResult with metadata."""
...
@abstractmethod

View file

@ -7,7 +7,7 @@ from typing import Any, Dict, List, Optional
import aiohttp
from modules.connectors.connectorProviderBase import ProviderConnector, ServiceAdapter
from modules.connectors.connectorProviderBase import ProviderConnector, ServiceAdapter, DownloadResult
from modules.datamodels.datamodelDataSource import ExternalEntry
logger = logging.getLogger(__name__)
@ -192,8 +192,41 @@ class GmailAdapter(ServiceAdapter):
))
return entries
async def download(self, path: str) -> bytes:
return b""
async def download(self, path: str) -> DownloadResult:
    """Download a Gmail message as an RFC 822 EML file via ``format=raw``.

    Args:
        path: Entry path whose last segment is the Gmail message id.

    Returns:
        DownloadResult with the EML bytes, a sanitized ``<subject>.eml``
        file name and the ``message/rfc822`` MIME type; an empty
        DownloadResult when the id is missing, the API reports an error,
        or the raw payload is absent.
    """
    import base64
    import re
    cleanPath = (path or "").strip("/")
    msgId = cleanPath.split("/")[-1] if cleanPath else ""
    if not msgId:
        return DownloadResult()
    url = f"{_GMAIL_BASE}/users/me/messages/{msgId}?format=raw"
    result = await _googleGet(self._token, url)
    if "error" in result:
        return DownloadResult()
    rawB64 = result.get("raw", "")
    if not rawB64:
        return DownloadResult()
    # Gmail returns URL-safe base64 that may lack '=' padding; restore it
    # before decoding, since urlsafe_b64decode raises binascii.Error on
    # incorrectly padded input.
    emlBytes = base64.urlsafe_b64decode(rawB64 + "=" * (-len(rawB64) % 4))
    # Best-effort second call to fetch the Subject header for a readable
    # file name; any error just falls back to the message id.
    metaUrl = f"{_GMAIL_BASE}/users/me/messages/{msgId}?format=metadata&metadataHeaders=Subject"
    meta = await _googleGet(self._token, metaUrl)
    subject = msgId
    if "error" not in meta:
        for h in meta.get("payload", {}).get("headers", []):
            if h.get("name", "").lower() == "subject":
                subject = h.get("value", msgId)
                break
    # Replace characters illegal in file names, cap at 80 chars, and avoid
    # an empty name after stripping trailing dots/spaces.
    safeName = re.sub(r'[<>:"/\\|?*\x00-\x1f]', "_", subject)[:80].strip(". ") or "email"
    return DownloadResult(
        data=emlBytes,
        fileName=f"{safeName}.eml",
        mimeType="message/rfc822",
    )
async def upload(self, path: str, data: bytes, fileName: str) -> dict:
return {"error": "Gmail upload not applicable"}

View file

@ -11,7 +11,7 @@ import aiohttp
import asyncio
from typing import Dict, Any, List, Optional
from modules.connectors.connectorProviderBase import ProviderConnector, ServiceAdapter
from modules.connectors.connectorProviderBase import ProviderConnector, ServiceAdapter, DownloadResult
from modules.datamodels.datamodelDataSource import ExternalEntry
logger = logging.getLogger(__name__)
@ -256,14 +256,24 @@ class OutlookAdapter(_GraphApiMixin, ServiceAdapter):
for m in result.get("value", [])
]
async def download(self, path: str) -> bytes:
"""Download a mail message as JSON bytes."""
import json
async def download(self, path: str) -> DownloadResult:
    """Download a mail message as RFC 822 EML via the Graph API ``$value`` endpoint.

    Args:
        path: Entry path whose last segment is the Outlook message id.

    Returns:
        DownloadResult with the EML bytes, a sanitized ``<subject>.eml``
        file name and the ``message/rfc822`` MIME type; an empty
        DownloadResult when the MIME body cannot be fetched.
    """
    import re
    messageId = path.strip("/").split("/")[-1]
    # Best-effort subject lookup to derive a readable file name.
    meta = await self._graphGet(f"me/messages/{messageId}?$select=subject")
    # Graph may return an explicit null subject; 'or messageId' also covers
    # that case so re.sub never receives None.
    subject = (meta.get("subject") or messageId) if "error" not in meta else messageId
    safeName = re.sub(r'[<>:"/\\|?*\x00-\x1f]', "_", subject)[:80].strip(". ") or "email"
    emlBytes = await self._graphDownload(f"me/messages/{messageId}/$value")
    if not emlBytes:
        return DownloadResult()
    return DownloadResult(
        data=emlBytes,
        fileName=f"{safeName}.eml",
        mimeType="message/rfc822",
    )
async def upload(self, path: str, data: bytes, fileName: str) -> dict:
"""Not applicable for Outlook in the file sense."""

View file

@ -119,7 +119,7 @@ class BillingTransaction(BaseModel):
# Context for workflow transactions
workflowId: Optional[str] = Field(None, description="Workflow ID (for WORKFLOW transactions)")
featureInstanceId: Optional[str] = Field(None, description="Feature instance ID")
featureCode: Optional[str] = Field(None, description="Feature code (e.g., chatplayground, automation)")
featureCode: Optional[str] = Field(None, description="Feature code (e.g., automation)")
aicoreProvider: Optional[str] = Field(None, description="AICore provider (anthropic, openai, etc.)")
aicoreModel: Optional[str] = Field(None, description="AICore model name (e.g., claude-4-sonnet, gpt-4o)")
createdByUserId: Optional[str] = Field(None, description="User who created/caused this transaction")
@ -224,7 +224,7 @@ class UsageStatistics(BaseModel):
# Breakdown by feature
costByFeature: Dict[str, float] = Field(
default_factory=dict,
description="Cost breakdown by feature (e.g., {'chatplayground': 15.00, 'automation': 5.80})"
description="Cost breakdown by feature (e.g., {'automation': 5.80, 'workspace': 3.20})"
)

View file

@ -247,8 +247,6 @@ class WorkflowModeEnum(str, Enum):
WORKFLOW_DYNAMIC = "Dynamic"
WORKFLOW_AUTOMATION = "Automation"
WORKFLOW_CHATBOT = "Chatbot"
WORKFLOW_CODEEDITOR = "CodeEditor"
WORKFLOW_REACT = "React" # Legacy mode - kept for backward compatibility
registerModelLabels(
@ -258,8 +256,6 @@ registerModelLabels(
"WORKFLOW_DYNAMIC": {"en": "Dynamic", "fr": "Dynamique"},
"WORKFLOW_AUTOMATION": {"en": "Automation", "fr": "Automatisation"},
"WORKFLOW_CHATBOT": {"en": "Chatbot", "fr": "Chatbot"},
"WORKFLOW_CODEEDITOR": {"en": "Code Editor", "fr": "Éditeur de code"},
"WORKFLOW_REACT": {"en": "React (Legacy)", "fr": "React (Hérité)"},
},
)
@ -298,10 +294,6 @@ class ChatWorkflow(BaseModel):
"value": WorkflowModeEnum.WORKFLOW_CHATBOT.value,
"label": {"en": "Chatbot", "fr": "Chatbot"},
},
{
"value": WorkflowModeEnum.WORKFLOW_REACT.value,
"label": {"en": "React (Legacy)", "fr": "React (Hérité)"},
},
]})
maxSteps: int = Field(default=10, description="Maximum number of iterations in dynamic mode", json_schema_extra={"frontend_type": "integer", "frontend_readonly": False, "frontend_required": False})
expectedFormats: Optional[List[str]] = Field(None, description="List of expected file format extensions from user request (e.g., ['xlsx', 'pdf']). Extracted during intent analysis.", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})

View file

@ -17,7 +17,7 @@ from modules.features.automation.interfaceFeatureAutomation import getInterface
from modules.features.automation.mainAutomation import getAutomationServices
from modules.auth import limiter, getRequestContext, RequestContext
from modules.features.automation.datamodelFeatureAutomation import AutomationDefinition, AutomationTemplate
from modules.datamodels.datamodelChat import ChatWorkflow, ChatMessage, ChatLog
from modules.datamodels.datamodelChat import ChatWorkflow, ChatMessage, ChatLog, UserInputRequest, WorkflowModeEnum
from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict
from modules.shared.attributeUtils import getModelAttributeDefinitions
from modules.interfaces import interfaceDbChat
@ -235,7 +235,7 @@ def get_available_actions(
# -----------------------------------------------------------------------------
# Workflow routes under /{instanceId}/workflows/ (instance-scoped, same as chatplayground)
# Workflow routes under /{instanceId}/workflows/ (instance-scoped)
# -----------------------------------------------------------------------------
def _validateAutomationInstanceAccess(instanceId: str, context: RequestContext) -> Optional[str]:
@ -854,6 +854,46 @@ def delete_automation(
detail=f"Error deleting automation: {str(e)}"
)
@router.post("/{instanceId}/start", response_model=ChatWorkflow)
@limiter.limit("120/minute")
async def start_automation_workflow(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: Optional[str] = Query(None, description="Optional ID of the workflow to continue"),
    workflowMode: WorkflowModeEnum = Query(..., description="Workflow mode: 'Dynamic' or 'Automation' (mandatory)"),
    userInput: UserInputRequest = Body(...),
    context: RequestContext = Depends(getRequestContext)
) -> ChatWorkflow:
    """Start a new workflow or continue an existing one."""
    try:
        from modules.workflows.automation import chatStart
        # Access check first; also resolves the mandate this instance belongs to.
        mandateId = _validateAutomationInstanceAccess(instanceId, context)
        services = getAutomationServices(
            context.user,
            mandateId=mandateId,
            featureInstanceId=instanceId,
        )
        services.featureCode = "automation"
        # Propagate a provider allow-list from the request, when one is set.
        allowedProviders = getattr(userInput, 'allowedProviders', None)
        if allowedProviders:
            services.allowedProviders = allowedProviders
        return await chatStart(
            context.user,
            userInput,
            workflowMode,
            workflowId,
            mandateId=mandateId,
            featureInstanceId=instanceId,
            featureCode="automation",
            services=services,
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error in start_automation_workflow: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/{automationId}/execute", response_model=ChatWorkflow)
@limiter.limit("5/minute")
async def execute_automation_route(

View file

@ -1,6 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Chat Playground Feature Container.
Provides workflow-based chat playground functionality.
"""

View file

@ -1,137 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Chat Playground Feature Interface.
Wrapper around interfaceDbChat with feature instance context.
"""
import logging
from typing import Dict, Any, List, Optional
from modules.datamodels.datamodelUam import User
from modules.interfaces import interfaceDbChat
logger = logging.getLogger(__name__)
# Feature code constant
FEATURE_CODE = "chatplayground"
# Singleton instances cache
_instances: Dict[str, "ChatPlaygroundObjects"] = {}
def getInterface(currentUser: User, mandateId: str = None, featureInstanceId: str = None) -> "ChatPlaygroundObjects":
    """Return the cached ChatPlaygroundObjects for this user context.

    A singleton is kept per (user, mandate, featureInstance) triple; on a
    cache hit the stored instance's context is refreshed instead of
    building a new one.

    Args:
        currentUser: Current user object
        mandateId: Mandate ID
        featureInstanceId: Feature instance ID

    Returns:
        ChatPlaygroundObjects instance
    """
    cacheKey = f"{currentUser.id}_{mandateId}_{featureInstanceId}"
    cached = _instances.get(cacheKey)
    if cached is None:
        cached = ChatPlaygroundObjects(currentUser, mandateId, featureInstanceId)
        _instances[cacheKey] = cached
    else:
        # Refresh the context on reuse so stale user/mandate data is not served.
        cached.setUserContext(currentUser, mandateId, featureInstanceId)
    return cached
class ChatPlaygroundObjects:
    """
    Chat Playground feature interface.
    Wraps the shared interfaceDbChat with feature instance context and
    delegates all workflow/message/log operations to it.
    """
    # Bind the module-level feature code onto the class for convenient access.
    FEATURE_CODE = FEATURE_CODE

    def __init__(self, currentUser: User, mandateId: str = None, featureInstanceId: str = None):
        """
        Initialize the Chat Playground interface.

        Args:
            currentUser: Current user object
            mandateId: Mandate ID
            featureInstanceId: Feature instance ID
        """
        self.currentUser = currentUser
        self.mandateId = mandateId
        self.featureInstanceId = featureInstanceId
        # Get the underlying chat interface; all public methods delegate to it.
        self._chatInterface = interfaceDbChat.getInterface(
            currentUser,
            mandateId=mandateId,
            featureInstanceId=featureInstanceId
        )

    def setUserContext(self, currentUser: User, mandateId: str = None, featureInstanceId: str = None):
        """
        Update the user context and rebind the underlying chat interface.

        Args:
            currentUser: Current user object
            mandateId: Mandate ID
            featureInstanceId: Feature instance ID
        """
        self.currentUser = currentUser
        self.mandateId = mandateId
        self.featureInstanceId = featureInstanceId
        # Update underlying interface so delegated calls use the new context.
        self._chatInterface = interfaceDbChat.getInterface(
            currentUser,
            mandateId=mandateId,
            featureInstanceId=featureInstanceId
        )

    # =========================================================================
    # Delegated methods from interfaceDbChat
    # =========================================================================
    def getWorkflow(self, workflowId: str) -> Optional[Dict[str, Any]]:
        """Get a workflow by ID."""
        return self._chatInterface.getWorkflow(workflowId)

    def getWorkflows(self, pagination=None) -> Dict[str, Any]:
        """Get all workflows with pagination."""
        return self._chatInterface.getWorkflows(pagination=pagination)

    def getUnifiedChatData(self, workflowId: str, afterTimestamp: float = None) -> Dict[str, Any]:
        """Get unified chat data for a workflow (optionally only entries after a timestamp)."""
        return self._chatInterface.getUnifiedChatData(workflowId, afterTimestamp)

    def createWorkflow(self, workflow) -> Dict[str, Any]:
        """Create a new workflow."""
        return self._chatInterface.createWorkflow(workflow)

    def updateWorkflow(self, workflowId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Update a workflow."""
        return self._chatInterface.updateWorkflow(workflowId, updates)

    def deleteWorkflow(self, workflowId: str) -> bool:
        """Delete a workflow."""
        return self._chatInterface.deleteWorkflow(workflowId)

    def getMessages(self, workflowId: str) -> List[Dict[str, Any]]:
        """Get messages for a workflow."""
        return self._chatInterface.getMessages(workflowId)

    def createMessage(self, message) -> Dict[str, Any]:
        """Create a new message."""
        return self._chatInterface.createMessage(message)

    def getLogs(self, workflowId: str) -> List[Dict[str, Any]]:
        """Get logs for a workflow."""
        return self._chatInterface.getLogs(workflowId)

    def createLog(self, log) -> Dict[str, Any]:
        """Create a new log entry."""
        return self._chatInterface.createLog(log)

View file

@ -1,381 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Chat Playground Feature Container - Main Module.
Handles feature initialization and RBAC catalog registration.
"""
import logging
from typing import Dict, List, Any, Optional
logger = logging.getLogger(__name__)
# Feature metadata
FEATURE_CODE = "chatplayground"
FEATURE_LABEL = {"en": "Chat Playground", "de": "Chat Playground", "fr": "Chat Playground"}
FEATURE_ICON = "mdi-message-text"
# UI Objects for RBAC catalog
UI_OBJECTS = [
{
"objectKey": "ui.feature.chatplayground.playground",
"label": {"en": "Playground", "de": "Playground", "fr": "Playground"},
"meta": {"area": "playground"}
},
{
"objectKey": "ui.feature.chatplayground.workflows",
"label": {"en": "Workflows", "de": "Workflows", "fr": "Workflows"},
"meta": {"area": "workflows"}
},
]
# Resource Objects for RBAC catalog
RESOURCE_OBJECTS = [
{
"objectKey": "resource.feature.chatplayground.start",
"label": {"en": "Start Workflow", "de": "Workflow starten", "fr": "Démarrer workflow"},
"meta": {"endpoint": "/api/chatplayground/{instanceId}/start", "method": "POST"}
},
{
"objectKey": "resource.feature.chatplayground.stop",
"label": {"en": "Stop Workflow", "de": "Workflow stoppen", "fr": "Arrêter workflow"},
"meta": {"endpoint": "/api/chatplayground/{instanceId}/workflows/{workflowId}/stop", "method": "POST"}
},
{
"objectKey": "resource.feature.chatplayground.chatData",
"label": {"en": "Get Chat Data", "de": "Chat-Daten abrufen", "fr": "Récupérer données chat"},
"meta": {"endpoint": "/api/chatplayground/{instanceId}/workflows/{workflowId}/chatData", "method": "GET"}
},
]
# Service requirements - services this feature needs from the service center
# Same as automation: chatplayground runs the same WorkflowManager and workflow methods
REQUIRED_SERVICES = [
{"serviceKey": "chat", "meta": {"usage": "Workflow CRUD, messages, logs"}},
{"serviceKey": "ai", "meta": {"usage": "AI planning for workflow execution"}},
{"serviceKey": "utils", "meta": {"usage": "Timestamps, utilities"}},
{"serviceKey": "billing", "meta": {"usage": "AI call billing"}},
{"serviceKey": "extraction", "meta": {"usage": "Workflow method actions"}},
{"serviceKey": "sharepoint", "meta": {"usage": "SharePoint actions (listDocuments, uploadDocument, etc.)"}},
{"serviceKey": "generation", "meta": {"usage": "Action completion messages, document creation from results"}},
]
# Template roles for this feature
# Role names MUST follow convention: {featureCode}-{roleName}
TEMPLATE_ROLES = [
{
"roleLabel": "chatplayground-viewer",
"description": {
"en": "Chat Playground Viewer - View chat playground (read-only)",
"de": "Chat Playground Betrachter - Chat Playground ansehen (nur lesen)",
"fr": "Visualiseur Chat Playground - Consulter le chat playground (lecture seule)"
},
"accessRules": [
# UI: only playground view, NO workflows
{"context": "UI", "item": "ui.feature.chatplayground.playground", "view": True},
# RESOURCE: NO access (viewer cannot start/stop/access chat data)
# DATA access (own records, read-only)
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"},
]
},
{
"roleLabel": "chatplayground-user",
"description": {
"en": "Chat Playground User - Use chat playground and workflows",
"de": "Chat Playground Benutzer - Chat Playground und Workflows nutzen",
"fr": "Utilisateur Chat Playground - Utiliser le chat playground et les workflows"
},
"accessRules": [
# UI: full access to all views
{"context": "UI", "item": "ui.feature.chatplayground.playground", "view": True},
{"context": "UI", "item": "ui.feature.chatplayground.workflows", "view": True},
# Resource access: can start/stop workflows and access chat data
{"context": "RESOURCE", "item": "resource.feature.chatplayground.start", "view": True},
{"context": "RESOURCE", "item": "resource.feature.chatplayground.stop", "view": True},
{"context": "RESOURCE", "item": "resource.feature.chatplayground.chatData", "view": True},
# DATA access (own records)
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "m"},
]
},
{
"roleLabel": "chatplayground-admin",
"description": {
"en": "Chat Playground Admin - Full access to chat playground",
"de": "Chat Playground Admin - Vollzugriff auf Chat Playground",
"fr": "Administrateur Chat Playground - Accès complet au chat playground"
},
"accessRules": [
# Full UI access
{"context": "UI", "item": None, "view": True},
# Full resource access
{"context": "RESOURCE", "item": None, "view": True},
# Full DATA access
{"context": "DATA", "item": None, "view": True, "read": "a", "create": "a", "update": "a", "delete": "a"},
]
},
]
def getRequiredServiceKeys() -> List[str]:
    """List the service-center keys declared in REQUIRED_SERVICES."""
    return [spec["serviceKey"] for spec in REQUIRED_SERVICES]
def getChatplaygroundServices(
    user,
    mandateId: Optional[str] = None,
    featureInstanceId: Optional[str] = None,
    workflow=None,
) -> "_ChatplaygroundServiceHub":
    """
    Get a service hub for the chatplayground feature using the service center.
    Resolves only the services declared in REQUIRED_SERVICES.
    No legacy fallback - service center only.
    Returns a hub-like object with: chat, ai, utils, billing, extraction,
    sharepoint, rbac, interfaceDbApp, interfaceDbComponent, interfaceDbChat.
    """
    # Local imports avoid a module-load cycle with the service center.
    from modules.serviceCenter import getService
    from modules.serviceCenter.context import ServiceCenterContext
    _workflow = workflow
    if _workflow is None:
        # ServiceCenterContext always needs an object carrying featureCode,
        # so synthesize a throwaway placeholder when no workflow is given.
        _workflow = type("_Placeholder", (), {"featureCode": FEATURE_CODE})()
    ctx = ServiceCenterContext(
        user=user,
        mandate_id=mandateId,
        feature_instance_id=featureInstanceId,
        workflow=_workflow,
    )
    hub = _ChatplaygroundServiceHub()
    hub.user = user
    hub.mandateId = mandateId
    hub.featureInstanceId = featureInstanceId
    # Note: the hub stores the caller's original workflow (possibly None),
    # not the synthesized placeholder used for the context.
    hub.workflow = workflow
    hub.featureCode = FEATURE_CODE
    hub.allowedProviders = None
    for spec in REQUIRED_SERVICES:
        key = spec["serviceKey"]
        try:
            svc = getService(key, ctx)
            setattr(hub, key, svc)
        except Exception as e:
            # Degrade gracefully: an unresolved service becomes None instead
            # of aborting hub construction.
            logger.warning(f"Could not resolve service '{key}' for chatplayground: {e}")
            setattr(hub, key, None)
    # Copy interfaces from chat service for WorkflowManager compatibility
    if hub.chat:
        hub.interfaceDbApp = getattr(hub.chat, "interfaceDbApp", None)
        hub.interfaceDbComponent = getattr(hub.chat, "interfaceDbComponent", None)
        hub.interfaceDbChat = getattr(hub.chat, "interfaceDbChat", None)
    # RBAC for MethodBase action permission checks (workflow methods)
    hub.rbac = getattr(hub.interfaceDbApp, "rbac", None) if hub.interfaceDbApp else None
    return hub
class _ChatplaygroundServiceHub:
"""Lightweight hub exposing only services required by the chatplayground feature."""
user = None
mandateId = None
featureInstanceId = None
workflow = None
featureCode = "chatplayground"
allowedProviders = None
interfaceDbApp = None
interfaceDbComponent = None
interfaceDbChat = None
rbac = None
chat = None
ai = None
utils = None
billing = None
extraction = None
sharepoint = None
def getFeatureDefinition() -> Dict[str, Any]:
    """Describe this feature (code, label, icon, bootstrap flag) for registration."""
    definition = {
        "code": FEATURE_CODE,
        "label": FEATURE_LABEL,
        "icon": FEATURE_ICON,
        # Automatically create an instance in the root mandate during bootstrap.
        "autoCreateInstance": True,
    }
    return definition
def getUiObjects() -> List[Dict[str, Any]]:
    """Return UI objects for RBAC catalog registration."""
    # Returns the module-level constant itself; callers must not mutate it.
    return UI_OBJECTS
def getResourceObjects() -> List[Dict[str, Any]]:
    """Return resource objects for RBAC catalog registration.

    Returns the module-level RESOURCE_OBJECTS list itself (not a copy).
    """
    return RESOURCE_OBJECTS
def getTemplateRoles() -> List[Dict[str, Any]]:
    """Return template roles for this feature.

    Returns the module-level TEMPLATE_ROLES list itself (not a copy).
    """
    return TEMPLATE_ROLES
def registerFeature(catalogService) -> bool:
    """
    Register this feature's RBAC objects in the catalog.

    Args:
        catalogService: The RBAC catalog service instance

    Returns:
        True if registration was successful
    """
    try:
        # UI objects first, then resource objects -- same payload shape for both.
        for entry in UI_OBJECTS:
            catalogService.registerUiObject(
                featureCode=FEATURE_CODE,
                objectKey=entry["objectKey"],
                label=entry["label"],
                meta=entry.get("meta"),
            )
        for entry in RESOURCE_OBJECTS:
            catalogService.registerResourceObject(
                featureCode=FEATURE_CODE,
                objectKey=entry["objectKey"],
                label=entry["label"],
                meta=entry.get("meta"),
            )
        # Template roles live in the database, not the catalog.
        _syncTemplateRolesToDb()
        logger.info(f"Feature '{FEATURE_CODE}' registered {len(UI_OBJECTS)} UI objects and {len(RESOURCE_OBJECTS)} resource objects")
        return True
    except Exception as e:
        logger.error(f"Failed to register feature '{FEATURE_CODE}': {e}")
        return False
def _syncTemplateRolesToDb() -> int:
    """
    Sync template roles and their AccessRules to the database.

    Creates global template roles (mandateId=None) if they don't exist, and
    ensures each role's AccessRules match its template definition.

    Returns:
        Number of roles created (0 on error)
    """
    try:
        from modules.interfaces.interfaceDbApp import getRootInterface
        # NOTE: only Role is needed here; AccessRule/AccessRuleContext are
        # imported by _ensureAccessRulesForRole itself (previously imported
        # here as well but unused).
        from modules.datamodels.datamodelRbac import Role

        rootInterface = getRootInterface()
        # Existing template roles for this feature (Pydantic models);
        # mandateId=None marks a global template.
        existingRoles = rootInterface.getRolesByFeatureCode(FEATURE_CODE)
        templateRoles = [r for r in existingRoles if r.mandateId is None]
        existingRoleLabels = {r.roleLabel: str(r.id) for r in templateRoles}
        createdCount = 0
        for roleTemplate in TEMPLATE_ROLES:
            roleLabel = roleTemplate["roleLabel"]
            if roleLabel in existingRoleLabels:
                # Role already exists: only reconcile its AccessRules.
                roleId = existingRoleLabels[roleLabel]
                _ensureAccessRulesForRole(rootInterface, roleId, roleTemplate.get("accessRules", []))
            else:
                # Create a new global template role plus its AccessRules.
                newRole = Role(
                    roleLabel=roleLabel,
                    description=roleTemplate.get("description", {}),
                    featureCode=FEATURE_CODE,
                    mandateId=None,  # Global template
                    featureInstanceId=None,
                    isSystemRole=False
                )
                createdRole = rootInterface.db.recordCreate(Role, newRole.model_dump())
                roleId = createdRole.get("id")
                _ensureAccessRulesForRole(rootInterface, roleId, roleTemplate.get("accessRules", []))
                logger.info(f"Created template role '{roleLabel}' with ID {roleId}")
                createdCount += 1
        if createdCount > 0:
            logger.info(f"Feature '{FEATURE_CODE}': Created {createdCount} template roles")
        return createdCount
    except Exception as e:
        logger.error(f"Error syncing template roles for feature '{FEATURE_CODE}': {e}")
        return 0
def _ensureAccessRulesForRole(rootInterface, roleId: str, ruleTemplates: List[Dict[str, Any]]) -> int:
    """
    Ensure AccessRules exist for a role based on templates.

    Args:
        rootInterface: Root interface instance
        roleId: Role ID
        ruleTemplates: List of rule templates

    Returns:
        Number of rules created
    """
    from modules.datamodels.datamodelRbac import AccessRule, AccessRuleContext

    # Signatures of existing rules (Pydantic models) so duplicates are skipped.
    # IMPORTANT: Use .value for enum comparison, not str() which gives
    # "AccessRuleContext.DATA" in Python 3.11+
    existingSignatures = {
        (rule.context.value if rule.context else None, rule.item)
        for rule in rootInterface.getAccessRulesByRole(roleId)
    }
    # Map template context strings to the enum; unknown strings pass through unchanged.
    contextMap = {
        "UI": AccessRuleContext.UI,
        "DATA": AccessRuleContext.DATA,
        "RESOURCE": AccessRuleContext.RESOURCE,
    }
    createdCount = 0
    for template in ruleTemplates:
        context = template.get("context", "UI")
        item = template.get("item")
        if (context, item) in existingSignatures:
            continue
        newRule = AccessRule(
            roleId=roleId,
            context=contextMap.get(context, context),
            item=item,
            view=template.get("view", False),
            read=template.get("read"),
            create=template.get("create"),
            update=template.get("update"),
            delete=template.get("delete"),
        )
        rootInterface.db.recordCreate(AccessRule, newRule.model_dump())
        createdCount += 1
    if createdCount > 0:
        logger.debug(f"Created {createdCount} AccessRules for role {roleId}")
    return createdCount

View file

@ -1,722 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Chat Playground Feature Routes.
Implements the endpoints for chat playground workflow management as a feature.
"""
import json
import logging
from typing import Optional, Dict, Any
from fastapi import APIRouter, HTTPException, Depends, Body, Path, Query, Request, status
# Import auth modules
from modules.auth import limiter, getRequestContext, RequestContext
# Import interfaces
from modules.interfaces import interfaceDbChat
from modules.interfaces.interfaceDbBilling import getInterface as _getBillingInterface
# Import models
from modules.datamodels.datamodelChat import (
ChatWorkflow,
ChatMessage,
ChatLog,
UserInputRequest,
WorkflowModeEnum,
)
from modules.datamodels.datamodelPagination import (
PaginationParams,
PaginatedResponse,
PaginationMetadata,
normalize_pagination_dict,
)
# Import workflow control functions
from modules.workflows.automation import chatStart, chatStop
from modules.features.chatplayground.mainChatplayground import getChatplaygroundServices
from modules.shared.attributeUtils import getModelAttributeDefinitions
# Module-level logger for this routes module
logger = logging.getLogger(__name__)

# Attribute definitions for ChatWorkflow, computed once at import time and
# served verbatim by the /workflows/attributes endpoint
workflowAttributes = getModelAttributeDefinitions(ChatWorkflow)

# Router for all chat playground feature endpoints (mounted under /api/chatplayground)
router = APIRouter(
    prefix="/api/chatplayground",
    tags=["Chat Playground Feature"],
    responses={404: {"description": "Not found"}}
)
def _getServiceChat(context: RequestContext, featureInstanceId: Optional[str] = None):
    """Get chat interface with feature instance context.

    Args:
        context: Authenticated request context (user + mandate).
        featureInstanceId: Optional feature instance ID for RBAC scoping.
    """
    return interfaceDbChat.getInterface(
        context.user,
        # mandateId may be a non-string (e.g. UUID); the interface receives a string or None.
        mandateId=str(context.mandateId) if context.mandateId else None,
        featureInstanceId=featureInstanceId
    )
def _validateInstanceAccess(instanceId: str, context: RequestContext) -> str:
    """
    Validate that user has access to the feature instance.

    Args:
        instanceId: Feature instance ID
        context: Request context

    Returns:
        mandateId for the instance

    Raises:
        HTTPException if access is denied
    """
    from modules.interfaces.interfaceDbApp import getRootInterface

    rootInterface = getRootInterface()
    # Instance lookup returns a Pydantic model (or nothing).
    instance = rootInterface.getFeatureInstance(instanceId)
    if not instance:
        raise HTTPException(status_code=404, detail=f"Feature instance {instanceId} not found")
    # Access entry must exist AND be enabled for this user/instance pair.
    featureAccess = rootInterface.getFeatureAccess(str(context.user.id), instanceId)
    if not featureAccess or not featureAccess.enabled:
        raise HTTPException(status_code=403, detail="Access denied to this feature instance")
    return str(instance.mandateId) if instance.mandateId else None
# Workflow start endpoint
@router.post("/{instanceId}/start", response_model=ChatWorkflow)
@limiter.limit("120/minute")
async def start_workflow(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: Optional[str] = Query(None, description="Optional ID of the workflow to continue"),
    workflowMode: WorkflowModeEnum = Query(..., description="Workflow mode: 'Dynamic' or 'Automation' (mandatory)"),
    userInput: UserInputRequest = Body(...),
    context: RequestContext = Depends(getRequestContext)
) -> ChatWorkflow:
    """
    Starts a new workflow or continues an existing one.

    Args:
        instanceId: Feature instance ID
        workflowMode: "Dynamic" for iterative dynamic-style processing, "Automation" for automated workflow execution
    """
    try:
        # RBAC gate; also resolves the mandate this instance belongs to.
        mandateId = _validateInstanceAccess(instanceId, context)
        # Service hub from the service center (not the automation hub).
        services = getChatplaygroundServices(
            context.user,
            mandateId=mandateId,
            featureInstanceId=instanceId,
        )
        services.featureCode = 'chatplayground'
        # Narrow the AI provider set when the caller restricts it.
        allowedProviders = getattr(userInput, 'allowedProviders', None)
        if allowedProviders:
            services.allowedProviders = allowedProviders
        # Start a new workflow, or continue when workflowId is given.
        return await chatStart(
            context.user,
            userInput,
            workflowMode,
            workflowId,
            mandateId=mandateId,
            featureInstanceId=instanceId,
            featureCode='chatplayground',
            services=services,
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error in start_workflow: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=str(e)
        )
# Stop workflow endpoint (under /workflows/{workflowId}/ for consistency)
@router.post("/{instanceId}/workflows/{workflowId}/stop", response_model=ChatWorkflow)
@limiter.limit("120/minute")
async def stop_workflow(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: str = Path(..., description="ID of the workflow to stop"),
    context: RequestContext = Depends(getRequestContext)
) -> ChatWorkflow:
    """Stops a running workflow."""
    try:
        # RBAC gate; also resolves the mandate this instance belongs to.
        mandateId = _validateInstanceAccess(instanceId, context)
        services = getChatplaygroundServices(
            context.user,
            mandateId=mandateId,
            featureInstanceId=instanceId,
        )
        services.featureCode = 'chatplayground'
        # featureInstanceId is forwarded so RBAC filtering applies to the stop as well.
        return await chatStop(
            context.user,
            workflowId,
            mandateId=mandateId,
            featureInstanceId=instanceId,
            featureCode='chatplayground',
            services=services,
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error in stop_workflow: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=str(e)
        )
# Unified Chat Data Endpoint for Polling (under /workflows/{workflowId}/ for consistency)
@router.get("/{instanceId}/workflows/{workflowId}/chatData")
@limiter.limit("120/minute")
def get_workflow_chat_data(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: str = Path(..., description="ID of the workflow"),
    afterTimestamp: Optional[float] = Query(None, description="Unix timestamp to get data after"),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """
    Get unified chat data (messages, logs, stats) for a workflow with timestamp-based selective data transfer.
    Returns all data types in chronological order based on _createdAt timestamp.
    """
    try:
        _validateInstanceAccess(instanceId, context)
        chatInterface = _getServiceChat(context, featureInstanceId=instanceId)
        # 404 before doing any billing lookups.
        if not chatInterface.getWorkflow(workflowId):
            raise HTTPException(
                status_code=404,
                detail=f"Workflow with ID {workflowId} not found"
            )
        # Billing transactions are the single source of truth for workflow cost.
        billingInterface = _getBillingInterface(context.user, context.mandateId)
        workflowCost = billingInterface.getWorkflowCost(workflowId)
        return chatInterface.getUnifiedChatData(workflowId, afterTimestamp, workflowCost=workflowCost)
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting unified chat data: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=500,
            detail=f"Error getting unified chat data: {str(e)}"
        )
# Get workflow attributes (ChatWorkflow model)
@router.get("/{instanceId}/workflows/attributes", response_model=Dict[str, Any])
@limiter.limit("120/minute")
def get_workflow_attributes(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """Get attribute definitions for ChatWorkflow model."""
    # Access check only -- the attribute list itself is computed once at import time.
    _validateInstanceAccess(instanceId, context)
    return {"attributes": workflowAttributes}
# Get workflows for this instance
@router.get("/{instanceId}/workflows", response_model=PaginatedResponse[ChatWorkflow])
@limiter.limit("120/minute")
def get_workflows(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
    page: int = Query(1, ge=1, description="Page number (legacy)"),
    pageSize: int = Query(20, ge=1, le=100, description="Items per page (legacy)"),
    context: RequestContext = Depends(getRequestContext)
) -> PaginatedResponse[ChatWorkflow]:
    """
    Get all workflows for this feature instance with optional pagination.

    The `pagination` query parameter (JSON-encoded PaginationParams) takes
    precedence; the legacy `page`/`pageSize` parameters are used as a fallback
    when it is absent or decodes to an empty object.

    Raises:
        HTTPException: 400 on malformed pagination JSON; 403/404 from access
            validation; 500 on unexpected errors.
    """
    try:
        _validateInstanceAccess(instanceId, context)
        chatInterface = _getServiceChat(context, featureInstanceId=instanceId)
        paginationParams = None
        if pagination:
            try:
                paginationDict = json.loads(pagination)
                if paginationDict:
                    paginationDict = normalize_pagination_dict(paginationDict)
                    paginationParams = PaginationParams(**paginationDict)
            except (json.JSONDecodeError, ValueError) as e:
                raise HTTPException(
                    status_code=400,
                    detail=f"Invalid pagination parameter: {str(e)}"
                )
        # BUGFIX: previously a pagination value that decoded to an empty object
        # (e.g. "{}") skipped BOTH the JSON path and the legacy fallback, so the
        # request ran unpaginated. Fall back to legacy page/pageSize whenever no
        # explicit params were derived.
        if paginationParams is None:
            paginationParams = PaginationParams(page=page, pageSize=pageSize)
        result = chatInterface.getWorkflows(pagination=paginationParams)
        return PaginatedResponse(
            items=result.items,
            pagination=PaginationMetadata(
                currentPage=paginationParams.page,
                pageSize=paginationParams.pageSize,
                totalItems=result.totalItems,
                totalPages=result.totalPages,
                sort=paginationParams.sort,
                filters=paginationParams.filters
            )
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting workflows: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=500,
            detail=f"Error getting workflows: {str(e)}"
        )
# Action Discovery Endpoints (must be before /{workflowId} to avoid path conflict)
@router.get("/{instanceId}/workflows/actions", response_model=Dict[str, Any])
@limiter.limit("120/minute")
def get_all_workflow_actions(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """Get all available workflow actions for the current user (filtered by RBAC)."""
    try:
        mandateId = _validateInstanceAccess(instanceId, context)
        services = getChatplaygroundServices(
            context.user,
            mandateId=mandateId,
            featureInstanceId=instanceId,
        )
        from modules.workflows.processing.shared.methodDiscovery import discoverMethods, methods
        discoverMethods(services)
        allActions = []
        for methodName, methodInfo in methods.items():
            # Entries whose registry key starts with 'Method' are skipped.
            if methodName.startswith('Method'):
                continue
            methodInstance = methodInfo['instance']
            for actionName, actionInfo in methodInstance.actions.items():
                allActions.append({
                    "module": methodInstance.name,
                    "actionId": f"{methodInstance.name}.{actionName}",
                    "name": actionName,
                    "description": actionInfo.get('description', ''),
                    "parameters": actionInfo.get('parameters', {})
                })
        return {"actions": allActions}
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting all actions: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to get actions: {str(e)}")
@router.get("/{instanceId}/workflows/actions/{method}", response_model=Dict[str, Any])
@limiter.limit("120/minute")
def get_method_workflow_actions(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    method: str = Path(..., description="Method name (e.g., 'outlook', 'sharepoint')"),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """Get all available actions for a specific method."""
    try:
        mandateId = _validateInstanceAccess(instanceId, context)
        services = getChatplaygroundServices(
            context.user,
            mandateId=mandateId,
            featureInstanceId=instanceId,
        )
        from modules.workflows.processing.shared.methodDiscovery import discoverMethods, methods
        discoverMethods(services)
        # Locate the method instance by its public name (not the registry key).
        methodInstance = next(
            (info['instance'] for info in methods.values() if info['instance'].name == method),
            None
        )
        if methodInstance is None:
            raise HTTPException(status_code=404, detail=f"Method '{method}' not found")
        actions = [
            {
                "actionId": f"{methodInstance.name}.{actionName}",
                "name": actionName,
                "description": actionInfo.get('description', ''),
                "parameters": actionInfo.get('parameters', {})
            }
            for actionName, actionInfo in methodInstance.actions.items()
        ]
        return {"module": methodInstance.name, "description": methodInstance.description, "actions": actions}
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting actions for method {method}: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to get actions for method {method}: {str(e)}")
@router.get("/{instanceId}/workflows/actions/{method}/{action}", response_model=Dict[str, Any])
@limiter.limit("120/minute")
def get_action_schema(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    method: str = Path(..., description="Method name (e.g., 'outlook', 'sharepoint')"),
    action: str = Path(..., description="Action name (e.g., 'readEmails', 'uploadDocument')"),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """Get action schema with parameter definitions for a specific action."""
    try:
        mandateId = _validateInstanceAccess(instanceId, context)
        services = getChatplaygroundServices(
            context.user,
            mandateId=mandateId,
            featureInstanceId=instanceId,
        )
        from modules.workflows.processing.shared.methodDiscovery import discoverMethods, methods
        discoverMethods(services)
        # Locate the method instance by its public name (not the registry key).
        methodInstance = next(
            (info['instance'] for info in methods.values() if info['instance'].name == method),
            None
        )
        if methodInstance is None:
            raise HTTPException(status_code=404, detail=f"Method '{method}' not found")
        if action not in methodInstance.actions:
            raise HTTPException(status_code=404, detail=f"Action '{action}' not found in method '{method}'")
        actionInfo = methodInstance.actions[action]
        return {
            "method": methodInstance.name,
            "action": action,
            "actionId": f"{methodInstance.name}.{action}",
            "description": actionInfo.get('description', ''),
            "parameters": actionInfo.get('parameters', {})
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting action schema for {method}.{action}: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to get action schema: {str(e)}")
# Get single workflow by ID
@router.get("/{instanceId}/workflows/{workflowId}", response_model=ChatWorkflow)
@limiter.limit("120/minute")
def get_workflow(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: str = Path(..., description="ID of the workflow"),
    context: RequestContext = Depends(getRequestContext)
) -> ChatWorkflow:
    """Get workflow by ID."""
    try:
        _validateInstanceAccess(instanceId, context)
        chatInterface = _getServiceChat(context, featureInstanceId=instanceId)
        workflow = chatInterface.getWorkflow(workflowId)
        if workflow:
            return workflow
        raise HTTPException(status_code=404, detail="Workflow not found")
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting workflow: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to get workflow: {str(e)}")
# Update workflow
@router.put("/{instanceId}/workflows/{workflowId}", response_model=ChatWorkflow)
@limiter.limit("120/minute")
def update_workflow(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: str = Path(..., description="ID of the workflow to update"),
    workflowData: Dict[str, Any] = Body(...),
    context: RequestContext = Depends(getRequestContext)
) -> ChatWorkflow:
    """Update workflow by ID."""
    try:
        _validateInstanceAccess(instanceId, context)
        chatInterface = _getServiceChat(context, featureInstanceId=instanceId)
        # Existence first, then record-level RBAC, then the actual update.
        if not chatInterface.getWorkflow(workflowId):
            raise HTTPException(status_code=404, detail="Workflow not found")
        if not chatInterface.checkRbacPermission(ChatWorkflow, "update", workflowId):
            raise HTTPException(status_code=403, detail="You don't have permission to update this workflow")
        updatedWorkflow = chatInterface.updateWorkflow(workflowId, workflowData)
        if not updatedWorkflow:
            raise HTTPException(status_code=500, detail="Failed to update workflow")
        return updatedWorkflow
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error updating workflow: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to update workflow: {str(e)}")
# Delete workflow
@router.delete("/{instanceId}/workflows/{workflowId}")
@limiter.limit("120/minute")
def delete_workflow(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: str = Path(..., description="ID of the workflow to delete"),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """Deletes a workflow and its associated data."""
    try:
        _validateInstanceAccess(instanceId, context)
        chatInterface = _getServiceChat(context, featureInstanceId=instanceId)
        if not chatInterface.getWorkflow(workflowId):
            raise HTTPException(status_code=404, detail=f"Workflow with ID {workflowId} not found")
        # Record-level RBAC: deletion requires explicit permission on this workflow.
        if not chatInterface.checkRbacPermission(ChatWorkflow, "delete", workflowId):
            raise HTTPException(status_code=403, detail="You don't have permission to delete this workflow")
        if not chatInterface.deleteWorkflow(workflowId):
            raise HTTPException(status_code=500, detail="Failed to delete workflow")
        return {"id": workflowId, "message": "Workflow and associated data deleted successfully"}
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error deleting workflow: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Error deleting workflow: {str(e)}")
# Get workflow status
@router.get("/{instanceId}/workflows/{workflowId}/status", response_model=ChatWorkflow)
@limiter.limit("120/minute")
def get_workflow_status(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: str = Path(..., description="ID of the workflow"),
    context: RequestContext = Depends(getRequestContext)
) -> ChatWorkflow:
    """Get the current status of a workflow."""
    try:
        _validateInstanceAccess(instanceId, context)
        chatInterface = _getServiceChat(context, featureInstanceId=instanceId)
        # The full workflow record is returned; status is one of its fields.
        workflow = chatInterface.getWorkflow(workflowId)
        if workflow:
            return workflow
        raise HTTPException(status_code=404, detail=f"Workflow with ID {workflowId} not found")
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting workflow status: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Error getting workflow status: {str(e)}")
# Get workflow logs
@router.get("/{instanceId}/workflows/{workflowId}/logs", response_model=PaginatedResponse[ChatLog])
@limiter.limit("120/minute")
def get_workflow_logs(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: str = Path(..., description="ID of the workflow"),
    logId: Optional[str] = Query(None, description="Optional log ID for selective data transfer"),
    pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
    context: RequestContext = Depends(getRequestContext)
) -> PaginatedResponse[ChatLog]:
    """Get logs for a workflow with optional pagination.

    When ``logId`` is supplied and found, only logs AFTER it (in result order)
    are returned and pagination metadata is dropped -- this supports
    incremental polling by clients that remember the last log they saw.
    """
    try:
        _validateInstanceAccess(instanceId, context)
        chatInterface = _getServiceChat(context, featureInstanceId=instanceId)
        workflow = chatInterface.getWorkflow(workflowId)
        if not workflow:
            raise HTTPException(status_code=404, detail=f"Workflow with ID {workflowId} not found")
        # NOTE: unlike get_workflows there is no legacy page/pageSize fallback;
        # absent pagination passes None through to getLogs.
        paginationParams = None
        if pagination:
            try:
                paginationDict = json.loads(pagination)
                if paginationDict:
                    paginationDict = normalize_pagination_dict(paginationDict)
                    paginationParams = PaginationParams(**paginationDict)
            except (json.JSONDecodeError, ValueError) as e:
                raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
        result = chatInterface.getLogs(workflowId, pagination=paginationParams)
        if logId:
            # result is a paginated object (with .items) when params were given,
            # otherwise it is used directly as a sequence.
            allLogs = result.items if paginationParams else result
            logIndex = next((i for i, log in enumerate(allLogs) if log.id == logId), -1)
            if logIndex >= 0:
                # Return only entries after logId; an unknown logId falls through
                # to the full response below.
                filteredLogs = allLogs[logIndex + 1:]
                return PaginatedResponse(items=filteredLogs, pagination=None)
        if paginationParams:
            return PaginatedResponse(
                items=result.items,
                pagination=PaginationMetadata(
                    currentPage=paginationParams.page,
                    pageSize=paginationParams.pageSize,
                    totalItems=result.totalItems,
                    totalPages=result.totalPages,
                    sort=paginationParams.sort,
                    filters=paginationParams.filters
                )
            )
        return PaginatedResponse(items=result, pagination=None)
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting workflow logs: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Error getting workflow logs: {str(e)}")
# Get workflow messages
@router.get("/{instanceId}/workflows/{workflowId}/messages", response_model=PaginatedResponse[ChatMessage])
@limiter.limit("120/minute")
def get_workflow_messages(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: str = Path(..., description="ID of the workflow"),
    messageId: Optional[str] = Query(None, description="Optional message ID for selective data transfer"),
    pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
    context: RequestContext = Depends(getRequestContext)
) -> PaginatedResponse[ChatMessage]:
    """Get messages for a workflow with optional pagination.

    When ``messageId`` is supplied and found, only messages AFTER it (in result
    order) are returned and pagination metadata is dropped -- this supports
    incremental polling by clients that remember the last message they saw.
    """
    try:
        _validateInstanceAccess(instanceId, context)
        chatInterface = _getServiceChat(context, featureInstanceId=instanceId)
        workflow = chatInterface.getWorkflow(workflowId)
        if not workflow:
            raise HTTPException(status_code=404, detail=f"Workflow with ID {workflowId} not found")
        # NOTE: no legacy page/pageSize fallback here; absent pagination passes
        # None through to getMessages.
        paginationParams = None
        if pagination:
            try:
                paginationDict = json.loads(pagination)
                if paginationDict:
                    paginationDict = normalize_pagination_dict(paginationDict)
                    paginationParams = PaginationParams(**paginationDict)
            except (json.JSONDecodeError, ValueError) as e:
                raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
        result = chatInterface.getMessages(workflowId, pagination=paginationParams)
        if messageId:
            # result is a paginated object (with .items) when params were given,
            # otherwise it is used directly as a sequence.
            allMessages = result.items if paginationParams else result
            messageIndex = next((i for i, msg in enumerate(allMessages) if msg.id == messageId), -1)
            if messageIndex >= 0:
                # Return only entries after messageId; an unknown messageId falls
                # through to the full response below.
                filteredMessages = allMessages[messageIndex + 1:]
                return PaginatedResponse(items=filteredMessages, pagination=None)
        if paginationParams:
            return PaginatedResponse(
                items=result.items,
                pagination=PaginationMetadata(
                    currentPage=paginationParams.page,
                    pageSize=paginationParams.pageSize,
                    totalItems=result.totalItems,
                    totalPages=result.totalPages,
                    sort=paginationParams.sort,
                    filters=paginationParams.filters
                )
            )
        return PaginatedResponse(items=result, pagination=None)
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting workflow messages: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Error getting workflow messages: {str(e)}")
# Delete message from workflow
@router.delete("/{instanceId}/workflows/{workflowId}/messages/{messageId}")
@limiter.limit("120/minute")
def delete_workflow_message(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: str = Path(..., description="ID of the workflow"),
    messageId: str = Path(..., description="ID of the message to delete"),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """Delete a message from a workflow."""
    try:
        _validateInstanceAccess(instanceId, context)
        chatInterface = _getServiceChat(context, featureInstanceId=instanceId)
        if not chatInterface.getWorkflow(workflowId):
            raise HTTPException(status_code=404, detail=f"Workflow with ID {workflowId} not found")
        # deleteMessage reports success; a falsy result means the message was not found.
        if not chatInterface.deleteMessage(workflowId, messageId):
            raise HTTPException(status_code=404, detail=f"Message with ID {messageId} not found in workflow {workflowId}")
        return {"workflowId": workflowId, "messageId": messageId, "message": "Message deleted successfully"}
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error deleting message: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Error deleting message: {str(e)}")
# Delete file from message
@router.delete("/{instanceId}/workflows/{workflowId}/messages/{messageId}/files/{fileId}")
@limiter.limit("120/minute")
def delete_file_from_message(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: str = Path(..., description="ID of the workflow"),
    messageId: str = Path(..., description="ID of the message"),
    fileId: str = Path(..., description="ID of the file to delete"),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """Delete a file reference from a message in a workflow."""
    try:
        _validateInstanceAccess(instanceId, context)
        chatInterface = _getServiceChat(context, featureInstanceId=instanceId)
        if not chatInterface.getWorkflow(workflowId):
            raise HTTPException(status_code=404, detail=f"Workflow with ID {workflowId} not found")
        # A falsy result from the interface means the file reference was not found.
        if not chatInterface.deleteFileFromMessage(workflowId, messageId, fileId):
            raise HTTPException(status_code=404, detail=f"File with ID {fileId} not found in message {messageId}")
        return {"workflowId": workflowId, "messageId": messageId, "fileId": fileId, "message": "File reference deleted successfully"}
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error deleting file reference: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Error deleting file reference: {str(e)}")

View file

@ -1 +0,0 @@
"""CodeEditor Feature - Cursor-style AI file editing via chat interface."""

View file

@ -1,280 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""CodeEditor processor -- single-shot (Phase 1) and agent loop (Phase 2).
Orchestrates file loading, prompt building, AI calls, response parsing, and SSE emission."""
import logging
from typing import List, Dict, Any
from modules.features.codeeditor import fileContextManager, promptAssembly, responseParser
from modules.features.codeeditor.datamodelCodeeditor import (
FileEditProposal, SegmentTypeEnum, AgentState
)
from modules.features.codeeditor import toolRegistry
from modules.shared.timeUtils import getUtcTimestamp
logger = logging.getLogger(__name__)
async def processMessage(
    workflowId: str,
    userPrompt: str,
    selectedFileIds: List[str],
    dbManagement,
    interfaceAi,
    chatInterface,
    eventManager,
    agentMode: bool = False
):
    """Process a user message. Dispatches to single-shot or agent loop based on mode."""
    if not agentMode:
        # Phase 1: one AI call over the explicitly selected files.
        await _processSingleShot(
            workflowId, userPrompt, selectedFileIds, dbManagement, interfaceAi, chatInterface, eventManager
        )
        return
    # Phase 2: agent loop; selectedFileIds is not forwarded -- the agent path
    # builds its own file-list context.
    await _processAgentMessage(
        workflowId, userPrompt, dbManagement, interfaceAi, chatInterface, eventManager
    )
async def _processSingleShot(
    workflowId, userPrompt, selectedFileIds, dbManagement, interfaceAi, chatInterface, eventManager
):
    """Phase 1 flow: one AI call over pre-loaded file contexts, streamed via SSE.

    Any exception is caught, logged, and reported to the client as an 'error' event.
    """
    try:
        await _emitStatus(eventManager, workflowId, "Loading files...")
        contexts = await fileContextManager.loadFileContexts(dbManagement, selectedFileIds)
        await _emitStatus(eventManager, workflowId, "Building prompt...")
        history = _loadChatHistory(chatInterface, workflowId)
        request = promptAssembly.buildRequest(userPrompt, contexts, history)
        await _emitStatus(eventManager, workflowId, "AI is processing...")
        response = await interfaceAi.callWithTextContext(request)
        if response.errorCount > 0:
            # The AI layer reported a failure; surface it to the client and stop.
            await _emitError(eventManager, workflowId, response.content)
            return
        parsedSegments = responseParser.parseResponse(response.content)
        await _emitSegments(eventManager, workflowId, parsedSegments, contexts)
        _logAiStats(response, workflowId)
        await eventManager.emit_event(workflowId, "complete", {
            "workflowId": workflowId,
            "modelName": response.modelName,
            "priceCHF": response.priceCHF,
            "processingTime": response.processingTime
        })
    except Exception as e:
        logger.error(f"CodeEditor single-shot failed for {workflowId}: {e}", exc_info=True)
        await eventManager.emit_event(workflowId, "error", {
            "workflowId": workflowId, "error": str(e)
        })
async def _processAgentMessage(
    workflowId, userPrompt, dbManagement, interfaceAi, chatInterface, eventManager
):
    """Phase 2: Agent loop -- multiple AI calls with tool execution until done.

    Each round: emit progress, call the AI, stream text/edit segments, then run
    any requested tools and feed their results back as the next request. The
    loop ends when the AI answers without tool calls ("completed"), an AI call
    fails ("error"), or maxRounds is reached ("max_rounds").
    """
    state = AgentState(workflowId=workflowId)
    try:
        await _emitStatus(eventManager, workflowId, "Agent: Scanning available files...")
        # Metadata-only file list; the agent reads contents on demand via tools.
        fileListContext = fileContextManager.buildFileListContext(dbManagement)
        state.conversationHistory.append({"role": "user", "content": userPrompt})
        # First request carries the task directly; history is deliberately empty
        # because the user prompt is embedded in the request itself.
        aiRequest = promptAssembly.buildAgentRequest(
            userPrompt=userPrompt,
            fileListContext=fileListContext,
            conversationHistory=[]
        )
        while state.status == "running" and state.currentRound < state.maxRounds:
            state.currentRound += 1
            state.totalAiCalls += 1
            await _emitStatus(eventManager, workflowId,
                f"Agent round {state.currentRound}: AI is thinking...")
            # Progress counters shown live in the client UI.
            await eventManager.emit_event(workflowId, "chatdata", {
                "type": "agent_progress",
                "item": {
                    "round": state.currentRound,
                    "totalAiCalls": state.totalAiCalls,
                    "totalToolCalls": state.totalToolCalls,
                    "costCHF": round(state.totalCostCHF, 4),
                }
            })
            aiResponse = await interfaceAi.callWithTextContext(aiRequest)
            # Cost/time are accumulated even for failed calls.
            state.totalCostCHF += aiResponse.priceCHF
            state.totalProcessingTime += aiResponse.processingTime
            if aiResponse.errorCount > 0:
                logger.error(f"Agent AI call failed in round {state.currentRound}: {aiResponse.content}")
                await _emitError(eventManager, workflowId, aiResponse.content)
                state.status = "error"
                break
            _logAiStats(aiResponse, workflowId)
            state.conversationHistory.append({"role": "assistant", "content": aiResponse.content})
            segments = responseParser.parseResponse(aiResponse.content)
            # Stream text and file-edit segments immediately; tool calls are handled below.
            textAndEditSegments = [s for s in segments if s.type != SegmentTypeEnum.TOOL_CALL]
            if textAndEditSegments:
                await _emitSegments(eventManager, workflowId, textAndEditSegments, [])
            toolCallSegments = [s for s in segments if s.type == SegmentTypeEnum.TOOL_CALL]
            if not toolCallSegments:
                # No tool calls means the agent considers the task finished.
                state.status = "completed"
                break
            toolResultTexts = []
            for tc in toolCallSegments:
                state.totalToolCalls += 1
                await _emitStatus(eventManager, workflowId,
                    f"Agent: Running {tc.toolName}...")
                result = await toolRegistry.dispatch(tc.toolName, tc.toolArgs or {}, dbManagement)
                toolResultTexts.append(f"[{tc.toolName}] (success={result.success}):\n{result.result}")
                logger.info(f"Agent tool {tc.toolName}: success={result.success}, time={result.executionTime:.2f}s")
            # All tool results of this round are folded into a single history entry.
            combinedResults = "\n\n".join(toolResultTexts)
            state.conversationHistory.append({
                "role": "tool_result",
                "content": combinedResults,
                "toolName": "batch"
            })
            # Next round: no fresh user prompt, full conversation as context.
            aiRequest = promptAssembly.buildAgentRequest(
                userPrompt=None,
                fileListContext=fileListContext,
                conversationHistory=state.conversationHistory
            )
        # Loop fell out on the round cap while still running: flag and tell the user.
        if state.currentRound >= state.maxRounds and state.status == "running":
            state.status = "max_rounds"
            await eventManager.emit_event(workflowId, "chatdata", {
                "type": "message",
                "item": {
                    "role": "system",
                    "content": f"Agent stopped: maximum rounds ({state.maxRounds}) reached.",
                    "createdAt": getUtcTimestamp()
                }
            })
        await eventManager.emit_event(workflowId, "chatdata", {
            "type": "agent_summary",
            "item": {
                "rounds": state.currentRound,
                "totalAiCalls": state.totalAiCalls,
                "totalToolCalls": state.totalToolCalls,
                "costCHF": round(state.totalCostCHF, 4),
                "processingTime": round(state.totalProcessingTime, 1),
                "status": state.status,
            }
        })
        await eventManager.emit_event(workflowId, "complete", {
            "workflowId": workflowId,
            "agentRounds": state.currentRound,
            "totalCostCHF": round(state.totalCostCHF, 4),
            "processingTime": round(state.totalProcessingTime, 1)
        })
    except Exception as e:
        logger.error(f"CodeEditor agent loop failed for {workflowId}: {e}", exc_info=True)
        await eventManager.emit_event(workflowId, "error", {
            "workflowId": workflowId, "error": str(e)
        })
# ---------------------------------------------------------------------------
# Shared helpers
# ---------------------------------------------------------------------------
async def _emitStatus(eventManager, workflowId: str, label: str):
    """Push a transient status line to the client over the chatdata channel."""
    payload = {"type": "status", "label": label}
    await eventManager.emit_event(workflowId, "chatdata", payload)
async def _emitError(eventManager, workflowId: str, errorMsg: str):
    """Report a failure twice: as an assistant chat message and as a terminal error event."""
    chatItem = {"role": "assistant", "content": f"Error: {errorMsg}"}
    await eventManager.emit_event(workflowId, "chatdata", {"type": "message", "item": chatItem})
    await eventManager.emit_event(workflowId, "error", {"workflowId": workflowId, "error": errorMsg})
async def _emitSegments(eventManager, workflowId: str, segments, fileContexts):
    """Stream parsed response segments to the client.

    Every segment becomes a chat message; FILE_EDIT segments additionally
    produce a file_edit_proposal event for the accept/reject UI.
    """
    for seg in segments:
        await eventManager.emit_event(workflowId, "chatdata", {
            "type": "message",
            "item": {
                "role": "assistant",
                "content": seg.content,
                "type": seg.type.value,
                "createdAt": getUtcTimestamp()
            }
        })
        if seg.type != SegmentTypeEnum.FILE_EDIT:
            continue
        proposal = FileEditProposal(
            workflowId=workflowId,
            fileId=_resolveFileId(seg.fileName, fileContexts),
            fileName=seg.fileName,
            operation="edit",
            oldContent=seg.oldContent,
            newContent=seg.newContent
        )
        await eventManager.emit_event(workflowId, "chatdata", {
            "type": "file_edit_proposal", "item": proposal.model_dump()
        })
def _loadChatHistory(chatInterface, workflowId: str) -> List[Dict[str, Any]]:
    """Return prior messages as normalized {'role', 'content'} dicts.

    Accepts both dict-shaped and attribute-shaped messages; returns an
    empty list when there is no history or loading fails (logged, not raised).
    """
    try:
        rawMessages = chatInterface.getMessages(workflowId) or []
        normalized: List[Dict[str, Any]] = []
        for entry in rawMessages:
            if isinstance(entry, dict):
                normalized.append({"role": entry.get("role", "unknown"),
                                   "content": entry.get("content", "")})
            else:
                normalized.append({"role": getattr(entry, "role", "unknown"),
                                   "content": getattr(entry, "content", "")})
        return normalized
    except Exception as e:
        logger.warning(f"Could not load chat history for {workflowId}: {e}")
        return []
def _resolveFileId(fileName: str, fileContexts) -> str:
    """Map a fileName back to its fileId; fall back to a synthetic 'unknown-' id."""
    matches = (fc.fileId for fc in fileContexts if fc.fileName == fileName)
    return next(matches, f"unknown-{fileName}")
def _logAiStats(aiResponse, workflowId: str):
    """Log one AI call's model, provider, cost, latency, and payload sizes."""
    details = ", ".join([
        f"model={aiResponse.modelName}",
        f"provider={aiResponse.provider}",
        f"cost={aiResponse.priceCHF:.4f} CHF",
        f"time={aiResponse.processingTime:.1f}s",
        f"sent={aiResponse.bytesSent}B, received={aiResponse.bytesReceived}B",
    ])
    logger.info(f"CodeEditor AI call for {workflowId}: {details}")

View file

@ -1,122 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Data models for the CodeEditor feature."""
from typing import List, Dict, Any, Optional
from enum import Enum
from pydantic import BaseModel, Field
from modules.shared.timeUtils import getUtcTimestamp
import uuid
class SegmentTypeEnum(str, Enum):
    """Kinds of segments an AI response is parsed into (see responseParser)."""
    TEXT = "text"
    CODE_BLOCK = "code_block"
    FILE_EDIT = "file_edit"
    TOOL_CALL = "tool_call"
class EditStatusEnum(str, Enum):
    """Lifecycle state of a proposed file edit."""
    PENDING = "pending"
    ACCEPTED = "accepted"
    REJECTED = "rejected"
class FileContext(BaseModel):
    """A text file loaded as context for the AI."""
    fileId: str
    fileName: str
    # None when only metadata is loaded (e.g. agent file listing).
    content: Optional[str] = None
    mimeType: str
    sizeBytes: int = 0
    # Last-modified timestamp; presumably epoch seconds (fed from _modifiedAt
    # or creationDate in listTextFiles) -- TODO confirm unit at the source.
    modifiedAt: Optional[float] = None
    tags: List[str] = Field(default_factory=list)
class ResponseSegment(BaseModel):
    """A parsed segment from the AI response."""
    type: SegmentTypeEnum
    content: str
    # CODE_BLOCK only: syntax-highlighting language hint.
    language: Optional[str] = None
    # FILE_EDIT only: target file and exact old/new text spans.
    fileId: Optional[str] = None
    fileName: Optional[str] = None
    oldContent: Optional[str] = None
    newContent: Optional[str] = None
    # TOOL_CALL only: tool name and parsed JSON arguments.
    toolName: Optional[str] = None
    toolArgs: Optional[Dict[str, Any]] = None
class FileEditProposal(BaseModel):
    """A proposed file edit from the AI, awaiting user accept/reject."""
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    workflowId: str
    fileId: str
    fileName: str
    operation: str = "edit"
    # None means the edit had no oldContent section (e.g. pure addition).
    oldContent: Optional[str] = None
    newContent: str
    diffSummary: Optional[str] = None
    status: EditStatusEnum = EditStatusEnum.PENDING
    createdAt: float = Field(default_factory=getUtcTimestamp)
class FileVersion(BaseModel):
    """A new version of a file created after accepting an edit proposal."""
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    sourceFileId: str
    editProposalId: str
    newFileId: str
    createdAt: float = Field(default_factory=getUtcTimestamp)
class AgentState(BaseModel):
    """Tracks state across an agent loop execution."""
    workflowId: str
    currentRound: int = 0
    # Hard cap on agent rounds; the loop reports status "max_rounds" when hit.
    maxRounds: int = 50
    totalAiCalls: int = 0
    totalToolCalls: int = 0
    totalCostCHF: float = 0.0
    totalProcessingTime: float = 0.0
    conversationHistory: List[Dict[str, Any]] = Field(default_factory=list)
    # One of: "running", "completed", "error", "max_rounds" (set by the processor).
    status: str = "running"
class ToolResult(BaseModel):
    """Result from executing a tool."""
    toolName: str
    result: str
    success: bool = True
    executionTime: float = 0.0
# MIME types accepted as editable text (matched exactly, lowercased).
TEXT_MIME_TYPES = {
    "text/plain", "text/markdown", "text/html", "text/css", "text/csv",
    "text/xml", "text/yaml", "text/x-python", "text/x-java",
    "text/javascript", "text/x-typescript", "text/x-sql",
    "application/json", "application/xml", "application/yaml",
    "application/x-yaml", "application/javascript",
}
# File extensions accepted as editable text when the MIME type is inconclusive.
TEXT_EXTENSIONS = {
    ".md", ".txt", ".json", ".yaml", ".yml", ".xml", ".csv",
    ".py", ".js", ".ts", ".tsx", ".jsx", ".html", ".htm", ".css", ".scss",
    ".sql", ".sh", ".bash", ".zsh", ".ps1", ".bat",
    ".toml", ".ini", ".cfg", ".conf", ".env", ".gitignore",
    ".dockerfile", ".docker-compose", ".makefile",
    ".java", ".kt", ".go", ".rs", ".rb", ".php", ".swift", ".c", ".cpp", ".h",
    ".r", ".lua", ".dart", ".vue", ".svelte",
}
def isTextFile(mimeType: Optional[str], fileName: Optional[str] = None) -> bool:
    """Decide whether a file can be opened in the editor.

    Checks, in order: exact MIME-type membership in TEXT_MIME_TYPES, any
    'text/' MIME prefix, then the file extension against TEXT_EXTENSIONS.
    """
    if mimeType:
        lowered = mimeType.lower()
        if lowered in TEXT_MIME_TYPES:
            return True
        if lowered.startswith("text/"):
            return True
    if fileName:
        extension = "." + fileName.rsplit(".", 1)[-1].lower() if "." in fileName else ""
        if extension in TEXT_EXTENSIONS:
            return True
    return False

View file

@ -1,84 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""File context manager for CodeEditor feature.
Loads text files from the database and provides them as context for AI calls."""
import logging
from typing import List, Optional
from modules.features.codeeditor.datamodelCodeeditor import FileContext, isTextFile
logger = logging.getLogger(__name__)
async def loadFileContexts(dbManagement, fileIds: List[str]) -> List[FileContext]:
    """Load the requested files as UTF-8 text contexts.

    Files that are missing, not text-like, without stored data, or not valid
    UTF-8 are skipped with a warning rather than failing the whole batch.

    Args:
        dbManagement: interfaceDbManagement instance with user context set
        fileIds: list of file IDs to load
    """
    loaded: List[FileContext] = []
    for currentId in fileIds:
        record = dbManagement.getFile(currentId)
        if not record:
            logger.warning(f"File {currentId} not found or no access")
            continue
        if not isTextFile(record.mimeType, record.fileName):
            logger.warning(f"File {record.fileName} ({record.mimeType}) is not a text file, skipping")
            continue
        rawBytes = dbManagement.getFileData(currentId)
        if not rawBytes:
            logger.warning(f"No data for file {currentId}")
            continue
        try:
            decoded = rawBytes.decode("utf-8")
        except UnicodeDecodeError:
            logger.warning(f"File {record.fileName} is not valid UTF-8, skipping")
            continue
        loaded.append(FileContext(
            fileId=currentId,
            fileName=record.fileName,
            content=decoded,
            mimeType=record.mimeType,
            sizeBytes=record.fileSize
        ))
    logger.info(f"Loaded {len(loaded)} file contexts from {len(fileIds)} requested")
    return loaded
def listTextFiles(dbManagement) -> List[FileContext]:
    """Return metadata-only FileContext entries for every accessible text file.

    Content is deliberately left as None; callers load it on demand.
    """
    candidates = dbManagement.getAllFiles() or []
    results: List[FileContext] = []
    for item in candidates:
        if not isTextFile(item.mimeType, item.fileName):
            continue
        # Prefer the internal modification stamp; fall back to creation date.
        timestamp = getattr(item, "_modifiedAt", None) or getattr(item, "creationDate", None)
        results.append(FileContext(
            fileId=item.id,
            fileName=item.fileName,
            content=None,
            mimeType=item.mimeType,
            sizeBytes=item.fileSize,
            modifiedAt=timestamp
        ))
    return results
def buildFileListContext(dbManagement) -> str:
    """Render the user's text files as a compact bullet list for the agent prompt."""
    available = listTextFiles(dbManagement)
    if not available:
        return "No text files available."
    entries = [f"- {f.fileName} (id: {f.fileId}, size: {f.sizeBytes}B)" for f in available]
    return f"Total: {len(entries)} text files\n" + "\n".join(entries)

View file

@ -1,248 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
CodeEditor Feature Container - Main Module.
Handles feature initialization and RBAC catalog registration.
Cursor-style AI file editing via chat interface.
"""
import logging
from typing import Dict, List, Any
logger = logging.getLogger(__name__)
# Feature identity used for RBAC catalog registration and role lookup.
FEATURE_CODE = "codeeditor"
FEATURE_LABEL = {"en": "Code Editor", "de": "Code Editor", "fr": "Code Editor"}
FEATURE_ICON = "mdi-file-document-edit"
# UI surfaces that can be granted per role (context "UI" in access rules).
UI_OBJECTS = [
    {
        "objectKey": "ui.feature.codeeditor.editor",
        "label": {"en": "Editor", "de": "Editor", "fr": "Editeur"},
        "meta": {"area": "editor"}
    },
    {
        "objectKey": "ui.feature.codeeditor.workflows",
        "label": {"en": "Workflows", "de": "Workflows", "fr": "Workflows"},
        "meta": {"area": "workflows"}
    },
]
# API endpoints that can be granted per role (context "RESOURCE" in access rules).
RESOURCE_OBJECTS = [
    {
        "objectKey": "resource.feature.codeeditor.start",
        "label": {"en": "Start Workflow", "de": "Workflow starten", "fr": "Demarrer workflow"},
        "meta": {"endpoint": "/api/codeeditor/{instanceId}/start/stream", "method": "POST"}
    },
    {
        "objectKey": "resource.feature.codeeditor.stop",
        "label": {"en": "Stop Workflow", "de": "Workflow stoppen", "fr": "Arreter workflow"},
        "meta": {"endpoint": "/api/codeeditor/{instanceId}/{workflowId}/stop", "method": "POST"}
    },
    {
        "objectKey": "resource.feature.codeeditor.chatData",
        "label": {"en": "Get Chat Data", "de": "Chat-Daten abrufen", "fr": "Recuperer donnees chat"},
        "meta": {"endpoint": "/api/codeeditor/{instanceId}/{workflowId}/chatData", "method": "GET"}
    },
    {
        "objectKey": "resource.feature.codeeditor.files",
        "label": {"en": "Manage Files", "de": "Dateien verwalten", "fr": "Gerer fichiers"},
        "meta": {"endpoint": "/api/codeeditor/{instanceId}/files", "method": "GET"}
    },
    {
        "objectKey": "resource.feature.codeeditor.apply",
        "label": {"en": "Apply Edit", "de": "Aenderung anwenden", "fr": "Appliquer modification"},
        "meta": {"endpoint": "/api/codeeditor/{instanceId}/{workflowId}/apply", "method": "POST"}
    },
]
# Role templates synced to the DB at registration (viewer < user < admin).
# DATA permissions use single-letter scopes; "n"/"m"/"a" presumably mean
# none/mandate/all -- TODO confirm against the RBAC datamodel.
TEMPLATE_ROLES = [
    {
        "roleLabel": "codeeditor-viewer",
        "description": {
            "en": "Code Editor Viewer - View editor (read-only)",
            "de": "Code Editor Betrachter - Editor ansehen (nur lesen)",
            "fr": "Visualiseur Code Editor - Consulter l'editeur (lecture seule)"
        },
        "accessRules": [
            {"context": "UI", "item": "ui.feature.codeeditor.editor", "view": True},
            {"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"},
        ]
    },
    {
        "roleLabel": "codeeditor-user",
        "description": {
            "en": "Code Editor User - Use editor and workflows",
            "de": "Code Editor Benutzer - Editor und Workflows nutzen",
            "fr": "Utilisateur Code Editor - Utiliser l'editeur et les workflows"
        },
        "accessRules": [
            {"context": "UI", "item": "ui.feature.codeeditor.editor", "view": True},
            {"context": "UI", "item": "ui.feature.codeeditor.workflows", "view": True},
            {"context": "RESOURCE", "item": "resource.feature.codeeditor.start", "view": True},
            {"context": "RESOURCE", "item": "resource.feature.codeeditor.stop", "view": True},
            {"context": "RESOURCE", "item": "resource.feature.codeeditor.chatData", "view": True},
            {"context": "RESOURCE", "item": "resource.feature.codeeditor.files", "view": True},
            {"context": "RESOURCE", "item": "resource.feature.codeeditor.apply", "view": True},
            {"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "m"},
        ]
    },
    {
        "roleLabel": "codeeditor-admin",
        "description": {
            "en": "Code Editor Admin - Full access to code editor",
            "de": "Code Editor Admin - Vollzugriff auf Code Editor",
            "fr": "Administrateur Code Editor - Acces complet au code editor"
        },
        "accessRules": [
            {"context": "UI", "item": None, "view": True},
            {"context": "RESOURCE", "item": None, "view": True},
            {"context": "DATA", "item": None, "view": True, "read": "a", "create": "a", "update": "a", "delete": "a"},
        ]
    },
]
def getFeatureDefinition() -> Dict[str, Any]:
    """Return the feature definition for registration.

    autoCreateInstance=True asks the platform to create an instance
    automatically -- scope of auto-creation defined by the feature framework.
    """
    return {
        "code": FEATURE_CODE,
        "label": FEATURE_LABEL,
        "icon": FEATURE_ICON,
        "autoCreateInstance": True,
    }
def getUiObjects() -> List[Dict[str, Any]]:
    """Return UI objects for RBAC catalog registration (shared module-level list)."""
    return UI_OBJECTS
def getResourceObjects() -> List[Dict[str, Any]]:
    """Return resource objects for RBAC catalog registration (shared module-level list)."""
    return RESOURCE_OBJECTS
def getTemplateRoles() -> List[Dict[str, Any]]:
    """Return the role templates this feature ships with (shared module-level list)."""
    return TEMPLATE_ROLES
def registerFeature(catalogService) -> bool:
    """Register this feature's UI/resource objects in the RBAC catalog and sync template roles.

    Returns:
        True on success, False when any step raised (the error is logged, not re-raised).
    """
    try:
        for entry in UI_OBJECTS:
            catalogService.registerUiObject(
                featureCode=FEATURE_CODE,
                objectKey=entry["objectKey"],
                label=entry["label"],
                meta=entry.get("meta")
            )
        for entry in RESOURCE_OBJECTS:
            catalogService.registerResourceObject(
                featureCode=FEATURE_CODE,
                objectKey=entry["objectKey"],
                label=entry["label"],
                meta=entry.get("meta")
            )
        _syncTemplateRolesToDb()
        logger.info(f"Feature '{FEATURE_CODE}' registered {len(UI_OBJECTS)} UI objects and {len(RESOURCE_OBJECTS)} resource objects")
        return True
    except Exception as e:
        logger.error(f"Failed to register feature '{FEATURE_CODE}': {e}")
        return False
def _syncTemplateRolesToDb() -> int:
    """Sync TEMPLATE_ROLES (and their AccessRules) into the database.

    Existing template roles (mandateId is None) are kept and only have missing
    AccessRules topped up; roles not yet in the DB are created from scratch.

    Returns:
        Number of newly created roles; 0 on error (errors are logged, not raised).
    """
    try:
        from modules.interfaces.interfaceDbApp import getRootInterface
        # Only Role is needed here; AccessRule/AccessRuleContext are imported
        # where they are actually used, in _ensureAccessRulesForRole.
        from modules.datamodels.datamodelRbac import Role
        rootInterface = getRootInterface()
        existingRoles = rootInterface.getRolesByFeatureCode(FEATURE_CODE)
        # Template roles are the feature-level ones not bound to any mandate.
        templateRoles = [r for r in existingRoles if r.mandateId is None]
        existingRoleLabels = {r.roleLabel: str(r.id) for r in templateRoles}
        createdCount = 0
        for roleTemplate in TEMPLATE_ROLES:
            roleLabel = roleTemplate["roleLabel"]
            if roleLabel in existingRoleLabels:
                roleId = existingRoleLabels[roleLabel]
                _ensureAccessRulesForRole(rootInterface, roleId, roleTemplate.get("accessRules", []))
            else:
                newRole = Role(
                    roleLabel=roleLabel,
                    description=roleTemplate.get("description", {}),
                    featureCode=FEATURE_CODE,
                    mandateId=None,
                    featureInstanceId=None,
                    isSystemRole=False
                )
                createdRole = rootInterface.db.recordCreate(Role, newRole.model_dump())
                roleId = createdRole.get("id")
                _ensureAccessRulesForRole(rootInterface, roleId, roleTemplate.get("accessRules", []))
                logger.info(f"Created template role '{roleLabel}' with ID {roleId}")
                createdCount += 1
        if createdCount > 0:
            logger.info(f"Feature '{FEATURE_CODE}': Created {createdCount} template roles")
        return createdCount
    except Exception as e:
        logger.error(f"Error syncing template roles for feature '{FEATURE_CODE}': {e}")
        return 0
def _ensureAccessRulesForRole(rootInterface, roleId: str, ruleTemplates: List[Dict[str, Any]]) -> int:
    """Create any AccessRules from ruleTemplates the role does not already have.

    Deduplication key is the (context, item) pair of each rule.

    Returns:
        Number of rules created.
    """
    from modules.datamodels.datamodelRbac import AccessRule, AccessRuleContext
    existingSignatures = {
        (rule.context.value if rule.context else None, rule.item)
        for rule in rootInterface.getAccessRulesByRole(roleId)
    }
    contextMap = {
        "UI": AccessRuleContext.UI,
        "DATA": AccessRuleContext.DATA,
        "RESOURCE": AccessRuleContext.RESOURCE,
    }
    createdCount = 0
    for template in ruleTemplates:
        context = template.get("context", "UI")
        item = template.get("item")
        if (context, item) in existingSignatures:
            continue
        # Unknown context strings are passed through unchanged (legacy behavior).
        contextEnum = contextMap.get(context, context)
        newRule = AccessRule(
            roleId=roleId,
            context=contextEnum,
            item=item,
            view=template.get("view", False),
            read=template.get("read"),
            create=template.get("create"),
            update=template.get("update"),
            delete=template.get("delete"),
        )
        rootInterface.db.recordCreate(AccessRule, newRule.model_dump())
        createdCount += 1
    if createdCount > 0:
        logger.debug(f"Created {createdCount} AccessRules for role {roleId}")
    return createdCount

View file

@ -1,183 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Prompt assembly for the CodeEditor feature.
Builds Cursor-style system prompts with file context and format instructions."""
import logging
from typing import List, Optional, Dict, Any
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum
from modules.features.codeeditor.datamodelCodeeditor import FileContext
logger = logging.getLogger(__name__)
SYSTEM_PROMPT = """You are an AI assistant for text and code file editing. You receive files as context and can suggest changes.
## Rules for file edits
- Use ```file_edit``` blocks for file changes
- Each file_edit block must contain: fileName, oldContent (exact text to replace), newContent (replacement text)
- Explain changes in normal text before or after the block
- oldContent must EXACTLY match existing content (including whitespace and indentation)
- You may propose edits to multiple files in one response
## Response format
Normal text is displayed as explanation.
File changes must use this format:
```file_edit
fileName: <filename>
oldContent: |
<exact existing content to replace>
newContent: |
<new replacement content>
```
Code examples (without edits) use standard markdown code blocks:
```language
code here
```
## Important
- Only edit files that are provided in context
- Make minimal, targeted changes
- Preserve existing formatting and style
- If a task is unclear, ask for clarification instead of guessing"""
def buildRequest(
    userPrompt: str,
    fileContexts: List[FileContext],
    chatHistory: Optional[List[Dict[str, Any]]] = None
) -> AiCallRequest:
    """Assemble the single-shot AiCallRequest.

    The prompt is the system prompt, an optional condensed history section,
    and the user request; the numbered file contents travel as context.
    """
    sections = [SYSTEM_PROMPT]
    if chatHistory:
        historyPart = _buildChatHistory(chatHistory)
        if historyPart:
            sections.append(f"## Previous conversation\n{historyPart}")
    sections.append(f"## User request\n{userPrompt}")
    fileContextPart = _buildFileContext(fileContexts)
    return AiCallRequest(
        prompt="\n\n".join(sections),
        context=fileContextPart if fileContextPart else None,
        options=AiCallOptions(
            operationType=OperationTypeEnum.DATA_ANALYSE,
            temperature=0.0,
            compressPrompt=False,
            compressContext=False,
            resultFormat="txt"
        )
    )
def _buildFileContext(fileContexts: List[FileContext]) -> str:
    """Concatenate file contents as 'lineNo|text' blocks between FILE markers.

    Files without content are skipped; an empty input yields an empty string.
    """
    rendered = []
    for ctx in fileContexts:
        if not ctx.content:
            continue
        body = "\n".join(
            f"{lineNo}|{text}"
            for lineNo, text in enumerate(ctx.content.split("\n"), start=1)
        )
        rendered.append(f"--- FILE: {ctx.fileName} ---\n{body}\n--- END FILE ---")
    return "\n\n".join(rendered)
def buildAgentRequest(
    userPrompt: Optional[str],
    fileListContext: str,
    conversationHistory: List[Dict[str, Any]]
) -> AiCallRequest:
    """Build the agent-mode AiCallRequest.

    First round (empty history): context carries the file list plus the task.
    Later rounds: context carries the file list plus the full conversation,
    and userPrompt is ignored since it already lives in the history.
    """
    from modules.features.codeeditor.toolRegistry import formatToolDefinitions
    systemPrompt = _AGENT_SYSTEM_PROMPT.replace("{{TOOL_DEFINITIONS}}", formatToolDefinitions())
    if conversationHistory:
        historyText = _buildConversationHistory(conversationHistory)
        context = f"## Available files\n{fileListContext}\n\n## Conversation\n{historyText}"
    else:
        context = f"## Available files\n{fileListContext}\n\n## Task\n{userPrompt}"
    return AiCallRequest(
        prompt=systemPrompt,
        context=context,
        options=AiCallOptions(
            operationType=OperationTypeEnum.DATA_ANALYSE,
            temperature=0.0,
            compressPrompt=False,
            compressContext=False,
            resultFormat="txt"
        )
    )
_AGENT_SYSTEM_PROMPT = """You are an AI agent for file analysis and editing. You work autonomously by using tools to read files, search content, and propose edits.
## Available tools
{{TOOL_DEFINITIONS}}
## How to call tools
Use this exact format for each tool call:
```tool_call
tool: <tool_name>
args: {"param": "value"}
```
## Rules
- Read files ONE AT A TIME with read_file, never assume file contents
- First create a plan, then execute it step by step
- Use search_files to find relevant files before reading them
- Use list_files to discover what files are available
- For file changes, use ```file_edit``` blocks (same format as before)
- You may combine text explanations, tool calls, and file edits in one response
- When you are DONE and need no more tool calls, simply respond with text only (no tool_call blocks)
- Keep responses focused and efficient
## file_edit format (for changes)
```file_edit
fileName: <filename>
oldContent: |
<exact existing content>
newContent: |
<replacement content>
```"""
def _buildConversationHistory(history: List[Dict[str, Any]]) -> str:
    """Serialize the agent conversation, tagging tool results with their tool name."""
    blocks = []
    for entry in history:
        role = entry.get("role", "unknown")
        body = entry.get("content", "")
        if role == "tool_result":
            label = f"[Tool Result - {entry.get('toolName', '')}]"
        else:
            label = f"[{role}]"
        blocks.append(f"{label}:\n{body}")
    return "\n\n".join(blocks)
def _buildChatHistory(chatHistory: List[Dict[str, Any]]) -> str:
    """Render the last 10 messages, truncating each body to 500 characters."""
    if not chatHistory:
        return ""
    lines = []
    for entry in chatHistory[-10:]:
        body = entry.get("content", "")
        if len(body) > 500:
            body = body[:500] + "..."
        lines.append(f"[{entry.get('role', 'unknown')}]: {body}")
    return "\n".join(lines)

View file

@ -1,184 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Response parser for the CodeEditor feature.
Parses AI responses into typed segments (text, code_block, file_edit, tool_call)."""
import logging
import json
import re
from typing import List, Optional
from modules.features.codeeditor.datamodelCodeeditor import ResponseSegment, SegmentTypeEnum
logger = logging.getLogger(__name__)
# Opening/closing code fence: ``` optionally followed by a language tag
# (\w also matches the underscore in file_edit / tool_call).
_FENCE_PATTERN = re.compile(r"^```(\w*)\s*$", re.MULTILINE)
def parseResponse(rawContent: str) -> List[ResponseSegment]:
    """Parse an AI response into typed segments.

    Walks the response line by line: runs of plain text become TEXT segments,
    fenced blocks become CODE_BLOCK, FILE_EDIT, or TOOL_CALL segments depending
    on the fence language tag. Malformed file_edit/tool_call blocks degrade to
    plain CODE_BLOCK segments instead of being dropped.
    """
    if not rawContent or not rawContent.strip():
        return []
    segments = []
    lines = rawContent.split("\n")
    i = 0
    textBuffer = []
    while i < len(lines):
        line = lines[i]
        match = _FENCE_PATTERN.match(line)
        if match:
            # A fence starts: emit any accumulated free text first.
            if textBuffer:
                _flushTextBuffer(textBuffer, segments)
                textBuffer = []
            lang = match.group(1).strip()
            blockLines, endIdx = _collectBlock(lines, i + 1)
            blockContent = "\n".join(blockLines)
            if lang == "file_edit":
                segment = _parseFileEditBlock(blockContent)
                if segment:
                    segments.append(segment)
                else:
                    # Unparseable edit: keep the raw content visible as code.
                    segments.append(ResponseSegment(
                        type=SegmentTypeEnum.CODE_BLOCK,
                        content=blockContent,
                        language="text"
                    ))
            elif lang == "tool_call":
                segment = _parseToolCallBlock(blockContent)
                if segment:
                    segments.append(segment)
                else:
                    segments.append(ResponseSegment(
                        type=SegmentTypeEnum.CODE_BLOCK,
                        content=blockContent,
                        language="text"
                    ))
            else:
                segments.append(ResponseSegment(
                    type=SegmentTypeEnum.CODE_BLOCK,
                    content=blockContent,
                    language=lang or "text"
                ))
            # Resume scanning after the closing fence.
            i = endIdx + 1
        else:
            textBuffer.append(line)
            i += 1
    if textBuffer:
        _flushTextBuffer(textBuffer, segments)
    return segments
def hasToolCalls(segments: List[ResponseSegment]) -> bool:
    """Return True when at least one segment is a tool call."""
    for segment in segments:
        if segment.type == SegmentTypeEnum.TOOL_CALL:
            return True
    return False
def _collectBlock(lines: List[str], startIdx: int) -> tuple:
    """Gather lines until the closing ``` fence.

    Returns:
        (blockLines, fenceIdx). If the fence is never closed, all remaining
        lines are returned and fenceIdx == len(lines).
    """
    collected = []
    for idx in range(startIdx, len(lines)):
        if lines[idx].strip() == "```":
            return collected, idx
        collected.append(lines[idx])
    return collected, len(lines)
def _flushTextBuffer(buffer: List[str], segments: List[ResponseSegment]):
    """Join buffered lines into one TEXT segment and clear the buffer.

    Whitespace-only buffers are cleared without emitting a segment.
    """
    joined = "\n".join(buffer).strip()
    buffer.clear()
    if not joined:
        return
    segments.append(ResponseSegment(type=SegmentTypeEnum.TEXT, content=joined))
def _parseFileEditBlock(blockContent: str) -> Optional[ResponseSegment]:
    """Parse a file_edit block into a ResponseSegment with fileName, oldContent, newContent.

    Expected shape (see the file_edit format in the system prompts): a
    'fileName:' line plus 'oldContent:'/'newContent:' sections whose bodies
    follow on subsequent indented lines ('|' marks literal content). Returns
    None when fileName or newContent is missing so the caller can fall back
    to rendering the raw block as code.
    """
    fields = {"fileName": None, "oldContent": None, "newContent": None}
    currentField = None
    currentLines = []
    for line in blockContent.split("\n"):
        stripped = line.strip()
        newField = None
        # A line starting with one of the known keys switches the active section.
        # NOTE(review): a content line that itself begins with 'fileName:' etc.
        # would also trigger this -- accepted limitation of the line format.
        for key in ("fileName", "oldContent", "newContent"):
            if stripped.startswith(f"{key}:"):
                newField = key
                break
        if newField:
            # Close out the previous section before switching.
            if currentField and currentLines:
                fields[currentField] = "\n".join(currentLines)
            currentField = newField
            value = stripped[len(f"{newField}:"):].strip()
            if newField == "fileName":
                fields["fileName"] = value if value else None
                currentField = None
                currentLines = []
            else:
                # '|' is the YAML-style marker for "content follows on later lines".
                currentLines = [value] if value and value != "|" else []
        else:
            if currentField in ("oldContent", "newContent"):
                # Drop the two-character content indent when present.
                dedented = line[2:] if line.startswith("  ") else line
                currentLines.append(dedented)
    # Flush the final open section.
    if currentField and currentLines:
        fields[currentField] = "\n".join(currentLines)
    if not fields["fileName"]:
        logger.warning("file_edit block missing fileName")
        return None
    if fields["newContent"] is None:
        logger.warning(f"file_edit block for {fields['fileName']} missing newContent")
        return None
    return ResponseSegment(
        type=SegmentTypeEnum.FILE_EDIT,
        content=f"Edit: {fields['fileName']}",
        fileName=fields["fileName"],
        oldContent=fields["oldContent"],
        newContent=fields["newContent"]
    )
def _parseToolCallBlock(blockContent: str) -> Optional[ResponseSegment]:
    """Parse a tool_call block ('tool: name' / 'args: {json}') into a TOOL_CALL segment.

    Args that fail JSON parsing are preserved under the 'raw' key; a missing
    tool name invalidates the whole block (returns None).
    """
    toolName = None
    toolArgs = {}
    for rawLine in blockContent.split("\n"):
        text = rawLine.strip()
        if text.startswith("tool:"):
            toolName = text[len("tool:"):].strip()
        elif text.startswith("args:"):
            argsStr = text[len("args:"):].strip()
            try:
                toolArgs = json.loads(argsStr)
            except json.JSONDecodeError:
                logger.warning(f"Could not parse tool args as JSON: {argsStr}")
                toolArgs = {"raw": argsStr}
    if not toolName:
        logger.warning("tool_call block missing tool name")
        return None
    return ResponseSegment(
        type=SegmentTypeEnum.TOOL_CALL,
        content=f"Tool: {toolName}",
        toolName=toolName,
        toolArgs=toolArgs
    )

View file

@ -1,395 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
CodeEditor Feature Routes.
SSE-based endpoints for Cursor-style AI file editing.
"""
import logging
import json
import asyncio
from typing import Optional, Dict, Any, List
from fastapi import APIRouter, HTTPException, Depends, Body, Path, Query, Request
from fastapi.responses import StreamingResponse
from modules.auth import limiter, getRequestContext, RequestContext
from modules.interfaces import interfaceDbChat, interfaceDbManagement
from modules.interfaces.interfaceAiObjects import AiObjects
from modules.datamodels.datamodelChat import UserInputRequest
from modules.serviceCenter.core.serviceStreaming import get_event_manager
from modules.features.codeeditor import codeEditorProcessor, fileContextManager
from modules.features.codeeditor.datamodelCodeeditor import FileEditProposal, EditStatusEnum
logger = logging.getLogger(__name__)

# All CodeEditor endpoints are mounted under /api/codeeditor/{instanceId}/...
router = APIRouter(
    prefix="/api/codeeditor",
    tags=["Code Editor Feature"],
    responses={404: {"description": "Not found"}}
)

# Module-level singleton holding the shared AiObjects instance;
# created lazily on first use by _getAiObjects().
_aiObjects: Optional[AiObjects] = None
async def _getAiObjects() -> AiObjects:
    """Lazy-init singleton for AiObjects.

    Returns:
        The process-wide AiObjects instance, creating it on first call.

    NOTE(review): there is no lock around the check-then-create, so two
    concurrent first calls may each await AiObjects.create() and the second
    assignment wins. Presumed benign — confirm create() is safe to run twice.
    """
    global _aiObjects
    if _aiObjects is None:
        _aiObjects = await AiObjects.create()
    return _aiObjects
def _getServiceChat(context: RequestContext, featureInstanceId: Optional[str] = None):
    """Get chat interface with feature instance context.

    The mandate id is stringified when present so the interface layer
    receives either a string or None, never a raw id object.
    """
    mandate = str(context.mandateId) if context.mandateId else None
    return interfaceDbChat.getInterface(
        context.user,
        mandateId=mandate,
        featureInstanceId=featureInstanceId,
    )
def _getDbManagement(context: RequestContext, featureInstanceId: Optional[str] = None):
    """Get management interface with user context for file access.

    Mirrors _getServiceChat: mandate id is normalized to a string or None
    before being handed to the interface factory.
    """
    mandate = str(context.mandateId) if context.mandateId else None
    return interfaceDbManagement.getInterface(
        context.user,
        mandateId=mandate,
        featureInstanceId=featureInstanceId,
    )
def _validateInstanceAccess(instanceId: str, context: RequestContext) -> str:
    """Validate user has access to the feature instance. Returns mandateId.

    Raises:
        HTTPException 404: the feature instance does not exist.
        HTTPException 403: the user has no enabled access entry for it.
    """
    # Imported locally — presumably to avoid an import cycle; TODO confirm.
    from modules.interfaces.interfaceDbApp import getRootInterface
    rootInterface = getRootInterface()
    instance = rootInterface.getFeatureInstance(instanceId)
    if not instance:
        raise HTTPException(status_code=404, detail=f"Feature instance {instanceId} not found")
    # Access requires an explicit, enabled grant for this user + instance.
    featureAccess = rootInterface.getFeatureAccess(str(context.user.id), instanceId)
    if not featureAccess or not featureAccess.enabled:
        raise HTTPException(status_code=403, detail="Access denied to this feature instance")
    # Mandate id is returned as a string (or None for mandate-less instances).
    return str(instance.mandateId) if instance.mandateId else None
@router.post("/{instanceId}/start/stream")
@limiter.limit("60/minute")
async def streamCodeeditorStart(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: Optional[str] = Query(None, description="Optional workflow ID to continue"),
    mode: str = Query("simple", description="Processing mode: 'simple' (single AI call) or 'agent' (multi-step with tools)"),
    userInput: UserInputRequest = Body(...),
    context: RequestContext = Depends(getRequestContext)
):
    """Start or continue a CodeEditor workflow with SSE streaming. Supports simple and agent mode.

    Flow: validate access, resolve/create the workflow, echo the user message
    onto the event queue, kick off background processing, then stream queued
    events back to the client as Server-Sent Events until completion,
    disconnect, or timeout.
    """
    try:
        # Return value (mandateId) is unused here; the call is kept for its
        # access-check side effect (raises 403/404).
        mandateId = _validateInstanceAccess(instanceId, context)
        chatInterface = _getServiceChat(context, featureInstanceId=instanceId)
        dbManagement = _getDbManagement(context, featureInstanceId=instanceId)
        aiObjects = await _getAiObjects()
        eventManager = get_event_manager()
        if workflowId:
            # Continue an existing workflow — it must exist.
            workflow = chatInterface.getWorkflow(workflowId)
            if not workflow:
                raise HTTPException(status_code=404, detail=f"Workflow {workflowId} not found")
        else:
            # New workflow, labeled with a truncated copy of the first prompt.
            workflow = chatInterface.createWorkflow({
                "workflowMode": "CodeEditor",
                "status": "running",
                "label": userInput.prompt[:80] if userInput.prompt else "CodeEditor Session",
            })
            # createWorkflow may return a dict or an object — handle both.
            workflowId = workflow.get("id") if isinstance(workflow, dict) else workflow.id
        # Queue must exist before the processor starts emitting events.
        queue = eventManager.create_queue(workflowId)
        userMessage = {
            "role": "user",
            "content": userInput.prompt,
            "selectedFiles": userInput.listFileId or []
        }
        # Echo the user message so the client sees it in the stream immediately.
        await eventManager.emit_event(workflowId, "chatdata", {
            "type": "message", "item": userMessage
        })
        selectedFileIds = userInput.listFileId or []
        agentMode = mode.lower() == "agent"
        # Fire-and-forget background processing; results arrive via the queue.
        # NOTE(review): the task reference is not retained — per asyncio docs a
        # dropped Task may be garbage-collected mid-run; consider keeping a ref.
        asyncio.create_task(
            codeEditorProcessor.processMessage(
                workflowId=workflowId,
                userPrompt=userInput.prompt,
                selectedFileIds=selectedFileIds,
                dbManagement=dbManagement,
                interfaceAi=aiObjects,
                chatInterface=chatInterface,
                eventManager=eventManager,
                agentMode=agentMode
            )
        )
        async def _eventStream():
            """Drain the workflow queue into SSE frames until terminal event/timeout."""
            streamTimeout = 300  # seconds of *event inactivity* before giving up
            lastActivity = asyncio.get_event_loop().time()
            while True:
                now = asyncio.get_event_loop().time()
                # Keepalives below do NOT reset lastActivity — only real events do.
                if now - lastActivity > streamTimeout:
                    yield f"data: {json.dumps({'type': 'error', 'error': 'Stream timeout'})}\n\n"
                    break
                if await request.is_disconnected():
                    logger.info(f"Client disconnected for workflow {workflowId}")
                    break
                try:
                    # 1s poll so disconnects/timeouts are noticed promptly.
                    event = await asyncio.wait_for(queue.get(), timeout=1.0)
                    lastActivity = asyncio.get_event_loop().time()
                    eventType = event.get("type", "")
                    if eventType == "chatdata":
                        # chatdata payloads are unwrapped before sending.
                        yield f"data: {json.dumps(event.get('data', {}))}\n\n"
                    elif eventType in ("complete", "stopped", "error"):
                        # Terminal events end the stream.
                        yield f"data: {json.dumps({'type': eventType, **event.get('data', {})})}\n\n"
                        break
                    else:
                        yield f"data: {json.dumps(event)}\n\n"
                except asyncio.TimeoutError:
                    # SSE comment line keeps proxies/clients from closing the pipe.
                    yield ": keepalive\n\n"
            await eventManager.cleanup(workflowId)
        return StreamingResponse(
            _eventStream(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "X-Accel-Buffering": "no"  # disable nginx buffering for SSE
            }
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error in streamCodeeditorStart: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/{instanceId}/{workflowId}/stop")
@limiter.limit("120/minute")
async def stopWorkflow(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: str = Path(..., description="Workflow ID"),
    context: RequestContext = Depends(getRequestContext)
):
    """Stop a running CodeEditor workflow.

    Emits a "stopped" event on the workflow's queue; the streaming loop in
    streamCodeeditorStart treats that as a terminal event and closes the SSE
    stream. No check is made that the workflow exists or is running.
    """
    try:
        _validateInstanceAccess(instanceId, context)
        eventManager = get_event_manager()
        await eventManager.emit_event(workflowId, "stopped", {
            "workflowId": workflowId
        }, event_category="workflow", message="Workflow stopped by user")
        return {"status": "stopped", "workflowId": workflowId}
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error stopping workflow: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/{instanceId}/{workflowId}/chatData")
@limiter.limit("120/minute")
def getWorkflowChatData(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: str = Path(..., description="Workflow ID"),
    afterTimestamp: Optional[float] = Query(None, description="Unix timestamp for incremental fetch"),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """Get chat data for a workflow (polling fallback).

    Used by clients when the SSE stream is unavailable. Passing afterTimestamp
    limits the response to items newer than that timestamp (incremental poll).
    """
    try:
        _validateInstanceAccess(instanceId, context)
        chatInterface = _getServiceChat(context, featureInstanceId=instanceId)
        # 404 before fetching data so the client can distinguish "gone" from "empty".
        workflow = chatInterface.getWorkflow(workflowId)
        if not workflow:
            raise HTTPException(status_code=404, detail=f"Workflow {workflowId} not found")
        return chatInterface.getUnifiedChatData(workflowId, afterTimestamp)
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting chat data: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/{instanceId}/workflows")
@limiter.limit("120/minute")
def getWorkflows(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    page: int = Query(1, ge=1),
    pageSize: int = Query(20, ge=1, le=100),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """List workflows for this feature instance.

    Paginated: page is 1-based, pageSize is capped at 100 by the Query
    constraint above.
    """
    try:
        _validateInstanceAccess(instanceId, context)
        chatInterface = _getServiceChat(context, featureInstanceId=instanceId)
        # Local import — presumably avoids a module-load cycle; TODO confirm.
        from modules.datamodels.datamodelPagination import PaginationParams
        pagination = PaginationParams(page=page, pageSize=pageSize)
        return chatInterface.getWorkflows(pagination=pagination)
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting workflows: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/{instanceId}/files")
@limiter.limit("120/minute")
def getFiles(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """List all text files accessible to the user.

    Returns file metadata only — the "content" field is stripped from each
    entry to keep the listing payload small.
    """
    try:
        _validateInstanceAccess(instanceId, context)
        dbManagement = _getDbManagement(context, featureInstanceId=instanceId)
        textFiles = fileContextManager.listTextFiles(dbManagement)
        return {
            # exclude the (potentially large) content field from the dump
            "files": [f.model_dump(exclude={"content"}) for f in textFiles],
            "count": len(textFiles)
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error listing files: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/{instanceId}/files/{fileId}/content")
@limiter.limit("120/minute")
def getFileContent(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    fileId: str = Path(..., description="File ID"),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """Get the text content of a file.

    Raises:
        HTTPException 404: file record or its stored data is missing.
        HTTPException 400: the stored bytes are not valid UTF-8 text.
    """
    try:
        _validateInstanceAccess(instanceId, context)
        dbManagement = _getDbManagement(context, featureInstanceId=instanceId)
        fileItem = dbManagement.getFile(fileId)
        if not fileItem:
            raise HTTPException(status_code=404, detail=f"File {fileId} not found")
        fileData = dbManagement.getFileData(fileId)
        if not fileData:
            raise HTTPException(status_code=404, detail=f"No data for file {fileId}")
        try:
            content = fileData.decode("utf-8")
        except UnicodeDecodeError:
            # Re-raised as 400 — the outer handler passes HTTPException through.
            raise HTTPException(status_code=400, detail="File is not valid UTF-8 text")
        return {
            "fileId": fileId,
            "fileName": fileItem.fileName,
            "mimeType": fileItem.mimeType,
            "content": content
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting file content: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/{instanceId}/{workflowId}/apply")
@limiter.limit("60/minute")
async def applyEdit(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: str = Path(..., description="Workflow ID"),
    proposalData: Dict[str, Any] = Body(...),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """Accept a file edit proposal. Updates existing file or creates new one.

    Body fields: fileId (optional — empty or "unknown-*" means create),
    newContent (required), fileName (used for new files / display).
    Emits a "file_version" chat event so connected clients can refresh.
    """
    try:
        _validateInstanceAccess(instanceId, context)
        dbManagement = _getDbManagement(context, featureInstanceId=instanceId)
        fileId = proposalData.get("fileId", "")
        newContent = proposalData.get("newContent")
        fileName = proposalData.get("fileName", "")
        if newContent is None:
            raise HTTPException(status_code=400, detail="newContent is required")
        contentBytes = newContent.encode("utf-8")
        # Proposals for files the AI invented carry no id or an "unknown-" placeholder.
        isNewFile = not fileId or fileId.startswith("unknown-")
        if isNewFile:
            mimeType = _guessMimeType(fileName)
            fileItem = dbManagement.createFile(fileName, mimeType, contentBytes)
            resultFileId = fileItem.id
            resultFileName = fileItem.fileName
        else:
            fileItem = dbManagement.getFile(fileId)
            if not fileItem:
                raise HTTPException(status_code=404, detail=f"File {fileId} not found")
            # createFileData stores new content for the existing file record.
            success = dbManagement.createFileData(fileId, contentBytes)
            if not success:
                raise HTTPException(status_code=500, detail="Failed to store updated file content")
            resultFileId = fileId
            # Prefer the client-supplied name; fall back to the stored one.
            resultFileName = fileName or fileItem.fileName
        eventManager = get_event_manager()
        await eventManager.emit_event(workflowId, "chatdata", {
            "type": "file_version",
            "item": {
                "fileId": resultFileId,
                "fileName": resultFileName,
                "status": "accepted",
                "isNew": isNewFile
            }
        })
        return {
            "status": "accepted",
            "fileId": resultFileId,
            "fileName": resultFileName,
            "isNew": isNewFile
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error applying edit: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
_MIME_MAP = {
".md": "text/markdown", ".txt": "text/plain", ".json": "application/json",
".yaml": "application/yaml", ".yml": "application/yaml", ".xml": "application/xml",
".csv": "text/csv", ".py": "text/x-python", ".js": "text/javascript",
".ts": "text/x-typescript", ".html": "text/html", ".css": "text/css",
".sql": "text/x-sql", ".sh": "text/x-shellscript",
}
def _guessMimeType(fileName: str) -> str:
"""Guess MIME type from file extension."""
if not fileName or "." not in fileName:
return "text/plain"
ext = "." + fileName.rsplit(".", 1)[-1].lower()
return _MIME_MAP.get(ext, "text/plain")

View file

@ -1,157 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Tool registry and dispatcher for the CodeEditor agent loop.
Defines available tools and executes them against the file context manager."""
import logging
import time
import fnmatch
from typing import Dict, Any, List
from modules.features.codeeditor.datamodelCodeeditor import ToolResult
logger = logging.getLogger(__name__)
# Tool catalog exposed to the agent loop. Each entry's "name" must match a
# branch in dispatch(); "description" and "parameters" are rendered into the
# system prompt by formatToolDefinitions(). Parameter values are human-readable
# hints for the model, not a machine-validated schema.
TOOL_DEFINITIONS = [
    {
        "name": "read_file",
        "description": "Read the full content of a single file by its fileId.",
        "parameters": {"fileId": "string (required)"}
    },
    {
        "name": "list_files",
        "description": "List all available text files with metadata (name, size, mimeType). Optionally filter by glob pattern.",
        "parameters": {"filter": "string (optional, glob pattern e.g. '*.py')"}
    },
    {
        "name": "search_files",
        "description": "Search all file contents for a text query. Returns matching lines with file name and line number.",
        "parameters": {"query": "string (required)", "fileType": "string (optional, extension e.g. 'py')"}
    },
]
async def dispatch(toolName: str, toolArgs: Dict[str, Any], dbManagement) -> ToolResult:
    """Execute a tool by name and return a ToolResult.

    Unknown tool names and raised exceptions both produce success=False;
    tool-level errors reported as strings (e.g. "Error: ...") still count
    as success=True since the tool itself ran.
    """
    startedAt = time.time()
    try:
        recognized = True
        if toolName == "read_file":
            output = await _toolReadFile(toolArgs, dbManagement)
        elif toolName == "list_files":
            output = _toolListFiles(toolArgs, dbManagement)
        elif toolName == "search_files":
            output = await _toolSearchFiles(toolArgs, dbManagement)
        else:
            recognized = False
            output = f"Unknown tool: {toolName}"
        return ToolResult(
            toolName=toolName,
            result=output,
            success=recognized,
            executionTime=time.time() - startedAt,
        )
    except Exception as e:
        logger.error(f"Tool {toolName} failed: {e}", exc_info=True)
        return ToolResult(
            toolName=toolName,
            result=f"Error: {str(e)}",
            success=False,
            executionTime=time.time() - startedAt,
        )
async def _toolReadFile(args: Dict[str, Any], dbManagement) -> str:
"""Read a single file's content."""
fileId = args.get("fileId", "")
if not fileId:
return "Error: fileId is required"
fileItem = dbManagement.getFile(fileId)
if not fileItem:
return f"Error: File {fileId} not found"
fileData = dbManagement.getFileData(fileId)
if not fileData:
return f"Error: No data for file {fileId}"
try:
content = fileData.decode("utf-8")
except UnicodeDecodeError:
return f"Error: File {fileItem.fileName} is not valid UTF-8"
lines = content.split("\n")
numbered = "\n".join([f"{i + 1}|{line}" for i, line in enumerate(lines)])
return f"--- FILE: {fileItem.fileName} (id: {fileId}) ---\n{numbered}\n--- END FILE ---"
def _toolListFiles(args: Dict[str, Any], dbManagement) -> str:
    """List all text files, optionally filtered by glob pattern."""
    from modules.features.codeeditor.datamodelCodeeditor import isTextFile
    filterPattern = args.get("filter", "")
    allFiles = dbManagement.getAllFiles()
    if not allFiles:
        return "No files found."
    entries = []
    for fileItem in allFiles:
        # Binary/non-text files are never exposed to the agent.
        if not isTextFile(fileItem.mimeType, fileItem.fileName):
            continue
        if filterPattern and not fnmatch.fnmatch(fileItem.fileName, filterPattern):
            continue
        entries.append(f"- {fileItem.fileName} (id: {fileItem.id}, size: {fileItem.fileSize}B, type: {fileItem.mimeType})")
    if not entries:
        return "No matching text files found."
    return f"Available files ({len(entries)}):\n" + "\n".join(entries)
async def _toolSearchFiles(args: Dict[str, Any], dbManagement) -> str:
    """Search file contents for a query string.

    Case-insensitive substring match, capped at 50 hits total across all
    files. Non-text, unreadable, and non-UTF-8 files are silently skipped.
    """
    from modules.features.codeeditor.datamodelCodeeditor import isTextFile
    query = args.get("query", "")
    if not query:
        return "Error: query is required"
    fileType = args.get("fileType", "")
    allFiles = dbManagement.getAllFiles()
    if not allFiles:
        return "No files to search."
    hits = []
    maxHits = 50  # global cap across all files, not per file
    queryLower = query.lower()
    for f in allFiles:
        if not isTextFile(f.mimeType, f.fileName):
            continue
        # Optional extension filter, e.g. fileType="py" matches "*.py" only.
        if fileType and not f.fileName.endswith(f".{fileType}"):
            continue
        fileData = dbManagement.getFileData(f.id)
        if not fileData:
            continue
        try:
            content = fileData.decode("utf-8")
        except UnicodeDecodeError:
            continue
        for lineNum, line in enumerate(content.split("\n"), 1):
            if queryLower in line.lower():
                hits.append(f"{f.fileName}:{lineNum}: {line.strip()}")
                if len(hits) >= maxHits:
                    break
        # Second check propagates the inner break out of the file loop.
        if len(hits) >= maxHits:
            break
    if not hits:
        return f"No matches found for '{query}'."
    result = f"Search results for '{query}' ({len(hits)} matches):\n" + "\n".join(hits)
    if len(hits) >= maxHits:
        result += f"\n... (truncated at {maxHits} matches)"
    return result
def formatToolDefinitions() -> str:
    """Format tool definitions for inclusion in the system prompt."""
    def _renderTool(tool) -> str:
        # One markdown bullet per tool, with its parameter hints on a second line.
        paramDesc = ", ".join(f"{k}: {v}" for k, v in tool["parameters"].items())
        return f"- **{tool['name']}**: {tool['description']}\n  Parameters: {{{paramDesc}}}"
    return "\n".join(_renderTool(tool) for tool in TOOL_DEFINITIONS)

View file

@ -1,3 +1,3 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Unified AI Workspace feature -- merges Codeeditor, Chatbot, and Playground."""
"""Unified AI Workspace feature."""

View file

@ -3,7 +3,7 @@
"""
Workspace Feature Container - Main Module.
Handles feature initialization and RBAC catalog registration.
Unified AI Workspace combining Codeeditor, Chatbot, and Playground capabilities.
Unified AI Workspace feature.
"""
import logging
@ -21,6 +21,11 @@ UI_OBJECTS = [
"label": {"en": "Dashboard", "de": "Dashboard", "fr": "Tableau de bord"},
"meta": {"area": "dashboard"}
},
{
"objectKey": "ui.feature.workspace.editor",
"label": {"en": "Editor", "de": "Editor", "fr": "Editeur"},
"meta": {"area": "editor"}
},
{
"objectKey": "ui.feature.workspace.settings",
"label": {"en": "Settings", "de": "Einstellungen", "fr": "Parametres"},
@ -59,6 +64,11 @@ RESOURCE_OBJECTS = [
"label": {"en": "Voice Input/Output", "de": "Spracheingabe/-ausgabe", "fr": "Entree/sortie vocale"},
"meta": {"endpoint": "/api/workspace/{instanceId}/voice/*", "method": "POST"}
},
{
"objectKey": "resource.feature.workspace.edits",
"label": {"en": "Review File Edits", "de": "Datei-Aenderungen pruefen", "fr": "Verifier les modifications de fichiers"},
"meta": {"endpoint": "/api/workspace/{instanceId}/edit/*", "method": "POST"}
},
]
TEMPLATE_ROLES = [
@ -71,6 +81,7 @@ TEMPLATE_ROLES = [
},
"accessRules": [
{"context": "UI", "item": "ui.feature.workspace.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.workspace.editor", "view": True},
{"context": "UI", "item": "ui.feature.workspace.settings", "view": True},
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"},
]
@ -84,6 +95,7 @@ TEMPLATE_ROLES = [
},
"accessRules": [
{"context": "UI", "item": "ui.feature.workspace.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.workspace.editor", "view": True},
{"context": "UI", "item": "ui.feature.workspace.settings", "view": True},
{"context": "RESOURCE", "item": "resource.feature.workspace.start", "view": True},
{"context": "RESOURCE", "item": "resource.feature.workspace.stop", "view": True},
@ -91,6 +103,7 @@ TEMPLATE_ROLES = [
{"context": "RESOURCE", "item": "resource.feature.workspace.folders", "view": True},
{"context": "RESOURCE", "item": "resource.feature.workspace.datasources", "view": True},
{"context": "RESOURCE", "item": "resource.feature.workspace.voice", "view": True},
{"context": "RESOURCE", "item": "resource.feature.workspace.edits", "view": True},
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "m"},
]
},

View file

@ -2,8 +2,7 @@
# All rights reserved.
"""Unified AI Workspace routes.
SSE-based endpoints that combine the capabilities of Codeeditor, Chatbot,
and Playground into a single agent-driven workspace.
SSE-based endpoints for the agent-driven AI Workspace.
"""
import logging
@ -19,7 +18,7 @@ from modules.auth import limiter, getRequestContext, RequestContext
from modules.interfaces import interfaceDbChat, interfaceDbManagement
from modules.interfaces.interfaceAiObjects import AiObjects
from modules.serviceCenter.core.serviceStreaming import get_event_manager
from modules.serviceCenter.services.serviceAgent.datamodelAgent import AgentEventTypeEnum
from modules.serviceCenter.services.serviceAgent.datamodelAgent import AgentEventTypeEnum, PendingFileEdit
logger = logging.getLogger(__name__)
@ -32,6 +31,41 @@ router = APIRouter(
_aiObjects: Optional[AiObjects] = None
class _InstanceEdits:
    """Pending file edits for a single workspace instance.

    Thin wrapper around a dict keyed by edit id. In-memory only — edits are
    lost on process restart. NOTE(review): no locking; presumably accessed
    only from the single event loop — confirm.
    """
    def __init__(self):
        # editId -> PendingFileEdit
        self._edits: Dict[str, PendingFileEdit] = {}
    def add(self, edit: PendingFileEdit) -> None:
        """Store (or overwrite) an edit proposal under its own id."""
        self._edits[edit.id] = edit
    def get(self, editId: str) -> Optional[PendingFileEdit]:
        """Return the edit with this id, or None if unknown."""
        return self._edits.get(editId)
    def getPending(self) -> List[PendingFileEdit]:
        """Return only edits whose status is still 'pending'."""
        return [e for e in self._edits.values() if e.status == "pending"]
    def items(self):
        """Expose (editId, edit) pairs for the bulk accept/reject endpoints."""
        return self._edits.items()
class _PendingEditsStore:
    """Global store for pending file edits across all workspace instances."""

    def __init__(self):
        # instanceId -> _InstanceEdits collection
        self._instances: Dict[str, _InstanceEdits] = {}

    def forInstance(self, instanceId: str) -> _InstanceEdits:
        """Get-or-create the edit collection for a workspace instance."""
        collection = self._instances.get(instanceId)
        if collection is None:
            collection = _InstanceEdits()
            self._instances[instanceId] = collection
        return collection


# Process-wide singleton used by the workspace edit endpoints.
_pendingEditsStore = _PendingEditsStore()
class WorkspaceInputRequest(BaseModel):
"""Prompt input for the unified workspace."""
prompt: str = Field(description="User prompt text")
@ -41,6 +75,7 @@ class WorkspaceInputRequest(BaseModel):
voiceMode: bool = Field(default=False, description="Enable voice response")
workflowId: Optional[str] = Field(default=None, description="Continue existing workflow")
userLanguage: str = Field(default="en", description="User language code")
allowedProviders: List[str] = Field(default_factory=list, description="Restrict AI to these providers")
async def _getAiObjects() -> AiObjects:
@ -264,6 +299,7 @@ async def streamWorkspaceStart(
eventManager=eventManager,
userLanguage=userInput.userLanguage,
instanceConfig=instanceConfig,
allowedProviders=userInput.allowedProviders,
)
)
eventManager.register_agent_task(queueId, agentTask)
@ -317,6 +353,7 @@ async def _runWorkspaceAgent(
eventManager,
userLanguage: str = "en",
instanceConfig: Dict[str, Any] = None,
allowedProviders: List[str] = None,
):
"""Run the serviceAgent loop and forward events to the SSE queue."""
try:
@ -332,6 +369,9 @@ async def _runWorkspaceAgent(
chatService = getService("chat", ctx)
aiService = getService("ai", ctx)
if allowedProviders:
aiService.services.allowedProviders = allowedProviders
wfRecord = chatInterface.getWorkflow(workflowId) if workflowId else None
wfName = ""
if wfRecord:
@ -383,6 +423,35 @@ async def _runWorkspaceAgent(
if event.type == AgentEventTypeEnum.CHUNK and event.content:
accumulatedText += event.content
if event.type == AgentEventTypeEnum.FILE_EDIT_PROPOSAL and event.data:
editData = event.data
editId = editData.get("id", "")
if editId:
pendingEdit = PendingFileEdit(
id=editId,
fileId=editData.get("fileId", ""),
fileName=editData.get("fileName", ""),
mimeType=editData.get("mimeType", ""),
oldContent=editData.get("oldContent", ""),
newContent=editData.get("newContent", ""),
workflowId=workflowId,
)
_pendingEditsStore.forInstance(instanceId).add(pendingEdit)
logger.info(f"Stored pending edit {editId} for file '{pendingEdit.fileName}' in instance {instanceId}")
await eventManager.emit_event(queueId, "fileEditProposal", {
"type": "fileEditProposal",
"workflowId": workflowId,
"item": {
"id": editId,
"fileId": editData.get("fileId", ""),
"fileName": editData.get("fileName", ""),
"mimeType": editData.get("mimeType", ""),
"oldSize": len(editData.get("oldContent", "")),
"newSize": len(editData.get("newContent", "")),
},
})
continue
sseEvent = {
"type": event.type.value if hasattr(event.type, "value") else event.type,
"workflowId": workflowId,
@ -1103,3 +1172,137 @@ async def testVoice(
except Exception as e:
logger.error(f"Voice test failed: {e}")
raise HTTPException(status_code=500, detail=f"TTS test failed: {str(e)}")
# =============================================================================
# FILE EDIT PROPOSAL ENDPOINTS
# =============================================================================
@router.get("/{instanceId}/pending-edits")
@limiter.limit("30/minute")
async def getPendingEdits(
    request: Request,
    instanceId: str = Path(...),
    context: RequestContext = Depends(getRequestContext),
):
    """Return all pending file edit proposals for this workspace instance.

    Only edits still in "pending" status are included; accepted/rejected
    ones remain in the store but are filtered out here.
    """
    _validateInstanceAccess(instanceId, context)
    # model_dump() includes old/new content in full — callers get the complete diff payload.
    editList = [e.model_dump() for e in _pendingEditsStore.forInstance(instanceId).getPending()]
    return JSONResponse({"edits": editList})
@router.post("/{instanceId}/edit/{editId}/accept")
@limiter.limit("30/minute")
async def acceptEdit(
    request: Request,
    instanceId: str = Path(...),
    editId: str = Path(...),
    context: RequestContext = Depends(getRequestContext),
):
    """Accept a proposed file edit -- applies the new content to the file.

    Raises:
        HTTPException 404: no such edit proposal for this instance.
        HTTPException 409: the proposal was already accepted or rejected.
        HTTPException 500: the underlying file update failed.
    """
    _validateInstanceAccess(instanceId, context)
    edit = _pendingEditsStore.forInstance(instanceId).get(editId)
    if not edit:
        raise HTTPException(status_code=404, detail=f"Edit proposal {editId} not found")
    if edit.status != "pending":
        raise HTTPException(status_code=409, detail=f"Edit proposal is already {edit.status}")
    dbMgmt = _getDbManagement(context, instanceId)
    try:
        success = dbMgmt.updateFileData(edit.fileId, edit.newContent.encode("utf-8"))
        if not success:
            raise HTTPException(status_code=500, detail="Failed to update file data")
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to apply edit {editId}: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to apply edit: {str(e)}")
    # Status is only flipped after the file write succeeded.
    edit.status = "accepted"
    logger.info(f"Edit {editId} accepted for file '{edit.fileName}' in instance {instanceId}")
    return JSONResponse({
        "success": True,
        "editId": editId,
        "fileId": edit.fileId,
        "fileName": edit.fileName,
    })
@router.post("/{instanceId}/edit/{editId}/reject")
@limiter.limit("30/minute")
async def rejectEdit(
    request: Request,
    instanceId: str = Path(...),
    editId: str = Path(...),
    context: RequestContext = Depends(getRequestContext),
):
    """Reject a proposed file edit -- discards the change.

    Marks the proposal "rejected" in the in-memory store; the file on disk
    is never touched.

    Raises:
        HTTPException 404: no such edit proposal for this instance.
        HTTPException 409: the proposal was already accepted or rejected.
    """
    _validateInstanceAccess(instanceId, context)
    edit = _pendingEditsStore.forInstance(instanceId).get(editId)
    if not edit:
        raise HTTPException(status_code=404, detail=f"Edit proposal {editId} not found")
    if edit.status != "pending":
        raise HTTPException(status_code=409, detail=f"Edit proposal is already {edit.status}")
    edit.status = "rejected"
    logger.info(f"Edit {editId} rejected for file '{edit.fileName}' in instance {instanceId}")
    return JSONResponse({
        "success": True,
        "editId": editId,
        "fileId": edit.fileId,
        "fileName": edit.fileName,
    })
@router.post("/{instanceId}/edit/accept-all")
@limiter.limit("10/minute")
async def acceptAllEdits(
    request: Request,
    instanceId: str = Path(...),
    context: RequestContext = Depends(getRequestContext),
):
    """Accept all pending file edit proposals for this instance.

    Best-effort: each pending edit is applied independently; failures are
    collected into "errors" instead of aborting the batch.
    """
    _validateInstanceAccess(instanceId, context)
    instanceEdits = _pendingEditsStore.forInstance(instanceId)
    dbMgmt = _getDbManagement(context, instanceId)
    accepted = []
    errors = []
    # Only edit *values* are mutated while iterating — keys are untouched, so
    # iterating items() here is safe.
    for editId, edit in instanceEdits.items():
        if edit.status != "pending":
            continue
        try:
            success = dbMgmt.updateFileData(edit.fileId, edit.newContent.encode("utf-8"))
            if success:
                edit.status = "accepted"
                accepted.append(editId)
            else:
                errors.append({"editId": editId, "error": "updateFileData returned False"})
        except Exception as e:
            errors.append({"editId": editId, "error": str(e)})
    logger.info(f"Accepted {len(accepted)} edits for instance {instanceId}, {len(errors)} errors")
    return JSONResponse({"accepted": accepted, "errors": errors})
@router.post("/{instanceId}/edit/reject-all")
@limiter.limit("10/minute")
async def rejectAllEdits(
    request: Request,
    instanceId: str = Path(...),
    context: RequestContext = Depends(getRequestContext),
):
    """Reject all pending file edit proposals for this instance.

    Marks every pending edit "rejected"; files are never modified.
    """
    _validateInstanceAccess(instanceId, context)
    instanceEdits = _pendingEditsStore.forInstance(instanceId)
    rejected = []
    for editId, edit in instanceEdits.items():
        if edit.status != "pending":
            continue
        edit.status = "rejected"
        rejected.append(editId)
    logger.info(f"Rejected {len(rejected)} edits for instance {instanceId}")
    return JSONResponse({"rejected": rejected})

View file

@ -12,6 +12,7 @@ logger = logging.getLogger(__name__)
from modules.aicore.aicoreModelRegistry import modelRegistry
from modules.aicore.aicoreModelSelector import modelSelector
from modules.aicore.aicoreBase import RateLimitExceededException
from modules.datamodels.datamodelAi import (
AiModel,
AiCallOptions,
@ -97,15 +98,18 @@ class AiObjects:
# Get failover models for this operation type
availableModels = modelRegistry.getAvailableModels()
# Filter by allowedProviders if specified (from workflow config)
allowedProviders = getattr(options, 'allowedProviders', None) if options else None
if allowedProviders:
filteredModels = [m for m in availableModels if m.connectorType in allowedProviders]
if filteredModels:
logger.info(f"Filtered models by allowedProviders {allowedProviders}: {len(filteredModels)} models (from {len(availableModels)})")
availableModels = filteredModels
else:
logger.warning(f"No models match allowedProviders {allowedProviders}, using all {len(availableModels)} available models")
errorMsg = f"No models match allowedProviders {allowedProviders} for operation {options.operationType}"
logger.error(errorMsg)
return AiCallResponse(
content=errorMsg, modelName="error", priceCHF=0.0,
processingTime=0.0, bytesSent=0, bytesReceived=0, errorCount=1,
)
failoverModelList = modelSelector.getFailoverModelList(prompt, context, options, availableModels)
@ -122,7 +126,8 @@ class AiObjects:
errorCount=1
)
# Try each model in failover sequence
_MAX_SHORT_RETRY = 15.0
lastError = None
for attempt, model in enumerate(failoverModelList):
try:
@ -136,6 +141,31 @@ class AiObjects:
logger.info(f"AI call successful with model: {model.name}")
return response
except RateLimitExceededException as rle:
retryAfter = rle.retryAfterSeconds
lastError = rle
if 0 < retryAfter <= _MAX_SHORT_RETRY:
logger.info(f"Rate limit on {model.name}, waiting {retryAfter:.1f}s before retry")
await asyncio.sleep(retryAfter + 0.5)
try:
if request.messages:
response = await self._callWithMessages(model, request.messages, options, request.tools)
else:
response = await self._callWithModel(model, prompt, context, options)
logger.info(f"AI call successful with {model.name} after rate-limit retry")
return response
except Exception as retryErr:
lastError = retryErr
logger.warning(f"Retry after rate-limit wait also failed for {model.name}: {retryErr}")
else:
logger.warning(f"Rate limit on {model.name} (retryAfter={retryAfter:.1f}s), failing over")
cooldown = max(retryAfter, 10.0) if retryAfter > 0 else 0.0
modelSelector.reportFailure(model.name, cooldownSeconds=cooldown)
if attempt < len(failoverModelList) - 1:
continue
logger.error(f"All {len(failoverModelList)} models failed for operation {options.operationType}")
break
except Exception as e:
lastError = e
logger.warning(f"AI call failed with model {model.name}: {str(e)}")
@ -323,6 +353,13 @@ class AiObjects:
filtered = [m for m in availableModels if m.connectorType in allowedProviders]
if filtered:
availableModels = filtered
else:
yield AiCallResponse(
content=f"No models match allowedProviders {allowedProviders} for operation {options.operationType}",
modelName="error", priceCHF=0.0, processingTime=0.0,
bytesSent=0, bytesReceived=0, errorCount=1,
)
return
failoverModelList = modelSelector.getFailoverModelList(
request.prompt, request.context or "", options, availableModels
@ -335,6 +372,8 @@ class AiObjects:
)
return
_MAX_SHORT_RETRY = 15.0
lastError = None
for attempt, model in enumerate(failoverModelList):
try:
@ -342,6 +381,28 @@ class AiObjects:
async for chunk in self._callWithMessagesStream(model, request.messages, options, request.tools):
yield chunk
return
except RateLimitExceededException as rle:
retryAfter = rle.retryAfterSeconds
lastError = rle
if 0 < retryAfter <= _MAX_SHORT_RETRY:
logger.info(f"Rate limit on {model.name}, waiting {retryAfter:.1f}s before retry")
await asyncio.sleep(retryAfter + 0.5)
try:
async for chunk in self._callWithMessagesStream(model, request.messages, options, request.tools):
yield chunk
return
except Exception as retryErr:
lastError = retryErr
logger.warning(f"Retry after rate-limit wait also failed for {model.name}: {retryErr}")
else:
logger.warning(f"Rate limit on {model.name} (retryAfter={retryAfter:.1f}s), failing over")
cooldown = max(retryAfter, 10.0) if retryAfter > 0 else 0.0
modelSelector.reportFailure(model.name, cooldownSeconds=cooldown)
if attempt < len(failoverModelList) - 1:
continue
break
except Exception as e:
lastError = e
logger.warning(f"Streaming AI call failed with {model.name}: {e}")
@ -421,12 +482,18 @@ class AiObjects:
async def callEmbedding(self, texts: List[str], options: AiCallOptions = None) -> AiCallResponse:
"""Generate embeddings for a list of texts using the best available embedding model.
Uses the standard model selector with OperationTypeEnum.EMBEDDING to pick the model.
Failover across providers (OpenAI -> Mistral) works identically to chat models.
Token-aware batching: splits the texts list into batches that respect the
model's contextLength (with 10% safety margin). Each batch is sent as a
separate API call; the resulting embeddings are merged in order.
Failover across providers (OpenAI -> Mistral) works identically to chat models,
but ContextLengthExceededException is NOT retried via failover (same limits).
Returns:
AiCallResponse with metadata["embeddings"] containing the vectors.
"""
from modules.aicore.aicoreBase import ContextLengthExceededException as _CtxExc
if options is None:
options = AiCallOptions(operationType=OperationTypeEnum.EMBEDDING)
else:
@ -434,6 +501,15 @@ class AiObjects:
combinedText = " ".join(texts[:3])[:500]
availableModels = modelRegistry.getAvailableModels()
allowedProviders = getattr(options, 'allowedProviders', None) if options else None
if allowedProviders:
filtered = [m for m in availableModels if m.connectorType in allowedProviders]
if filtered:
availableModels = filtered
else:
logger.warning(f"No embedding models match allowedProviders {allowedProviders}")
failoverModelList = modelSelector.getFailoverModelList(
combinedText, "", options, availableModels
)
@ -451,23 +527,39 @@ class AiObjects:
inputBytes = sum(len(t.encode("utf-8")) for t in texts)
startTime = time.time()
modelCall = AiModelCall(
model=model, options=options, embeddingInput=texts
batches = _buildEmbeddingBatches(texts, model.contextLength)
logger.info(
f"Embedding: {len(texts)} texts -> {len(batches)} batch(es), "
f"model contextLength={model.contextLength}"
)
modelResponse = await model.functionCall(modelCall)
if not modelResponse.success:
raise ValueError(f"Embedding call failed: {modelResponse.error}")
allEmbeddings: List[List[float]] = []
totalPriceCHF = 0.0
for batchIdx, batch in enumerate(batches):
modelCall = AiModelCall(
model=model, options=options, embeddingInput=batch
)
modelResponse = await model.functionCall(modelCall)
if not modelResponse.success:
raise ValueError(f"Embedding batch {batchIdx + 1} failed: {modelResponse.error}")
batchEmbeddings = (modelResponse.metadata or {}).get("embeddings", [])
allEmbeddings.extend(batchEmbeddings)
batchBytes = sum(len(t.encode("utf-8")) for t in batch)
totalPriceCHF += model.calculatepriceCHF(0, batchBytes, 0)
processingTime = time.time() - startTime
priceCHF = model.calculatepriceCHF(processingTime, inputBytes, 0)
embeddings = (modelResponse.metadata or {}).get("embeddings", [])
if totalPriceCHF == 0.0:
totalPriceCHF = model.calculatepriceCHF(processingTime, inputBytes, 0)
response = AiCallResponse(
content="", modelName=model.name, provider=model.connectorType,
priceCHF=priceCHF, processingTime=processingTime,
priceCHF=totalPriceCHF, processingTime=processingTime,
bytesSent=inputBytes, bytesReceived=0, errorCount=0,
metadata={"embeddings": embeddings}
metadata={"embeddings": allEmbeddings}
)
if self.billingCallback:
@ -478,6 +570,23 @@ class AiObjects:
return response
except _CtxExc as e:
logger.error(f"ContextLengthExceeded for {model.name} despite batching aborting failover: {e}")
return AiCallResponse(
content=str(e), modelName=model.name, priceCHF=0.0,
processingTime=0.0, bytesSent=0, bytesReceived=0, errorCount=1
)
except RateLimitExceededException as rle:
retryAfter = rle.retryAfterSeconds
lastError = rle
cooldown = max(retryAfter, 10.0) if retryAfter > 0 else 0.0
logger.warning(f"Rate limit on {model.name} during embedding (retryAfter={retryAfter:.1f}s)")
modelSelector.reportFailure(model.name, cooldownSeconds=cooldown)
if attempt < len(failoverModelList) - 1:
continue
break
except Exception as e:
lastError = e
logger.warning(f"Embedding call failed with {model.name}: {str(e)}")
@ -514,4 +623,50 @@ class AiObjects:
return [model.displayName for model in models]
# =============================================================================
# Internal helpers
# =============================================================================
_CHARS_PER_TOKEN = 4
_SAFETY_MARGIN = 0.90
def _estimateTokens(text: str) -> int:
    """Approximate the token count of *text* (heuristic: ~4 characters per token, minimum 1)."""
    estimate = len(text) // _CHARS_PER_TOKEN
    return estimate if estimate > 0 else 1
def _buildEmbeddingBatches(texts: List[str], contextLength: int) -> List[List[str]]:
    """Greedily group texts into batches whose combined estimated token count
    fits within the model's contextLength (with safety margin applied).

    Individual texts are expected to already fit the budget (enforced by the
    chunking layer). An oversized text still ends up alone in its own batch
    as a last resort rather than being dropped.
    """
    if not texts:
        return []
    if contextLength <= 0:
        # No usable limit known: send everything as a single batch.
        return [texts]
    tokenBudget = int(contextLength * _SAFETY_MARGIN)
    batches: List[List[str]] = []
    pending: List[str] = []
    pendingTokens = 0
    for item in texts:
        itemTokens = _estimateTokens(item)
        wouldOverflow = pending and (pendingTokens + itemTokens) > tokenBudget
        if wouldOverflow:
            batches.append(pending)
            pending = []
            pendingTokens = 0
        pending.append(item)
        pendingTokens += itemTokens
    if pending:
        batches.append(pending)
    return batches

View file

@ -96,6 +96,9 @@ def initBootstrap(db: DatabaseConnector) -> None:
if mandateId:
initRootMandateFeatures(db, mandateId)
# Remove feature instances for features that no longer exist in the codebase
_cleanupRemovedFeatureInstances(db)
# Initialize billing settings for root mandate
if mandateId:
initRootMandateBilling(mandateId)
@ -257,6 +260,33 @@ def initRootMandateFeatures(db: DatabaseConnector, mandateId: str) -> None:
logger.info("Root mandate features initialization completed")
def _cleanupRemovedFeatureInstances(db: DatabaseConnector) -> None:
    """Remove feature instances whose featureCode no longer exists in the codebase.

    A feature is considered active when its main module is discoverable via the
    registry. Instances whose featureCode is not in the active set are deleted
    best-effort: individual delete failures are logged, never raised.

    Args:
        db: Database connector used to enumerate and delete FeatureInstance records.
    """
    from modules.datamodels.datamodelFeatures import FeatureInstance
    from modules.system.registry import loadFeatureMainModules
    mainModules = loadFeatureMainModules()
    activeCodes = set()
    for featureName, module in mainModules.items():
        if not hasattr(module, "getFeatureDefinition"):
            continue
        try:
            featureDef = module.getFeatureDefinition()
            activeCodes.add(featureDef.get("code", featureName))
        except Exception:
            # Definition failed to load: treat the feature as active so a
            # transient error never triggers destructive instance deletion.
            activeCodes.add(featureName)
    allInstances = db.getRecordset(FeatureInstance)
    for inst in allInstances:
        code = inst.get("featureCode") if isinstance(inst, dict) else getattr(inst, "featureCode", None)
        instId = inst.get("id") if isinstance(inst, dict) else getattr(inst, "id", None)
        if not code or code in activeCodes:
            continue
        if not instId:
            # Without an id we cannot address the record; skip instead of
            # issuing a delete for the literal string "None".
            logger.warning(f"Orphaned feature instance with featureCode='{code}' has no id; skipping")
            continue
        try:
            db.recordDelete(FeatureInstance, str(instId))
            logger.info(f"Removed orphaned feature instance '{instId}' (featureCode='{code}')")
        except Exception as e:
            logger.warning(f"Could not remove orphaned feature instance '{instId}': {e}")
def initRootMandate(db: DatabaseConnector) -> Optional[str]:
"""
Creates the Root mandate if it doesn't exist.
@ -443,7 +473,7 @@ def initRoles(db: DatabaseConnector) -> None:
# Check specifically for system template roles:
# mandateId=NULL, isSystemRole=True, featureCode=NULL
# Feature templates (e.g. chatplayground admin) share the same labels but have featureCode set!
# Feature templates (e.g. automation admin) share the same labels but have featureCode set!
allTemplates = db.getRecordset(
Role,
recordFilter={"mandateId": None, "isSystemRole": True}
@ -475,7 +505,7 @@ def _deduplicateRoles(db: DatabaseConnector) -> None:
# Group by (roleLabel, mandateId, featureInstanceId, featureCode)
# featureCode is essential: system template ('admin', None, None, None)
# must NOT be grouped with feature template ('admin', None, None, 'chatplayground')
# must NOT be grouped with feature template ('admin', None, None, 'automation')
groups: dict = {}
for role in allRoles:
key = (role.get("roleLabel"), role.get("mandateId"), role.get("featureInstanceId"), role.get("featureCode"))
@ -1931,8 +1961,6 @@ def _createStoreResourceRules(db: DatabaseConnector) -> None:
"""
storeResources = [
"resource.store.automation",
"resource.store.chatplayground",
"resource.store.codeeditor",
"resource.store.teamsbot",
]

View file

@ -853,7 +853,9 @@ class ComponentObjects:
"svg": "image/svg+xml",
"py": "text/x-python",
"js": "application/javascript",
"css": "text/css"
"css": "text/css",
"eml": "message/rfc822",
"msg": "application/vnd.ms-outlook",
}
return extensionToMime.get(ext.lower(), "application/octet-stream")

View file

@ -102,12 +102,6 @@ def _getFeatureUiObjects(featureCode: str) -> List[Dict[str, Any]]:
elif featureCode == "realestate":
from modules.features.realEstate.mainRealEstate import UI_OBJECTS
return UI_OBJECTS
elif featureCode == "chatplayground":
from modules.features.chatplayground.mainChatplayground import UI_OBJECTS
return UI_OBJECTS
elif featureCode == "codeeditor":
from modules.features.codeeditor.mainCodeeditor import UI_OBJECTS
return UI_OBJECTS
elif featureCode == "automation":
from modules.features.automation.mainAutomation import UI_OBJECTS
return UI_OBJECTS
@ -127,7 +121,7 @@ def _getFeatureUiObjects(featureCode: str) -> List[Dict[str, Any]]:
from modules.features.workspace.mainWorkspace import UI_OBJECTS
return UI_OBJECTS
else:
logger.warning(f"Unknown feature code: {featureCode}")
logger.debug(f"Skipping removed feature code: {featureCode}")
return []
except ImportError as e:
logger.error(f"Failed to import UI_OBJECTS for feature {featureCode}: {e}")

View file

@ -41,7 +41,7 @@ class ActionToolAdapter:
if not actionDef or not getattr(actionDef, "dynamicMode", False):
continue
compoundName = f"{shortName}.{actionName}"
compoundName = f"{shortName}_{actionName}"
toolDef = _buildToolDefinition(compoundName, actionDef, actionInfo)
handler = _createDispatchHandler(self._actionExecutor, shortName, actionName)
@ -120,16 +120,16 @@ def _createDispatchHandler(actionExecutor, methodName: str, actionName: str):
data = _formatActionResult(result)
return ToolResult(
toolCallId="",
toolName=f"{methodName}.{actionName}",
toolName=f"{methodName}_{actionName}",
success=result.success,
data=data,
error=result.error
)
except Exception as e:
logger.error(f"ActionToolAdapter dispatch failed for {methodName}.{actionName}: {e}")
logger.error(f"ActionToolAdapter dispatch failed for {methodName}_{actionName}: {e}")
return ToolResult(
toolCallId="",
toolName=f"{methodName}.{actionName}",
toolName=f"{methodName}_{actionName}",
success=False,
error=str(e)
)

View file

@ -26,6 +26,10 @@ class AgentEventTypeEnum(str, Enum):
AGENT_PROGRESS = "agentProgress"
AGENT_SUMMARY = "agentSummary"
FILE_CREATED = "fileCreated"
FILE_UPDATED = "fileUpdated"
FILE_EDIT_PROPOSAL = "fileEditProposal"
FILE_VERSION = "fileVersion"
FILE_EDIT_REJECTED = "fileEditRejected"
DATA_SOURCE_ACCESS = "dataSourceAccess"
VOICE_RESPONSE = "voiceResponse"
FINAL = "final"
@ -50,7 +54,7 @@ class ToolDefinition(BaseModel):
)
toolSet: Optional[str] = Field(
default=None,
description="Tool-set scope (None = available to all sets, e.g. 'core', 'codeeditor')"
description="Tool-set scope (None = available to all sets, e.g. 'core', 'workspace')"
)
@ -133,3 +137,16 @@ class AgentTrace(BaseModel):
totalCostCHF: float = 0.0
abortReason: Optional[str] = None
rounds: List[AgentRoundLog] = Field(default_factory=list)
class PendingFileEdit(BaseModel):
    """A proposed file edit awaiting user approval."""
    # Unique identifier of this proposal (fresh random UUID per instance).
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    # Target file being edited.
    fileId: str
    fileName: str
    # MIME type of the target file (empty when unknown).
    mimeType: str = ""
    # Full file contents before and after the proposed edit.
    oldContent: str = ""
    newContent: str = ""
    status: str = Field(default="pending", description="pending | accepted | rejected")
    # Originating tool call and workflow (empty when not applicable).
    toolCallId: str = ""
    workflowId: str = ""

View file

@ -160,7 +160,8 @@ class AgentService:
):
if event.type == AgentEventTypeEnum.AGENT_SUMMARY:
await self._persistTrace(workflowId, event.data or {})
logger.debug(f"runAgent yielding event type={event.type}")
if event.type != AgentEventTypeEnum.CHUNK:
logger.debug(f"runAgent yielding event type={event.type}")
yield event
logger.info(f"runAgent loop completed for workflow {workflowId}")
@ -366,6 +367,7 @@ def _getOrCreateTempFolder(chatService) -> Optional[str]:
def _registerCoreTools(registry: ToolRegistry, services):
"""Register built-in core tools: file operations, search, and folder management."""
import uuid as _uuid
from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
# ---- Read-only tools ----
@ -411,7 +413,8 @@ def _registerCoreTools(registry: ToolRegistry, services):
_BINARY_TYPES = ("application/pdf", "image/", "application/vnd.", "application/zip",
"application/x-zip", "application/x-tar", "application/x-7z",
"application/msword", "application/octet-stream")
"application/msword", "application/octet-stream",
"message/rfc822")
isBinary = any(mimeType.startswith(t) for t in _BINARY_TYPES)
rawBytes = chatService.getFileData(fileId)
@ -1013,16 +1016,28 @@ def _registerCoreTools(registry: ToolRegistry, services):
toolCallId="", toolName="editFile", success=False,
error=f"Cannot edit binary file ({file.mimeType}). Only text-based files are supported."
)
contentBytes = content.encode("utf-8")
success = dbMgmt.updateFileData(fileId, contentBytes)
if not success:
return ToolResult(toolCallId="", toolName="editFile", success=False, error="Failed to update file data")
oldContent = ""
oldData = dbMgmt.getFileData(fileId)
if oldData:
try:
oldContent = oldData.decode("utf-8")
except UnicodeDecodeError:
oldContent = ""
editId = str(_uuid.uuid4())
return ToolResult(
toolCallId="", toolName="editFile", success=True,
data=f"File '{file.fileName}' updated ({len(contentBytes)} bytes)",
data=f"Edit proposed for '{file.fileName}'. Waiting for user review.",
sideEvents=[{
"type": "fileUpdated",
"data": {"fileId": fileId, "fileName": file.fileName, "fileSize": len(contentBytes)},
"type": "fileEditProposal",
"data": {
"id": editId,
"fileId": fileId,
"fileName": file.fileName,
"mimeType": file.mimeType,
"oldContent": oldContent,
"newContent": content,
},
}],
)
except Exception as e:
@ -1088,8 +1103,9 @@ def _registerCoreTools(registry: ToolRegistry, services):
registry.register(
"editFile", _editFile,
description=(
"Update the content of an existing text file. Only works for text-based files "
"(text/*, application/json, etc.). For binary files, create a new file instead."
"Propose an edit to an existing text file. The change is shown to the user "
"for review (accept/reject) before being applied. Only works for text-based "
"files (text/*, application/json, etc.). For binary files, create a new file instead."
),
parameters={
"type": "object",
@ -1164,15 +1180,24 @@ def _registerCoreTools(registry: ToolRegistry, services):
return ToolResult(toolCallId="", toolName="externalDownload", success=False, error="connectionId, service, and path are required")
try:
from modules.connectors.connectorResolver import ConnectorResolver
from modules.connectors.connectorProviderBase import DownloadResult as _DR
resolver = ConnectorResolver(
services.getService("security"),
_buildResolverDb(),
)
adapter = await resolver.resolveService(connectionId, service)
fileBytes = await adapter.download(path)
result = await adapter.download(path)
if isinstance(result, _DR):
fileBytes = result.data
fileName = result.fileName or path.split("/")[-1] or "downloaded_file"
else:
fileBytes = result
fileName = path.split("/")[-1] or "downloaded_file"
if not fileBytes:
return ToolResult(toolCallId="", toolName="externalDownload", success=False, error="Download returned empty")
fileName = path.split("/")[-1] or "downloaded_file"
chatService = services.chat
fileItem, _ = chatService.interfaceDbComponent.saveUploadedFile(fileBytes, fileName)
fid = fileItem.id if hasattr(fileItem, "id") else fileItem.get("id", "?")
@ -1183,7 +1208,7 @@ def _registerCoreTools(registry: ToolRegistry, services):
if tempFolderId:
chatService.interfaceDbComponent.updateFile(fid, {"folderId": tempFolderId})
ext = fileName.rsplit(".", 1)[-1].lower() if "." in fileName else ""
hint = "Use readFile to read text content." if ext in ("doc", "docx", "txt", "csv", "json", "xml", "html", "md", "rtf", "odt", "xls", "xlsx", "pptx") else "Use readFile to access the content."
hint = "Use readFile to read text content." if ext in ("doc", "docx", "txt", "csv", "json", "xml", "html", "md", "rtf", "odt", "xls", "xlsx", "pptx", "eml", "msg") else "Use readFile to access the content."
return ToolResult(
toolCallId="", toolName="externalDownload", success=True,
data=f"Downloaded '{fileName}' ({len(fileBytes)} bytes) → local file id: {fid}. {hint}"
@ -1375,6 +1400,8 @@ def _registerCoreTools(registry: ToolRegistry, services):
logger.info(f"Resolved DataSource '{dsId}' ({label}): sourceType={sourceType}, service={service}, connectionId={connectionId}, path={path[:80]}")
return connectionId, service, path
_MAIL_SERVICES = {"outlook", "gmail"}
async def _browseDataSource(args: Dict[str, Any], context: Dict[str, Any]):
dsId = args.get("dataSourceId", "")
subPath = args.get("subPath", "")
@ -1403,7 +1430,10 @@ def _registerCoreTools(registry: ToolRegistry, services):
prefix = "[DIR]" if e.isFolder else "[FILE]"
sizeInfo = f" ({e.size} bytes)" if e.size else ""
lines.append(f"- {prefix} {e.name}{sizeInfo} path: {e.path}")
return ToolResult(toolCallId="", toolName="browseDataSource", success=True, data="\n".join(lines))
result = "\n".join(lines)
if service in _MAIL_SERVICES:
result += "\n\nIMPORTANT: These are email subjects only. To read the full email content, use downloadFromDataSource with the path, then readFile on the returned file ID."
return ToolResult(toolCallId="", toolName="browseDataSource", success=True, data=result)
except Exception as e:
return ToolResult(toolCallId="", toolName="browseDataSource", success=False, error=str(e))
@ -1424,7 +1454,10 @@ def _registerCoreTools(registry: ToolRegistry, services):
if not entries:
return ToolResult(toolCallId="", toolName="searchDataSource", success=True, data="No results found.")
lines = [f"- {e.name} (path: {e.path})" for e in entries]
return ToolResult(toolCallId="", toolName="searchDataSource", success=True, data="\n".join(lines))
result = "\n".join(lines)
if service in _MAIL_SERVICES:
result += "\n\nIMPORTANT: These are email subjects only. To read the full email content, use downloadFromDataSource with the path, then readFile on the returned file ID."
return ToolResult(toolCallId="", toolName="searchDataSource", success=True, data=result)
except Exception as e:
return ToolResult(toolCallId="", toolName="searchDataSource", success=False, error=str(e))
@ -1435,17 +1468,26 @@ def _registerCoreTools(registry: ToolRegistry, services):
if not dsId or not filePath:
return ToolResult(toolCallId="", toolName="downloadFromDataSource", success=False, error="dataSourceId and filePath are required")
try:
from modules.connectors.connectorResolver import ConnectorResolver
from modules.connectors.connectorProviderBase import DownloadResult as _DR
connectionId, service, basePath = await _resolveDataSource(dsId)
fullPath = filePath if filePath.startswith("/") else f"{basePath.rstrip('/')}/{filePath}"
from modules.connectors.connectorResolver import ConnectorResolver
resolver = ConnectorResolver(
services.getService("security"),
_buildResolverDb(),
)
adapter = await resolver.resolveService(connectionId, service)
fileBytes = await adapter.download(fullPath)
result = await adapter.download(fullPath)
if isinstance(result, _DR):
fileBytes = result.data
fileName = result.fileName or fileName
else:
fileBytes = result
if not fileBytes:
return ToolResult(toolCallId="", toolName="downloadFromDataSource", success=False, error="Download returned empty")
if not fileName or "." not in fileName:
pathSegment = fullPath.split("/")[-1] or "downloaded_file"
fileName = fileName or pathSegment
@ -1460,16 +1502,20 @@ def _registerCoreTools(registry: ToolRegistry, services):
except Exception:
pass
if "." not in fileName:
import mimetypes as _mt
guessed = _mt.guess_type(f"file.{_mt.guess_extension('application/octet-stream') or ''}")[0]
if not guessed and fileBytes[:4] == b"%PDF":
if fileBytes[:4] == b"%PDF":
fileName = f"{fileName}.pdf"
elif not guessed and fileBytes[:2] == b"PK":
elif fileBytes[:2] == b"PK":
fileName = f"{fileName}.zip"
chatService = services.chat
fileItem, _ = chatService.interfaceDbComponent.saveUploadedFile(fileBytes, fileName)
fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
if fiId:
chatService.interfaceDbComponent.updateFile(fileItem.id, {"featureInstanceId": fiId})
tempFolderId = _getOrCreateTempFolder(chatService)
if tempFolderId:
chatService.interfaceDbComponent.updateFile(fileItem.id, {"folderId": tempFolderId})
ext = fileName.rsplit(".", 1)[-1].lower() if "." in fileName else ""
hint = "Use readFile to read the text content." if ext in ("doc", "docx", "txt", "csv", "json", "xml", "html", "md", "rtf", "odt", "xls", "xlsx", "pptx", "pdf") else "Use readFile to access the content."
hint = "Use readFile to read the text content." if ext in ("doc", "docx", "txt", "csv", "json", "xml", "html", "md", "rtf", "odt", "xls", "xlsx", "pptx", "pdf", "eml", "msg") else "Use readFile to access the content."
return ToolResult(
toolCallId="", toolName="downloadFromDataSource", success=True,
data=f"Downloaded '{fileName}' ({len(fileBytes)} bytes) → local file id: {fileItem.id}. {hint}"
@ -1508,7 +1554,7 @@ def _registerCoreTools(registry: ToolRegistry, services):
registry.register(
"downloadFromDataSource", _downloadFromDataSource,
description="Download a file from an attached data source into local storage. Returns the local file ID which can then be read with readFile. Always provide the fileName if known from the browse results.",
description="Download a file or email message from an attached data source into local storage. Returns the local file ID which can then be read with readFile. For email sources (Outlook, Gmail), this downloads the full email content -- browse/search only return subjects. Always provide the fileName if known.",
parameters={
"type": "object",
"properties": {

View file

@ -7,7 +7,7 @@ import time
import base64
from typing import Dict, Any, List, Optional, Tuple, Callable
from modules.datamodels.datamodelChat import PromptPlaceholder, ChatDocument, WorkflowModeEnum
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum
from modules.datamodels.datamodelAi import AiCallRequest, AiCallResponse, AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum
from modules.datamodels.datamodelExtraction import ContentPart, DocumentIntent
from modules.datamodels.datamodelWorkflow import AiResponse, AiResponseMetadata, DocumentData
from modules.datamodels.datamodelDocument import RenderedDocument
@ -198,6 +198,19 @@ class AiService:
finally:
self.aiObjects.billingCallback = None
async def callEmbedding(self, texts: List[str]) -> AiCallResponse:
    """Embed *texts* via the underlying AI objects, honoring the effective
    allowedProviders and routing costs through the billing callback."""
    await self.ensureAiObjectsInitialized()
    embeddingOptions = AiCallOptions(operationType=OperationTypeEnum.EMBEDDING)
    providers = self._calculateEffectiveProviders()
    if providers:
        embeddingOptions.allowedProviders = providers
    # Attach billing only for the duration of this call.
    self.aiObjects.billingCallback = self._createBillingCallback()
    try:
        return await self.aiObjects.callEmbedding(texts, embeddingOptions)
    finally:
        self.aiObjects.billingCallback = None
# =========================================================================
# SPEECH_TEAMS: Dedicated handler for Teams Meeting AI analysis
# Bypasses standard model selection. Uses a fixed fast model.

View file

@ -42,7 +42,7 @@ def getService(currentUser: User, mandateId: str, featureInstanceId: str = None,
currentUser: Current user object
mandateId: Mandate ID for context
featureInstanceId: Optional feature instance ID
featureCode: Optional feature code (e.g., 'chatplayground', 'automation')
featureCode: Optional feature code (e.g., 'automation')
Returns:
BillingService instance

View file

@ -3,6 +3,7 @@
"""Knowledge service: 3-tier RAG with indexing, semantic search, and context building."""
import logging
import re
from typing import Any, Callable, Dict, List, Optional
from modules.datamodels.datamodelKnowledge import (
@ -14,7 +15,8 @@ from modules.shared.timeUtils import getUtcTimestamp
logger = logging.getLogger(__name__)
DEFAULT_CHUNK_SIZE = 512
CHARS_PER_TOKEN = 4
DEFAULT_CHUNK_TOKENS = 400
DEFAULT_CONTEXT_BUDGET = 8000
@ -31,14 +33,9 @@ class KnowledgeService:
# =========================================================================
async def _embed(self, texts: List[str]) -> List[List[float]]:
"""Embed texts via the AI interface's generic embedding method."""
"""Embed texts via AiService (respects allowedProviders)."""
aiService = self._getService("ai")
await aiService.ensureAiObjectsInitialized()
aiObjects = aiService.aiObjects
if aiObjects is None:
logger.warning("Embedding skipped: aiObjects not available")
return []
response = await aiObjects.callEmbedding(texts)
response = await aiService.callEmbedding(texts)
if response.errorCount > 0:
logger.error(f"Embedding failed: {response.content}")
return []
@ -115,9 +112,16 @@ class KnowledgeService:
textObjects = [o for o in contentObjects if o.get("contentType") == "text"]
if textObjects:
self._knowledgeDb.updateFileStatus(fileId, "embedding")
chunks = _chunkForEmbedding(textObjects, chunkSize=DEFAULT_CHUNK_SIZE)
chunks = _chunkForEmbedding(textObjects, maxTokens=DEFAULT_CHUNK_TOKENS)
texts = [c["data"] for c in chunks]
totalChars = sum(len(t) for t in texts)
estTokens = totalChars // CHARS_PER_TOKEN
logger.info(
f"Embedding file {fileId}: {len(textObjects)} text objects -> "
f"{len(chunks)} chunks, ~{estTokens} tokens total"
)
embeddings = await self._embed(texts) if texts else []
for i, chunk in enumerate(chunks):
@ -428,49 +432,77 @@ class KnowledgeService:
# Internal helpers
# =============================================================================
def _estimateTokens(text: str) -> int:
    """Cheap token estimate for *text*: one token per CHARS_PER_TOKEN characters, never below 1."""
    approx = len(text) // CHARS_PER_TOKEN
    return approx or 1
def _splitSentences(text: str) -> List[str]:
"""Split text into sentences at common boundaries (.!?) followed by whitespace."""
parts = re.split(r'(?<=[.!?])\s+', text.replace("\n", " ").strip())
return [p for p in parts if p.strip()]
def _hardSplitByTokens(text: str, maxTokens: int) -> List[str]:
    """Force-split *text* into pieces that each fit within *maxTokens*.

    Safety net for sentences that exceed the chunk budget on their own.
    Prefers breaking at word boundaries; falls back to a hard character cut.
    """
    charBudget = maxTokens * CHARS_PER_TOKEN
    segments: List[str] = []
    remaining = text
    while len(remaining) > charBudget:
        cut = remaining.rfind(" ", 0, charBudget)
        if cut <= 0:
            cut = charBudget
        segments.append(remaining[:cut].strip())
        remaining = remaining[cut:].strip()
    if remaining:
        segments.append(remaining)
    return segments
def _chunkForEmbedding(
    textObjects: List[Dict[str, Any]], maxTokens: int = DEFAULT_CHUNK_TOKENS
) -> List[Dict[str, Any]]:
    """Split text content objects into token-aware chunks suitable for embedding.

    Each chunk preserves the contentObjectId and contextRef of its source
    object. Oversized texts are split at sentence boundaries; a single
    sentence that alone exceeds maxTokens is hard-split at word boundaries
    via _hardSplitByTokens.

    Args:
        textObjects: Objects carrying "data" (text), "contentObjectId", "contextRef".
        maxTokens: Estimated-token budget per chunk.

    Returns:
        List of chunk dicts with keys "data", "contentObjectId", "contextRef".
    """
    chunks: List[Dict[str, Any]] = []
    for obj in textObjects:
        text = (obj.get("data", "") or "").strip()
        if not text:
            continue
        contentObjectId = obj.get("contentObjectId", "")
        contextRef = obj.get("contextRef", {})
        # Fast path: the whole object already fits in one chunk.
        if _estimateTokens(text) <= maxTokens:
            chunks.append({"data": text, "contentObjectId": contentObjectId, "contextRef": contextRef})
            continue
        currentChunk = ""
        for sentence in _splitSentences(text):
            if _estimateTokens(sentence) > maxTokens:
                # Flush accumulated text, then hard-split the oversized sentence.
                if currentChunk.strip():
                    chunks.append({"data": currentChunk.strip(), "contentObjectId": contentObjectId, "contextRef": contextRef})
                    currentChunk = ""
                for piece in _hardSplitByTokens(sentence, maxTokens):
                    chunks.append({"data": piece, "contentObjectId": contentObjectId, "contextRef": contextRef})
                continue
            candidate = f"{currentChunk} {sentence}".strip() if currentChunk else sentence
            if _estimateTokens(candidate) > maxTokens:
                # Adding this sentence would overflow: close the current chunk.
                if currentChunk.strip():
                    chunks.append({"data": currentChunk.strip(), "contentObjectId": contentObjectId, "contextRef": contextRef})
                currentChunk = sentence
            else:
                currentChunk = candidate
        if currentChunk.strip():
            chunks.append({"data": currentChunk.strip(), "contentObjectId": contentObjectId, "contextRef": contextRef})
    return chunks

View file

@ -437,16 +437,6 @@ RESOURCE_OBJECTS = [
"label": {"en": "Store: Automation", "de": "Store: Automation", "fr": "Store: Automatisation"},
"meta": {"category": "store", "featureCode": "automation"}
},
{
"objectKey": "resource.store.chatplayground",
"label": {"en": "Store: Chat Playground", "de": "Store: Chat Playground", "fr": "Store: Chat Playground"},
"meta": {"category": "store", "featureCode": "chatplayground"}
},
{
"objectKey": "resource.store.codeeditor",
"label": {"en": "Store: Code Editor", "de": "Store: Code Editor", "fr": "Store: Code Editor"},
"meta": {"category": "store", "featureCode": "codeeditor"}
},
{
"objectKey": "resource.store.teamsbot",
"label": {"en": "Store: Teams Bot", "de": "Store: Teams Bot", "fr": "Store: Teams Bot"},

View file

@ -1,11 +1,7 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Workflow feature - handles workflow execution, scheduling, and chat playground operations.
Combines functionality from:
- automation: Automation workflow execution and scheduling
- chatPlayground: Chat playground workflow start/stop operations
Workflow feature - handles workflow execution, scheduling, and start/stop operations.
"""
from .mainWorkflow import chatStart, chatStop, executeAutomation, syncAutomationEvents, createAutomationEventHandler

View file

@ -1,11 +1,7 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Main workflow service - handles workflow execution, scheduling, and chat playground operations.
Combines functionality from:
- mainAutomation.py: Automation workflow execution and scheduling
- mainChatPlayground.py: Chat playground workflow start/stop operations
Main workflow service - handles workflow execution, scheduling, and start/stop operations.
"""
import logging
@ -35,11 +31,11 @@ async def chatStart(currentUser: User, userInput: UserInputRequest, workflowMode
workflowMode: Workflow mode (Dynamic, Automation, etc.)
mandateId: Mandate ID (required for billing)
featureInstanceId: Feature instance ID (required for billing)
featureCode: Feature code (e.g., 'chatplayground', 'automation')
featureCode: Feature code (e.g., 'automation')
services: Pre-built service hub from the calling feature (required). Each feature must pass its own services.
"""
if services is None:
raise ValueError("services is required: each feature must pass its own service hub (e.g. getChatplaygroundServices, getAutomationServices)")
raise ValueError("services is required: each feature must pass its own service hub (e.g. getAutomationServices)")
try:
# Store allowedProviders in services context for model selection
@ -61,7 +57,7 @@ async def chatStart(currentUser: User, userInput: UserInputRequest, workflowMode
async def chatStop(currentUser: User, workflowId: str, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None, featureCode: Optional[str] = None, services=None) -> ChatWorkflow:
"""Stops a running chat. Caller must pass services from the owning feature."""
if services is None:
raise ValueError("services is required: each feature must pass its own service hub (e.g. getChatplaygroundServices, getAutomationServices)")
raise ValueError("services is required: each feature must pass its own service hub (e.g. getAutomationServices)")
try:
if featureCode:
services.featureCode = featureCode

View file

@ -76,10 +76,6 @@ _MUST_STAY_ASYNC: Dict[str, Set[str]] = {
"event_stream", # await request.is_disconnected(), await asyncio.wait_for(...)
"stop_chatbot", # await event_manager.emit_event(...)
},
"modules/features/chatplayground/routeFeatureChatplayground.py": {
"start_workflow", # await chatStart(...)
"stop_workflow", # await chatStop(...)
},
"modules/features/neutralization/routeFeatureNeutralizer.py": {
"process_sharepoint_files", # await service.processSharepointFiles(...)
},
@ -105,7 +101,6 @@ _SKIP_FILES: Set[str] = {
# Helper functions that are fake-async (async def but no await inside)
# These will be converted from async def -> def
_FAKE_ASYNC_HELPERS: Dict[str, Set[str]] = {
"modules/features/chatplayground/routeFeatureChatplayground.py": {"_validateInstanceAccess"},
"modules/features/trustee/routeFeatureTrustee.py": {"_validateInstanceAccess", "_validateInstanceAdmin"},
"modules/features/realestate/routeFeatureRealEstate.py": {"_validateInstanceAccess"},
"modules/features/chatbot/routeFeatureChatbot.py": {"_validateInstanceAccess"},

View file

@ -96,7 +96,7 @@ class MethodAiOperationsTester:
import logging
logging.getLogger().setLevel(logging.DEBUG)
# Import and initialize services - use the same approach as routeChatPlayground
# Import and initialize services
import modules.interfaces.interfaceDbChat as interfaceFeatureAiChat
interfaceDbChat = interfaceDbChat.getInterface(self.testUser)