automation unification implemented

This commit is contained in:
ValueOn AG 2026-04-07 00:49:08 +02:00
parent 2171460b9e
commit f65223137e
67 changed files with 5979 additions and 10985 deletions

22
app.py
View file

@@ -20,10 +20,8 @@ from datetime import datetime
from modules.shared.configuration import APP_CONFIG
from modules.shared.eventManagement import eventManager
from modules.workflows.automation import subAutomationSchedule
from modules.workflows.automation2 import subAutomation2Schedule
from modules.features.automation2.emailPoller import start as startAutomation2EmailPoller
from modules.features.automation2.emailPoller import stop as stopAutomation2EmailPoller
from modules.features.graphicalEditor.emailPoller import start as startGraphicalEditorEmailPoller
from modules.features.graphicalEditor.emailPoller import stop as stopGraphicalEditorEmailPoller
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.system.registry import loadFeatureMainModules
@@ -357,15 +355,14 @@ async def lifespan(app: FastAPI):
# --- Init Managers ---
import asyncio
from modules.workflows.automation2 import subAutomation2Schedule
try:
main_loop = asyncio.get_running_loop()
eventManager.set_event_loop(main_loop)
subAutomation2Schedule.set_main_loop(main_loop)
except RuntimeError:
pass
subAutomationSchedule.start(eventUser) # Automation scheduler
subAutomation2Schedule.start(eventUser) # Automation2 schedule trigger (cron)
# Automation2 email poller: started on-demand when a run pauses for email.checkEmail
subAutomation2Schedule.start(eventUser)
eventManager.start()
# Register audit log cleanup scheduler
@@ -394,10 +391,9 @@ async def lifespan(app: FastAPI):
yield
# --- Stop Managers ---
stopAutomation2EmailPoller(eventUser) # Automation2 email poller (no-op if not running)
subAutomation2Schedule.stop(eventUser) # Automation2 schedule
stopGraphicalEditorEmailPoller(eventUser)
subAutomation2Schedule.stop(eventUser)
eventManager.stop()
subAutomationSchedule.stop(eventUser) # Automation scheduler
# --- Stop Feature Containers (Plug&Play) ---
try:
@@ -592,12 +588,6 @@ app.include_router(adminSecurityRouter)
from modules.routes.routeSharepoint import router as sharepointRouter
app.include_router(sharepointRouter)
from modules.routes.routeAdminAutomationEvents import router as adminAutomationEventsRouter
app.include_router(adminAutomationEventsRouter)
from modules.routes.routeAdminAutomationLogs import router as adminAutomationLogsRouter
app.include_router(adminAutomationLogsRouter)
from modules.routes.routeAdminLogs import router as adminLogsRouter
app.include_router(adminLogsRouter)

View file

@@ -4,7 +4,7 @@
APP_ENV_TYPE = dev
APP_ENV_LABEL = Development Instance Patrick
APP_API_URL = http://localhost:8000
APP_KEY_SYSVAR = D:/Athi/Local/Web/poweron/local/key.txt
APP_KEY_SYSVAR = D:/Athi/Local/Web/poweron/local/notes/key.txt
APP_INIT_PASS_ADMIN_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEeFFtRGtQeVUtcjlrU3dab1ZxUm9WSks0MlJVYUtERFlqUElHemZrOGNENk1tcmJNX3Vxc01UMDhlNU40VzZZRVBpUGNmT3podzZrOGhOeEJIUEt4eVlSWG5UYXA3d09DVXlLT21Kb1JYSUU9
APP_INIT_PASS_EVENT_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpERzZjNm56WGVBdjJTeG5Udjd6OGQwUVotYXUzQjJ1YVNyVXVBa3NZVml3ODU0MVNkZjhWWmJwNUFkc19BcHlHMTU1Q3BRcHU0cDBoZkFlR2l6UEZQU3d2U3MtMDh5UDZteGFoQ0EyMUE1ckE9

View file

@@ -1,97 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Automation models: AutomationDefinition, AutomationTemplate."""
from typing import List, Dict, Any, Optional
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
from modules.shared.attributeUtils import registerModelLabels
from modules.datamodels.datamodelUtils import TextMultilingual
import uuid
class AutomationDefinition(BaseModel):
    """A concrete, schedulable automation: cron schedule + JSON template + placeholder values.

    Persisted per mandate / feature instance. `eventId` links the record to a
    registered scheduler event; `status` is derived from it at read time (see
    the interface's _computeAutomationStatus) and is not authoritative storage.
    """
    # Primary key; generated client-side so the record can be referenced before insert.
    id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
    mandateId: str = Field(description="Mandate ID", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
    featureInstanceId: str = Field(description="ID of the feature instance this automation belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
    label: str = Field(description="User-friendly name", json_schema_extra={"frontend_type": "text", "frontend_required": True})
    # Cron pattern, restricted in the UI to a fixed option list.
    schedule: str = Field(description="Cron schedule pattern", json_schema_extra={"frontend_type": "select", "frontend_required": True, "frontend_options": [
        {"value": "0 */4 * * *", "label": {"en": "Every 4 hours", "fr": "Toutes les 4 heures"}},
        {"value": "0 22 * * *", "label": {"en": "Daily at 22:00", "fr": "Quotidien à 22:00"}},
        {"value": "0 10 * * 1", "label": {"en": "Weekly Monday 10:00", "fr": "Hebdomadaire lundi 10:00"}}
    ]})
    template: str = Field(description="JSON template with placeholders (format: {{KEY:PLACEHOLDER_NAME}})", json_schema_extra={"frontend_type": "textarea", "frontend_required": True})
    placeholders: Dict[str, str] = Field(default_factory=dict, description="Dictionary of placeholder key/value pairs (e.g., {'connectionName': 'MyConnection', 'sharepointFolderNameSource': '/folder/path', 'webResearchUrl': 'https://...', 'webResearchPrompt': '...', 'documentPrompt': '...'})", json_schema_extra={"frontend_type": "textarea"})
    active: bool = Field(default=False, description="Whether automation should be launched in event handler", json_schema_extra={"frontend_type": "checkbox", "frontend_required": False})
    # None while the automation is not registered with the event manager.
    eventId: Optional[str] = Field(None, description="Event ID from event management (None if not registered)", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
    # Computed, read-only; populated by the interface layer from eventId.
    status: Optional[str] = Field(None, description="Status: 'active' if event is registered, 'inactive' if not (computed, readonly)", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
    executionLogs: List[Dict[str, Any]] = Field(default_factory=list, description="List of execution logs, each containing timestamp, workflowId, status, and messages", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
    # Empty list = no extra restriction beyond RBAC-permitted providers.
    allowedProviders: List[str] = Field(default_factory=list, description="List of allowed AICore providers (e.g., 'anthropic', 'openai'). Empty means all RBAC-permitted providers are allowed.", json_schema_extra={"frontend_type": "multiselect", "frontend_readonly": False, "frontend_required": False})
# Register UI display labels for AutomationDefinition and each of its fields.
# NOTE(review): the project uses "ge" as the German key (ISO 639-1 would be
# "de") — kept as-is since registerModelLabels presumably expects it; confirm.
registerModelLabels(
    "AutomationDefinition",
    {"en": "Automation Definition", "ge": "Automatisierungs-Definition", "fr": "Définition d'automatisation"},
    {
        "id": {"en": "ID", "ge": "ID", "fr": "ID"},
        "mandateId": {"en": "Mandate ID", "ge": "Mandanten-ID", "fr": "ID du mandat"},
        "featureInstanceId": {"en": "Feature Instance ID", "ge": "Feature-Instanz-ID", "fr": "ID de l'instance de fonctionnalité"},
        "label": {"en": "Label", "ge": "Bezeichnung", "fr": "Libellé"},
        "schedule": {"en": "Schedule", "ge": "Zeitplan", "fr": "Planification"},
        "template": {"en": "Template", "ge": "Vorlage", "fr": "Modèle"},
        "placeholders": {"en": "Placeholders", "ge": "Platzhalter", "fr": "Espaces réservés"},
        "active": {"en": "Active", "ge": "Aktiv", "fr": "Actif"},
        "eventId": {"en": "Event ID", "ge": "Event-ID", "fr": "ID de l'événement"},
        "status": {"en": "Status", "ge": "Status", "fr": "Statut"},
        "executionLogs": {"en": "Execution Logs", "ge": "Ausführungsprotokolle", "fr": "Journaux d'exécution"},
        "allowedProviders": {"en": "Allowed Providers", "ge": "Erlaubte Provider", "fr": "Fournisseurs autorisés"},
    },
)
class AutomationTemplate(PowerOnModel):
    """Automation template without live placeholder values (DB-persisted).

    System templates (isSystem=True): modifiable only by SysAdmin; readable by
    all users. Instance templates (isSystem=False, featureInstanceId set):
    CRUD by instance admin/editor.
    """
    # Primary key; generated client-side.
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True}
    )
    label: TextMultilingual = Field(
        description="Template name (multilingual)",
        json_schema_extra={"frontend_type": "multilingual", "frontend_required": True}
    )
    overview: Optional[TextMultilingual] = Field(
        None,
        description="Short description (multilingual)",
        json_schema_extra={"frontend_type": "multilingual", "frontend_required": False}
    )
    # The raw workflow JSON, still containing {{KEY:...}} placeholders.
    template: str = Field(
        description="JSON workflow structure with {{KEY:...}} placeholders",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": True}
    )
    # Read-only in the frontend; enforced server-side in create/update paths.
    isSystem: bool = Field(
        default=False,
        description="System template (only SysAdmin can modify, all users can read)",
        json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": False}
    )
    featureInstanceId: Optional[str] = Field(
        None,
        description="Feature instance ID (null for system templates, set for instance-scoped templates)",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
    )
# Register UI display labels for AutomationTemplate and each of its fields.
registerModelLabels(
    "AutomationTemplate",
    {"en": "Automation Template", "ge": "Automation-Vorlage", "fr": "Modèle d'automatisation"},
    {
        "id": {"en": "ID", "ge": "ID", "fr": "ID"},
        "label": {"en": "Label", "ge": "Bezeichnung", "fr": "Libellé"},
        "overview": {"en": "Overview", "ge": "Übersicht", "fr": "Aperçu"},
        "template": {"en": "Template", "ge": "Vorlage", "fr": "Modèle"},
        "isSystem": {"en": "System Template", "ge": "System-Vorlage", "fr": "Modèle système"},
        "featureInstanceId": {"en": "Feature Instance", "ge": "Feature-Instanz", "fr": "Instance de fonctionnalité"},
    },
)

View file

@@ -1,872 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Interface for Automation feature - manages AutomationDefinition and AutomationTemplate.
Uses the PostgreSQL connector for data access with user/mandate filtering.
"""
import logging
import uuid
import math
from typing import Dict, Any, List, Optional, Union
from modules.security.rbac import RbacClass
from modules.datamodels.datamodelRbac import AccessRuleContext
from modules.datamodels.datamodelUam import AccessLevel, User
from modules.features.automation.datamodelFeatureAutomation import AutomationDefinition, AutomationTemplate
from modules.connectors.connectorDbPostgre import DatabaseConnector
from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResult
from modules.interfaces.interfaceRbac import getRecordsetWithRBAC, buildDataObjectKey
from modules.shared.configuration import APP_CONFIG
logger = logging.getLogger(__name__)
def _automationDefinitionPayload(data: Dict[str, Any]) -> Dict[str, Any]:
    """Keep only the keys that are declared fields on AutomationDefinition.

    Connector results carry system/enrichment keys (sysCreatedBy, status,
    display names, ...) that the Pydantic model does not declare; strip them
    before constructing the model.
    """
    fieldNames = set(AutomationDefinition.model_fields.keys())
    source = data or {}
    return {key: value for key, value in source.items() if key in fieldNames}
# Singleton factory for Automation instances
# NOTE(review): the factory that populates this cache is not visible in this
# chunk; presumably keyed per user/mandate/feature-instance — confirm before reuse.
_automationInterfaces = {}
class AutomationObjects:
"""
Interface for Automation database operations.
Manages AutomationDefinition and AutomationTemplate with RBAC support.
"""
    def __init__(self, currentUser: User, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None):
        """Bind the interface to a user/mandate/feature-instance context.

        Opens the poweron_automation DB connection and wires RBAC against the
        poweron_app database, then pushes the user ID into the connector context.
        """
        self.currentUser = currentUser
        self.mandateId = mandateId
        self.featureInstanceId = featureInstanceId
        # userId drives connector context and sysCreatedBy attribution on writes.
        self.userId = currentUser.id if currentUser else None
        # Initialize database with proper configuration
        self._initializeDatabase()
        # Initialize RBAC - AccessRules are in poweron_app, not poweron_automation!
        from modules.security.rootAccess import getRootDbAppConnector
        dbApp = getRootDbAppConnector()
        self.rbac = RbacClass(self.db, dbApp=dbApp)
        # Update database context
        self.db.updateContext(self.userId)
    def _initializeDatabase(self):
        """Initializes the database connection with proper configuration.

        Host/user/password/port come from APP_CONFIG; the database name is
        fixed to 'poweron_automation' (automation data is kept separate from
        the poweron_app database). The connector receives the current userId.
        """
        # Get configuration values
        dbHost = APP_CONFIG.get("DB_HOST", "_no_config_default_data")
        dbDatabase = "poweron_automation"
        dbUser = APP_CONFIG.get("DB_USER")
        dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET")
        dbPort = int(APP_CONFIG.get("DB_PORT", 5432))
        # Create database connector with full configuration
        self.db = DatabaseConnector(
            dbHost=dbHost,
            dbDatabase=dbDatabase,
            dbUser=dbUser,
            dbPassword=dbPassword,
            dbPort=dbPort,
            userId=self.userId,
        )
        logger.debug(f"Automation database initialized for user {self.userId}")
def setUserContext(self, currentUser: User, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None):
"""Update user context for the interface."""
self.currentUser = currentUser
self.mandateId = mandateId
self.featureInstanceId = featureInstanceId
self.userId = currentUser.id if currentUser else None
if hasattr(self.db, 'updateContext'):
self.db.updateContext(self.userId)
    def checkRbacPermission(self, model, action: str, recordId: str = None) -> bool:
        """Check RBAC permission for a specific action on a model.

        Resolution by access level:
          ALL / GROUP -> allowed.
          MY          -> allowed only if the record (when recordId is given) was
                         created by the current user; with no recordId (e.g. for
                         CREATE) it is allowed.
          anything else -> denied.

        NOTE(review): the GROUP branch grants access without a group-membership
        check here — presumably group filtering happens elsewhere; confirm.
        """
        objectKey = buildDataObjectKey(model.__name__)
        permissions = self.rbac.getUserPermissions(
            user=self.currentUser,
            context=AccessRuleContext.DATA,
            item=objectKey,
            mandateId=self.mandateId,
            featureInstanceId=self.featureInstanceId
        )
        # Missing action attribute on the permissions object counts as NONE.
        accessLevel = getattr(permissions, action, AccessLevel.NONE)
        if accessLevel == AccessLevel.ALL:
            return True
        elif accessLevel == AccessLevel.GROUP:
            return True
        elif accessLevel == AccessLevel.MY:
            if recordId:
                # Ownership check: compare the record's creator to the current user.
                record = self.db.getRecordset(model, recordFilter={"id": recordId})
                if record:
                    return record[0].get("sysCreatedBy") == self.userId
                else:
                    return False  # Record not found = no access
            return True  # No recordId needed (e.g., for CREATE)
        return False
# =========================================================================
# AutomationDefinition CRUD methods
# =========================================================================
def _computeAutomationStatus(self, automation: Dict[str, Any]) -> str:
"""Compute status field based on eventId presence"""
eventId = automation.get("eventId")
return "Running" if eventId else "Idle"
    def _enrichAutomationsWithUserAndMandate(self, automations: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Batch enrich automations with user names, mandate names and feature instance labels.

        Uses direct DB lookup (no RBAC) because this is purely cosmetic enrichment;
        the user already has RBAC-verified access to the automations themselves.

        Mutates the dicts in place (adds sysCreatedByUserName, mandateName,
        featureInstanceName) and returns the same list.
        """
        if not automations:
            return automations
        # Collect all unique IDs
        userIds = set()
        mandateIds = set()
        featureInstanceIds = set()
        for automation in automations:
            createdBy = automation.get("sysCreatedBy")
            if createdBy:
                userIds.add(createdBy)
            mandateId = automation.get("mandateId")
            if mandateId:
                mandateIds.add(mandateId)
            featureInstanceId = automation.get("featureInstanceId")
            if featureInstanceId:
                featureInstanceIds.add(featureInstanceId)
        # Use root DB connector for display-only lookups (no RBAC needed)
        usersMap = {}
        mandatesMap = {}
        featureInstancesMap = {}
        try:
            from modules.datamodels.datamodelUam import UserInDB, Mandate
            from modules.datamodels.datamodelFeatures import FeatureInstance
            from modules.security.rootAccess import getRootDbAppConnector
            dbAppConn = getRootDbAppConnector()
            # Batch fetch user display names
            # NOTE(review): "batch" is one query per ID (N+1 pattern) for users,
            # mandates and feature instances alike; consider a list-valued
            # recordFilter if the connector supports it.
            if userIds:
                for userId in userIds:
                    users = dbAppConn.getRecordset(UserInDB, recordFilter={"id": userId})
                    if users:
                        user = users[0]
                        displayName = user.get("fullName") or user.get("username") or user.get("email") or None
                        if displayName:
                            usersMap[userId] = displayName
            # Batch fetch mandate display names
            if mandateIds:
                for mandateId in mandateIds:
                    mandates = dbAppConn.getRecordset(Mandate, recordFilter={"id": mandateId})
                    if mandates:
                        label = mandates[0].get("label") or mandates[0].get("name") or None
                        if label:
                            mandatesMap[mandateId] = label
            # Batch fetch feature instance labels
            if featureInstanceIds:
                for fiId in featureInstanceIds:
                    instances = dbAppConn.getRecordset(FeatureInstance, recordFilter={"id": fiId})
                    if instances:
                        fi = instances[0]
                        label = fi.get("label") or fi.get("featureCode") or None
                        if label:
                            featureInstancesMap[fiId] = label
        except Exception as e:
            logger.warning(f"Could not enrich automations with display names: {e}")
        # Enrich each automation with the fetched data
        # SECURITY: Never show a fallback name — if lookup fails, show empty string
        for automation in automations:
            createdBy = automation.get("sysCreatedBy")
            automation["sysCreatedByUserName"] = usersMap.get(createdBy, "") if createdBy else ""
            mandateId = automation.get("mandateId")
            automation["mandateName"] = mandatesMap.get(mandateId, "") if mandateId else ""
            featureInstanceId = automation.get("featureInstanceId")
            automation["featureInstanceName"] = featureInstancesMap.get(featureInstanceId, "") if featureInstanceId else ""
        return automations
def _enrichAutomationWithUserAndMandate(self, automation: Dict[str, Any]) -> Dict[str, Any]:
"""
Enrich a single automation with user name and mandate name for display.
For multiple automations, use _enrichAutomationsWithUserAndMandate for better performance.
"""
return self._enrichAutomationsWithUserAndMandate([automation])[0]
    def getAllAutomationDefinitions(self, pagination: Optional[PaginationParams] = None) -> Union[List[Dict[str, Any]], PaginatedResult]:
        """
        Returns automation definitions based on user access level.
        Supports optional pagination, sorting, and filtering.
        Computes status field for each automation.

        Returns a plain list when pagination is None, otherwise a
        PaginatedResult. Filtering, sorting and paging are applied in Python on
        the RBAC-filtered result set, not pushed down to SQL.
        """
        # AutomationDefinitions can belong to any feature instance within a mandate.
        # Filter by mandateId only — not by featureInstanceId — to show all definitions across features.
        filteredAutomations = getRecordsetWithRBAC(
            self.db,
            AutomationDefinition,
            self.currentUser,
            mandateId=self.mandateId
        )
        # Compute status for each automation and normalize executionLogs
        for automation in filteredAutomations:
            automation["status"] = self._computeAutomationStatus(automation)
            # Ensure executionLogs is always a list, not None
            if automation.get("executionLogs") is None:
                automation["executionLogs"] = []
        # Batch enrich with user and mandate names (mutates the dicts in place)
        self._enrichAutomationsWithUserAndMandate(filteredAutomations)
        # If no pagination requested, return all items
        if pagination is None:
            return filteredAutomations
        # Apply filtering (if filters provided)
        if pagination.filters:
            filteredAutomations = self._applyFilters(filteredAutomations, pagination.filters)
        # Apply sorting (in order of sortFields)
        if pagination.sort:
            filteredAutomations = self._applySorting(filteredAutomations, pagination.sort)
        # Count total items after filters
        totalItems = len(filteredAutomations)
        totalPages = math.ceil(totalItems / pagination.pageSize) if totalItems > 0 else 0
        # Apply pagination (skip/limit)
        # NOTE(review): assumes page is 1-based and >= 1 — presumably validated
        # by PaginationParams; a page of 0 would produce a negative slice start.
        startIdx = (pagination.page - 1) * pagination.pageSize
        endIdx = startIdx + pagination.pageSize
        pagedAutomations = filteredAutomations[startIdx:endIdx]
        return PaginatedResult(
            items=pagedAutomations,
            totalItems=totalItems,
            totalPages=totalPages
        )
def _applyFilters(self, items: List[Dict], filters: Dict[str, Any]) -> List[Dict]:
"""Apply filters to a list of items."""
if not filters:
return items
filtered = []
for item in items:
match = True
for key, value in filters.items():
itemValue = item.get(key)
if isinstance(value, str) and isinstance(itemValue, str):
if value.lower() not in itemValue.lower():
match = False
break
elif str(itemValue).lower() != str(value).lower():
match = False
break
if match:
filtered.append(item)
return filtered
def _applySorting(self, items: List[Dict], sortFields: List[Dict]) -> List[Dict]:
"""Apply sorting to a list of items."""
if not sortFields:
return items
for sortField in reversed(sortFields):
field = sortField.get("field", "")
direction = sortField.get("direction", "asc")
reverse = direction.lower() == "desc"
items = sorted(items, key=lambda x: x.get(field, ""), reverse=reverse)
return items
    def getAutomationDefinition(self, automationId: str, includeSystemFields: bool = False) -> Optional[AutomationDefinition]:
        """Returns an automation definition by ID if user has access, with computed status.

        Args:
            automationId: ID of the automation to get
            includeSystemFields: If True, returns a namespace object carrying
                system fields (sysCreatedBy, etc) so getattr works.
                If False (default), returns a Pydantic model without system fields.

        Returns:
            None when the record does not exist, the user lacks access, or any
            error occurs (errors are logged, not raised).
        """
        try:
            # AutomationDefinitions can belong to any feature instance within a mandate.
            # Filter by mandateId only — not by featureInstanceId.
            filtered = getRecordsetWithRBAC(
                self.db,
                AutomationDefinition,
                self.currentUser,
                recordFilter={"id": automationId},
                mandateId=self.mandateId
            )
            if not filtered:
                return None
            automation = filtered[0]
            automation["status"] = self._computeAutomationStatus(automation)
            # Ensure executionLogs is always a list, not None
            if automation.get("executionLogs") is None:
                automation["executionLogs"] = []
            # Enrich with user and mandate names
            self._enrichAutomationWithUserAndMandate(automation)
            # For internal use (execution), return raw dict with system fields
            if includeSystemFields:
                # Return as simple namespace object so getattr works
                class AutomationWithSystemFields:
                    def __init__(self, data):
                        for key, value in data.items():
                            setattr(self, key, value)
                return AutomationWithSystemFields(automation)
            # Clean metadata fields and return Pydantic model
            cleanedRecord = _automationDefinitionPayload(automation)
            return AutomationDefinition(**cleanedRecord)
        except Exception as e:
            logger.error(f"Error getting automation definition: {str(e)}")
            return None
    def createAutomationDefinition(self, automationData: Dict[str, Any]) -> AutomationDefinition:
        """Creates a new automation definition, then triggers sync.

        Note: mutates automationData in place (id / mandateId /
        featureInstanceId defaults). Raises on any failure after logging.
        """
        try:
            # Ensure ID is present
            if "id" not in automationData or not automationData["id"]:
                automationData["id"] = str(uuid.uuid4())
            # Ensure mandateId and featureInstanceId are set for proper data isolation
            if "mandateId" not in automationData or not automationData.get("mandateId"):
                # Use request context mandateId, or fall back to Root mandate
                effectiveMandateId = self.mandateId
                if not effectiveMandateId:
                    # Fall back to Root mandate (first mandate in system)
                    try:
                        from modules.datamodels.datamodelUam import Mandate
                        from modules.security.rootAccess import getRootDbAppConnector
                        dbAppConn = getRootDbAppConnector()
                        allMandates = dbAppConn.getRecordset(Mandate)
                        if allMandates:
                            effectiveMandateId = allMandates[0].get("id")
                            logger.debug(f"createAutomationDefinition: Using Root mandate {effectiveMandateId}")
                    except Exception as e:
                        logger.warning(f"Could not get Root mandate: {e}")
                automationData["mandateId"] = effectiveMandateId
            if "featureInstanceId" not in automationData:
                automationData["featureInstanceId"] = self.featureInstanceId
            # Ensure database connector has correct userId context
            # (the connector stamps sysCreatedBy from its context on create)
            if not self.userId:
                logger.error(f"createAutomationDefinition: userId is not set! Cannot set sysCreatedBy. currentUser={self.currentUser}")
            elif hasattr(self.db, 'updateContext'):
                try:
                    self.db.updateContext(self.userId)
                    logger.debug(f"createAutomationDefinition: Updated database context with userId={self.userId}")
                except Exception as e:
                    logger.warning(f"Could not update database context: {e}")
            # Create automation in database
            createdAutomation = self.db.recordCreate(AutomationDefinition, automationData)
            # Compute status
            createdAutomation["status"] = self._computeAutomationStatus(createdAutomation)
            # Ensure executionLogs is always a list, not None
            if createdAutomation.get("executionLogs") is None:
                createdAutomation["executionLogs"] = []
            # Trigger automation change callback
            self._notifyAutomationChanged()
            # Clean metadata fields and return Pydantic model
            cleanedRecord = _automationDefinitionPayload(createdAutomation)
            return AutomationDefinition(**cleanedRecord)
        except Exception as e:
            logger.error(f"Error creating automation definition: {str(e)}")
            raise
def _saveExecutionLog(self, automationId: str, executionLogs: List[Dict[str, Any]]) -> None:
"""
Save execution logs to an automation definition WITHOUT RBAC check.
This is a system-level operation: when a user executes an automation,
the execution log must be saved regardless of whether the user has
'update' permission on the AutomationDefinition. The user already
proved they have execute/read access by loading the automation.
"""
try:
self.db.recordModify(AutomationDefinition, automationId, {"executionLogs": executionLogs})
logger.debug(f"Saved execution log for automation {automationId}")
except Exception as e:
logger.warning(f"Could not save execution log for automation {automationId}: {e}")
    def updateAutomationDefinition(self, automationId: str, automationData: Dict[str, Any]) -> AutomationDefinition:
        """Updates an automation definition, then triggers sync.

        Raises PermissionError when the record is not visible or the user
        lacks 'update' permission. executionLogs is stripped from the payload
        (only _saveExecutionLog may write it). Deactivation also removes the
        scheduler job synchronously and clears eventId.
        """
        try:
            # Check access
            existing = self.getAutomationDefinition(automationId)
            if not existing:
                raise PermissionError(f"No access to automation {automationId}")
            if not self.checkRbacPermission(AutomationDefinition, "update", automationId):
                raise PermissionError(f"No permission to modify automation {automationId}")
            # Execution logs are system-managed; never user-updatable here.
            automationData.pop("executionLogs", None)
            # If deactivating: immediately remove scheduler job (don't rely on async callback)
            isBeingDeactivated = "active" in automationData and not automationData["active"]
            if isBeingDeactivated:
                # 'existing' may be a Pydantic model or a namespace/dict depending on path.
                existingEventId = getattr(existing, "eventId", None) if not isinstance(existing, dict) else existing.get("eventId")
                if existingEventId:
                    try:
                        from modules.shared.eventManagement import eventManager
                        eventManager.remove(existingEventId)
                        logger.info(f"Removed scheduler job {existingEventId} (automation deactivated)")
                    except Exception as e:
                        logger.warning(f"Could not remove scheduler job {existingEventId}: {e}")
                automationData["eventId"] = None
            # Update automation in database
            updatedAutomation = self.db.recordModify(AutomationDefinition, automationId, automationData)
            # Compute status
            updatedAutomation["status"] = self._computeAutomationStatus(updatedAutomation)
            # Ensure executionLogs is always a list, not None
            if updatedAutomation.get("executionLogs") is None:
                updatedAutomation["executionLogs"] = []
            # Trigger automation change callback
            self._notifyAutomationChanged()
            # Clean metadata fields and return Pydantic model
            cleanedRecord = _automationDefinitionPayload(updatedAutomation)
            return AutomationDefinition(**cleanedRecord)
        except Exception as e:
            logger.error(f"Error updating automation definition: {str(e)}")
            raise
def deleteAutomationDefinition(self, automationId: str) -> bool:
"""Deletes an automation definition, then triggers sync."""
try:
# Check access
existing = self.getAutomationDefinition(automationId)
if not existing:
raise PermissionError(f"No access to automation {automationId}")
if not self.checkRbacPermission(AutomationDefinition, "delete", automationId):
raise PermissionError(f"No permission to delete automation {automationId}")
# Delete automation from database
self.db.recordDelete(AutomationDefinition, automationId)
# Trigger automation change callback
self._notifyAutomationChanged()
return True
except Exception as e:
logger.error(f"Error deleting automation definition: {str(e)}")
raise
    def getAllAutomationDefinitionsWithRBAC(self, user: User) -> List[Dict[str, Any]]:
        """
        Get all automation definitions filtered by RBAC for a specific user.

        This method encapsulates getRecordsetWithRBAC() to avoid exposing the
        connector. Unlike getAllAutomationDefinitions, it filters by BOTH
        mandateId and featureInstanceId, and does not compute status or enrich
        display names.

        Args:
            user: User object for RBAC filtering
        Returns:
            List of automation definition dictionaries filtered by RBAC
        """
        return getRecordsetWithRBAC(
            self.db,
            AutomationDefinition,
            user,
            mandateId=self.mandateId,
            featureInstanceId=self.featureInstanceId
        )
# =========================================================================
# AutomationTemplate CRUD methods
# =========================================================================
    def getAllAutomationTemplates(self, pagination: Optional[PaginationParams] = None) -> Union[List[Dict[str, Any]], PaginatedResult]:
        """
        Returns automation templates: system templates + instance templates for current instance.

        System templates (isSystem=True) are always included (read-only for non-SysAdmin).
        Instance templates (featureInstanceId matches) are included with RBAC filtering.
        Global/legacy templates (no featureInstanceId) are also visible to all users.
        Returns a plain list when pagination is None, otherwise a PaginatedResult.
        """
        # Load ALL templates and filter in Python.
        # Reason: seeded/legacy templates may have isSystem=NULL (not False/True),
        # which breaks SQL equality filters (NULL != True AND NULL != False).
        allTemplates = self.db.getRecordset(AutomationTemplate)
        filteredTemplates = []
        for t in allTemplates:
            isSystem = t.get("isSystem")
            fid = t.get("featureInstanceId")
            if isSystem is True:
                # System templates — always visible to all users
                filteredTemplates.append(t)
            elif fid and fid == self.featureInstanceId:
                # Instance templates — scoped to current feature instance
                filteredTemplates.append(t)
            elif not fid:
                # Global/legacy templates (no featureInstanceId) — visible to all users
                filteredTemplates.append(t)
        # Enrich with user names (mutates the dicts in place)
        self._enrichTemplatesWithUserName(filteredTemplates)
        # If no pagination requested, return all items
        if pagination is None:
            return filteredTemplates
        # Apply filtering (if filters provided)
        if pagination.filters:
            filteredTemplates = self._applyFilters(filteredTemplates, pagination.filters)
        # Apply sorting (in order of sortFields)
        if pagination.sort:
            filteredTemplates = self._applySorting(filteredTemplates, pagination.sort)
        # Count total items after filters
        totalItems = len(filteredTemplates)
        totalPages = math.ceil(totalItems / pagination.pageSize) if totalItems > 0 else 0
        # Apply pagination (skip/limit); page is assumed 1-based.
        startIdx = (pagination.page - 1) * pagination.pageSize
        endIdx = startIdx + pagination.pageSize
        pagedTemplates = filteredTemplates[startIdx:endIdx]
        return PaginatedResult(
            items=pagedTemplates,
            totalItems=totalItems,
            totalPages=totalPages
        )
def _enrichTemplatesWithUserName(self, templates: List[Dict[str, Any]]) -> None:
"""Batch enrich templates with creator user names."""
if not templates:
return
# Collect unique user IDs
userIds = set()
for template in templates:
createdBy = template.get("sysCreatedBy")
if createdBy:
userIds.add(createdBy)
if not userIds:
return
# Batch fetch users
try:
from modules.datamodels.datamodelUam import UserInDB
from modules.security.rootAccess import getRootDbAppConnector
dbAppConn = getRootDbAppConnector()
userNameMap = {}
for userId in userIds:
users = dbAppConn.getRecordset(UserInDB, recordFilter={"id": userId})
if users:
user = users[0]
displayName = user.get("fullName") or user.get("username") or user.get("email") or None
if displayName:
userNameMap[userId] = displayName
# Apply to templates — SECURITY: no fallback, empty if not found
for template in templates:
createdBy = template.get("sysCreatedBy")
template["sysCreatedByUserName"] = userNameMap.get(createdBy, "") if createdBy else ""
except Exception as e:
logger.warning(f"Could not enrich templates with user names: {e}")
    def getAutomationTemplate(self, templateId: str) -> Optional[Dict[str, Any]]:
        """Returns an automation template by ID (system templates always accessible, instance templates scoped).

        Returns None when the template does not exist, belongs to a different
        feature instance, or any error occurs (errors are logged, not raised).
        """
        try:
            records = self.db.getRecordset(
                AutomationTemplate,
                recordFilter={"id": templateId}
            )
            if not records:
                return None
            template = records[0]
            # System templates are readable by everyone
            if template.get("isSystem"):
                self._enrichTemplatesWithUserName([template])
                return template
            # Instance templates: must belong to current feature instance
            templateInstanceId = template.get("featureInstanceId")
            if templateInstanceId and self.featureInstanceId and str(templateInstanceId) != str(self.featureInstanceId):
                return None  # Not in this instance
            self._enrichTemplatesWithUserName([template])
            return template
        except Exception as e:
            logger.error(f"Error getting automation template: {str(e)}")
            return None
    def createAutomationTemplate(self, templateData: Dict[str, Any], isSysAdmin: bool = False) -> Dict[str, Any]:
        """Creates a new automation template.

        System templates (isSystem=True) can only be created by SysAdmin.
        Instance templates get featureInstanceId from context.
        Note: mutates templateData in place. Raises PermissionError on
        authorization failures; other errors are logged and re-raised.
        """
        try:
            # Ensure ID is present
            if "id" not in templateData or not templateData["id"]:
                templateData["id"] = str(uuid.uuid4())
            # System template protection
            if templateData.get("isSystem") and not isSysAdmin:
                raise PermissionError("Only SysAdmin can create system templates")
            # Set featureInstanceId for non-system templates
            if not templateData.get("isSystem"):
                templateData["featureInstanceId"] = self.featureInstanceId
                templateData["isSystem"] = False
            # RBAC check (for non-system templates)
            if not isSysAdmin and not self.checkRbacPermission(AutomationTemplate, "create"):
                raise PermissionError("No permission to create template")
            # Ensure database connector has correct userId context
            if self.userId and hasattr(self.db, 'updateContext'):
                try:
                    self.db.updateContext(self.userId)
                except Exception as e:
                    logger.warning(f"Could not update database context: {e}")
            # Convert template field to string if it's a dict (frontend may send parsed JSON)
            if "template" in templateData and isinstance(templateData["template"], dict):
                import json
                templateData["template"] = json.dumps(templateData["template"])
            # Validate through Pydantic model to ensure proper type conversion
            validatedTemplate = AutomationTemplate(**templateData)
            # Create template in database using model_dump for proper serialization
            createdTemplate = self.db.recordCreate(AutomationTemplate, validatedTemplate.model_dump())
            return createdTemplate
        except Exception as e:
            logger.error(f"Error creating automation template: {str(e)}")
            raise
def updateAutomationTemplate(self, templateId: str, templateData: Dict[str, Any], isSysAdmin: bool = False) -> Dict[str, Any]:
    """Partially update an automation template.

    System templates may only be modified by SysAdmin. The ``isSystem``
    and ``featureInstanceId`` fields can never be changed through this
    method; the patch is merged over the stored record.

    Raises:
        PermissionError: When the template is inaccessible or the caller
            lacks update rights.
    """
    try:
        current = self.getAutomationTemplate(templateId)
        if not current:
            raise PermissionError(f"No access to template {templateId}")
        if current.get("isSystem") and not isSysAdmin:
            raise PermissionError("Only SysAdmin can modify system templates")
        if not isSysAdmin and not self.checkRbacPermission(AutomationTemplate, "update", templateId):
            raise PermissionError(f"No permission to modify template {templateId}")
        # Scope/ownership fields are immutable via update.
        for lockedField in ("isSystem", "featureInstanceId"):
            templateData.pop(lockedField, None)
        # The frontend may send the template as parsed JSON; persist it as a string.
        if isinstance(templateData.get("template"), dict):
            import json
            templateData["template"] = json.dumps(templateData["template"])
        # Overlay the patch on the existing record; the ID must survive the merge.
        merged = {**current, **templateData, "id": templateId}
        # Round-trip through the Pydantic model for validation / type coercion.
        validated = AutomationTemplate(**merged)
        return self.db.recordModify(AutomationTemplate, templateId, validated.model_dump())
    except Exception as e:
        logger.error(f"Error updating automation template: {str(e)}")
        raise
def deleteAutomationTemplate(self, templateId: str, isSysAdmin: bool = False) -> bool:
    """Delete an automation template.

    System templates may only be deleted by SysAdmin.

    Returns:
        True when deleted, False when the template was not found or is
        not accessible from this context.

    Raises:
        PermissionError: When the caller lacks delete rights.
    """
    try:
        record = self.getAutomationTemplate(templateId)
        if not record:
            return False
        if record.get("isSystem") and not isSysAdmin:
            raise PermissionError("Only SysAdmin can delete system templates")
        if not isSysAdmin and not self.checkRbacPermission(AutomationTemplate, "delete", templateId):
            raise PermissionError(f"No permission to delete template {templateId}")
        self.db.recordDelete(AutomationTemplate, templateId)
        return True
    except Exception as e:
        logger.error(f"Error deleting automation template: {str(e)}")
        raise
def duplicateAutomationTemplate(self, templateId: str) -> Dict[str, Any]:
    """Duplicate a template into the current feature instance.

    The copy gets a new ID, isSystem=False and the current
    featureInstanceId; every non-empty localized label is suffixed with
    "(Kopie)". Works for both system and instance templates.

    Raises:
        PermissionError: When the template is missing or the caller may
            not create templates.
    """
    try:
        existing = self.getAutomationTemplate(templateId)
        if not existing:
            raise PermissionError(f"Template {templateId} not found")
        # RBAC check for creating templates
        if not self.checkRbacPermission(AutomationTemplate, "create"):
            raise PermissionError("No permission to create templates")
        # BUGFIX: build a fresh label dict instead of mutating the dict
        # returned for the source template — the original code appended
        # " (Kopie)" to the fetched record's own label object.
        label = existing.get("label", {})
        if isinstance(label, dict):
            label = {
                lang: (f"{text} (Kopie)" if text else text)
                for lang, text in label.items()
            }
        duplicateData = {
            "id": str(uuid.uuid4()),
            "label": label,
            "overview": existing.get("overview"),
            "template": existing.get("template", ""),
            "isSystem": False,
            "featureInstanceId": self.featureInstanceId,
        }
        # Propagate the acting user to the DB connector when supported
        # (best effort, consistent with createAutomationTemplate).
        if self.userId and hasattr(self.db, 'updateContext'):
            try:
                self.db.updateContext(self.userId)
            except Exception as e:
                logger.warning(f"Could not update database context: {e}")
        validatedTemplate = AutomationTemplate(**duplicateData)
        createdTemplate = self.db.recordCreate(AutomationTemplate, validatedTemplate.model_dump())
        logger.info(f"Duplicated template {templateId} -> {duplicateData['id']}")
        return createdTemplate
    except Exception as e:
        logger.error(f"Error duplicating template: {str(e)}")
        raise
def duplicateAutomationDefinition(self, definitionId: str) -> Dict[str, Any]:
    """Duplicate an automation definition within the same feature instance.

    The copy gets a new ID, active=False, no scheduler event, no status
    and empty execution logs; the label is suffixed with "(Kopie)".

    Raises:
        PermissionError: When the definition is missing or the caller may
            not create definitions.
    """
    try:
        existing = self.getAutomationDefinition(definitionId)
        if not existing:
            raise PermissionError(f"Definition {definitionId} not found")
        # RBAC check for creating definitions
        if not self.checkRbacPermission(AutomationDefinition, "create"):
            raise PermissionError("No permission to create definitions")
        # getAutomationDefinition returns a Pydantic model; use a plain dict for .get() access.
        existing_data = existing.model_dump() if hasattr(existing, "model_dump") else existing
        duplicateData = {
            "id": str(uuid.uuid4()),
            "mandateId": existing_data.get("mandateId"),
            "featureInstanceId": existing_data.get("featureInstanceId"),
            "label": f"{existing_data.get('label', '')} (Kopie)",
            "schedule": existing_data.get("schedule", ""),
            "template": existing_data.get("template", ""),
            "placeholders": existing_data.get("placeholders", {}),
            "active": False,     # duplicates start disabled
            "eventId": None,     # not yet registered with the scheduler
            "status": None,
            "executionLogs": [],
            "allowedProviders": existing_data.get("allowedProviders", []),
        }
        # Propagate the acting user to the DB connector when supported.
        # CONSISTENCY FIX: guarded best-effort call, matching
        # createAutomationTemplate — a context failure should not abort the copy.
        if self.userId and hasattr(self.db, 'updateContext'):
            try:
                self.db.updateContext(self.userId)
            except Exception as e:
                logger.warning(f"Could not update database context: {e}")
        validatedDefinition = AutomationDefinition(**duplicateData)
        createdDefinition = self.db.recordCreate(AutomationDefinition, validatedDefinition.model_dump())
        logger.info(f"Duplicated definition {definitionId} -> {duplicateData['id']}")
        return createdDefinition
    except Exception as e:
        logger.error(f"Error duplicating definition: {str(e)}")
        raise
def _notifyAutomationChanged(self):
    """Notify registered callbacks about automation changes (decoupled from features).
    Sync-safe: works from both sync and async contexts.

    Best effort: failures are logged and never propagated to the caller.
    """
    try:
        # Imported lazily to avoid a hard dependency / import cycle at module load time.
        from modules.shared.callbackRegistry import callbackRegistry
        # Trigger callbacks without knowing which features are listening
        callbackRegistry.trigger('automation.changed', self)
    except Exception as e:
        logger.error(f"Error notifying automation change: {str(e)}")
def getInterface(currentUser: Optional[User] = None, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None) -> 'AutomationObjects':
    """Return a cached AutomationObjects instance for the current user.

    Instances are cached per (mandate, feature instance, user); on a cache
    hit the stored instance's user context is refreshed instead of
    constructing a new one.

    Args:
        currentUser: The authenticated user (required).
        mandateId: The mandate ID from RequestContext (X-Mandate-Id header).
        featureInstanceId: The feature instance ID from RequestContext
            (X-Feature-Instance-Id header).

    Raises:
        ValueError: When no user is supplied.
    """
    if not currentUser:
        raise ValueError("Invalid user context: user is required")
    effectiveMandateId = str(mandateId) if mandateId else None
    effectiveFeatureInstanceId = str(featureInstanceId) if featureInstanceId else None
    # Cache key includes featureInstanceId for proper isolation.
    contextKey = f"automation_{effectiveMandateId}_{effectiveFeatureInstanceId}_{currentUser.id}"
    cached = _automationInterfaces.get(contextKey)
    if cached is None:
        cached = AutomationObjects(currentUser, mandateId=effectiveMandateId, featureInstanceId=effectiveFeatureInstanceId)
        _automationInterfaces[contextKey] = cached
    else:
        # Refresh the user context on cache hits.
        cached.setUserContext(currentUser, mandateId=effectiveMandateId, featureInstanceId=effectiveFeatureInstanceId)
    return cached

View file

@ -1,446 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Automation Feature Container - Main Module.
Handles feature initialization and RBAC catalog registration.
"""
import logging
from typing import Dict, List, Any, Optional
logger = logging.getLogger(__name__)
# Feature metadata
FEATURE_CODE = "automation"  # unique feature identifier; used as prefix in RBAC object keys below
FEATURE_LABEL = {"en": "Automation", "de": "Automatisierung", "fr": "Automatisation"}  # localized display name
FEATURE_ICON = "mdi-cog-clockwise"  # icon identifier (Material Design Icons naming)
# UI Objects for RBAC catalog.
# Each entry describes a UI area whose visibility can be granted per role
# (referenced by objectKey in the TEMPLATE_ROLES accessRules with context "UI").
UI_OBJECTS = [
    {
        "objectKey": "ui.feature.automation.definitions",
        "label": {"en": "Automation Definitions", "de": "Automatisierungs-Definitionen", "fr": "Définitions d'automatisation"},
        "meta": {"area": "definitions"}
    },
    {
        "objectKey": "ui.feature.automation.templates",
        "label": {"en": "Templates", "de": "Vorlagen", "fr": "Modèles"},
        "meta": {"area": "templates"}
    },
]
# Resource Objects for RBAC catalog.
# Each entry maps an API endpoint/method pair to an RBAC object key so access
# to the endpoint can be granted per role.
RESOURCE_OBJECTS = [
    {
        "objectKey": "resource.feature.automation.create",
        "label": {"en": "Create Automation", "de": "Automatisierung erstellen", "fr": "Créer automatisation"},
        "meta": {"endpoint": "/api/automations", "method": "POST"}
    },
    {
        "objectKey": "resource.feature.automation.update",
        "label": {"en": "Update Automation", "de": "Automatisierung aktualisieren", "fr": "Modifier automatisation"},
        "meta": {"endpoint": "/api/automations/{automationId}", "method": "PUT"}
    },
    {
        "objectKey": "resource.feature.automation.delete",
        "label": {"en": "Delete Automation", "de": "Automatisierung löschen", "fr": "Supprimer automatisation"},
        "meta": {"endpoint": "/api/automations/{automationId}", "method": "DELETE"}
    },
    {
        "objectKey": "resource.feature.automation.execute",
        "label": {"en": "Execute Automation", "de": "Automatisierung ausführen", "fr": "Exécuter automatisation"},
        "meta": {"endpoint": "/api/automations/{automationId}/execute", "method": "POST"}
    },
]
# Template roles for this feature.
# Synced to the DB as global template roles (mandateId=None) by
# _syncTemplateRolesToDb(). Access level codes used in "read"/"create"/
# "update"/"delete" appear to be: "a" = all, "g" = group, "m" = my/own,
# "n" = none — inferred from the inline comments below; confirm against
# the RBAC datamodel.
# NOTE(review): several rules reference "ui.feature.automation.logs",
# which is not declared in UI_OBJECTS above — verify whether it is
# registered elsewhere or should be added to UI_OBJECTS.
TEMPLATE_ROLES = [
    {
        "roleLabel": "automation-admin",
        "description": {
            "en": "Automation Administrator - Full access to automation configuration and execution",
            "de": "Automatisierungs-Administrator - Vollzugriff auf Automatisierungs-Konfiguration und Ausführung",
            "fr": "Administrateur automatisation - Accès complet à la configuration et exécution"
        },
        "accessRules": [
            # Full UI access
            {"context": "UI", "item": None, "view": True},
            # Full DATA access
            {"context": "DATA", "item": None, "view": True, "read": "a", "create": "a", "update": "a", "delete": "a"},
        ]
    },
    {
        "roleLabel": "automation-editor",
        "description": {
            "en": "Automation Editor - Create and modify automations",
            "de": "Automatisierungs-Editor - Automatisierungen erstellen und bearbeiten",
            "fr": "Éditeur automatisation - Créer et modifier les automatisations"
        },
        "accessRules": [
            # UI access to definitions and templates - fully-qualified object keys
            {"context": "UI", "item": "ui.feature.automation.definitions", "view": True},
            {"context": "UI", "item": "ui.feature.automation.templates", "view": True},
            {"context": "UI", "item": "ui.feature.automation.logs", "view": True},
            # Group-level DATA access
            {"context": "DATA", "item": None, "view": True, "read": "g", "create": "g", "update": "g", "delete": "n"},
        ]
    },
    {
        "roleLabel": "automation-user",
        "description": {
            "en": "Automation User - Create and manage own automations",
            "de": "Automatisierungs-Benutzer - Eigene Automatisierungen erstellen und verwalten",
            "fr": "Utilisateur automatisation - Créer et gérer ses propres automatisations"
        },
        "accessRules": [
            {"context": "UI", "item": "ui.feature.automation.definitions", "view": True},
            {"context": "UI", "item": "ui.feature.automation.templates", "view": True},
            {"context": "UI", "item": "ui.feature.automation.logs", "view": True},
            # Own-data ("my") level for all CRUD operations
            {"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "m"},
        ]
    },
    {
        "roleLabel": "automation-viewer",
        "description": {
            "en": "Automation Viewer - View automations and execution results",
            "de": "Automatisierungs-Betrachter - Automatisierungen und Ausführungsergebnisse einsehen",
            "fr": "Visualiseur automatisation - Consulter les automatisations et résultats"
        },
        "accessRules": [
            # UI access to view only
            {"context": "UI", "item": "ui.feature.automation.definitions", "view": True},
            {"context": "UI", "item": "ui.feature.automation.logs", "view": True},
            # Read-only DATA access (my level)
            {"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"},
        ]
    },
]
# Service requirements - services this feature needs from the service center.
# Each entry names a serviceKey resolved by getAutomationServices();
# meta.usage documents what the service is used for.
REQUIRED_SERVICES = [
    {"serviceKey": "chat", "meta": {"usage": "Workflow CRUD, messages, logs"}},
    {"serviceKey": "ai", "meta": {"usage": "AI planning for workflow execution"}},
    {"serviceKey": "utils", "meta": {"usage": "Timestamps, utilities"}},
    {"serviceKey": "billing", "meta": {"usage": "AI call billing"}},
    {"serviceKey": "extraction", "meta": {"usage": "Workflow method actions"}},
    {"serviceKey": "sharepoint", "meta": {"usage": "SharePoint actions (listDocuments, uploadDocument, etc.)"}},
    {"serviceKey": "generation", "meta": {"usage": "Action completion messages, document creation from results"}},
]


def getRequiredServiceKeys() -> List[str]:
    """Return the service keys this feature requires, in declaration order."""
    keys = []
    for spec in REQUIRED_SERVICES:
        keys.append(spec["serviceKey"])
    return keys
def getAutomationServices(
    user,
    mandateId: Optional[str] = None,
    featureInstanceId: Optional[str] = None,
    workflow=None,
) -> "_AutomationServiceHub":
    """
    Get a service hub for the automation feature using the service center.
    Resolves only the services declared in REQUIRED_SERVICES.
    No legacy fallback - service center only.

    Args:
        user: The acting user; stored on the hub and the service context.
        mandateId: Optional mandate scope for service resolution.
        featureInstanceId: Optional feature instance scope.
        workflow: Optional workflow object; when None a minimal placeholder
            is used so services reading context.workflow don't fail.

    Returns a hub-like object with: chat, ai, utils, billing, extraction,
    sharepoint, rbac, interfaceDbApp, interfaceDbComponent, interfaceDbChat,
    interfaceDbAutomation. Services that fail to resolve are set to None
    (logged as warnings), not raised.
    """
    # Imported lazily so this module can be loaded without the service center.
    from modules.serviceCenter import getService
    from modules.serviceCenter.context import ServiceCenterContext
    from modules.features.automation.interfaceFeatureAutomation import getInterface as getAutomationInterface
    _workflow = workflow
    if _workflow is None:
        # Placeholder must have 'id' and 'workflowMode' to avoid AttributeError when services use context.workflow
        _workflow = type("_Placeholder", (), {"featureCode": FEATURE_CODE, "id": None, "workflowMode": None})()
    ctx = ServiceCenterContext(
        user=user,
        mandate_id=mandateId,
        feature_instance_id=featureInstanceId,
        workflow=_workflow,
    )
    hub = _AutomationServiceHub()
    hub.user = user
    hub.mandateId = mandateId
    hub.featureInstanceId = featureInstanceId
    hub._service_context = ctx  # Store context so workflow updates propagate to services
    # NOTE: hub.workflow keeps the caller's value (possibly None), not the placeholder.
    hub.workflow = workflow
    hub.featureCode = FEATURE_CODE
    hub.allowedProviders = None
    # Resolve each declared service; a failed resolve leaves the slot as None.
    for spec in REQUIRED_SERVICES:
        key = spec["serviceKey"]
        try:
            svc = getService(key, ctx)
            setattr(hub, key, svc)
        except Exception as e:
            logger.warning(f"Could not resolve service '{key}' for automation: {e}")
            setattr(hub, key, None)
    # Copy interfaces from chat service for WorkflowManager compatibility
    if hub.chat:
        hub.interfaceDbApp = getattr(hub.chat, "interfaceDbApp", None)
        hub.interfaceDbComponent = getattr(hub.chat, "interfaceDbComponent", None)
        hub.interfaceDbChat = getattr(hub.chat, "interfaceDbChat", None)
    # RBAC for MethodBase action permission checks (workflow methods)
    hub.rbac = getattr(hub.interfaceDbApp, "rbac", None) if hub.interfaceDbApp else None
    # Set interfaceDbAutomation from feature interface
    hub.interfaceDbAutomation = getAutomationInterface(
        user, mandateId=mandateId, featureInstanceId=featureInstanceId
    )
    return hub
class _AutomationServiceHub:
    """Lightweight hub exposing only services required by the automation feature.

    Instances are populated attribute-by-attribute in getAutomationServices();
    the class attributes below serve as None defaults for every expected slot.
    """
    # --- Request context ---
    user = None
    mandateId = None
    featureInstanceId = None
    _service_context = None  # ServiceCenterContext; when workflow is set, context.workflow is updated
    workflow = None
    featureCode = "automation"
    allowedProviders = None
    # --- Database interfaces (copied from the chat service / feature interface) ---
    interfaceDbApp = None
    interfaceDbComponent = None
    interfaceDbChat = None
    interfaceDbAutomation = None
    rbac = None  # RBAC accessor for workflow method permission checks
    # --- Resolved services (one slot per REQUIRED_SERVICES entry) ---
    # NOTE(review): "generation" is declared in REQUIRED_SERVICES but has no
    # default slot here; it is only set via setattr() at resolve time — confirm
    # whether a class-level default should be added.
    chat = None
    ai = None
    utils = None
    billing = None
    extraction = None
    sharepoint = None
def getFeatureDefinition() -> Dict[str, Any]:
    """Return the feature definition dict used for feature registration."""
    definition = {
        "code": FEATURE_CODE,
        "label": FEATURE_LABEL,
        "icon": FEATURE_ICON,
        "autoCreateInstance": False,  # instances are created explicitly, never auto-provisioned
    }
    return definition
def getUiObjects() -> List[Dict[str, Any]]:
    """Return UI objects for RBAC catalog registration.

    Returns the module-level UI_OBJECTS list itself (not a copy).
    """
    return UI_OBJECTS
def getResourceObjects() -> List[Dict[str, Any]]:
    """Return resource objects for RBAC catalog registration.

    Returns the module-level RESOURCE_OBJECTS list itself (not a copy).
    """
    return RESOURCE_OBJECTS
def getTemplateRoles() -> List[Dict[str, Any]]:
    """Return template roles for this feature.

    Returns the module-level TEMPLATE_ROLES list itself (not a copy).
    """
    return TEMPLATE_ROLES
def registerFeature(catalogService) -> bool:
    """
    Register this feature's RBAC objects in the catalog.

    Registers all UI and resource objects, syncs the template roles to the
    database and runs the one-off template migration.

    Args:
        catalogService: The RBAC catalog service instance

    Returns:
        True if registration was successful
    """
    try:
        # Register UI and resource objects with their respective catalog calls.
        registrations = (
            (UI_OBJECTS, catalogService.registerUiObject),
            (RESOURCE_OBJECTS, catalogService.registerResourceObject),
        )
        for objectList, registerFn in registrations:
            for obj in objectList:
                registerFn(
                    featureCode=FEATURE_CODE,
                    objectKey=obj["objectKey"],
                    label=obj["label"],
                    meta=obj.get("meta")
                )
        # Sync template roles to database
        _syncTemplateRolesToDb()
        # Mark existing templates without isSystem field as system templates (migration)
        _migrateExistingTemplates()
        logger.info(f"Feature '{FEATURE_CODE}' registered {len(UI_OBJECTS)} UI objects and {len(RESOURCE_OBJECTS)} resource objects")
        return True
    except Exception as e:
        logger.error(f"Failed to register feature '{FEATURE_CODE}': {e}")
        return False
def _syncTemplateRolesToDb() -> int:
    """
    Sync template roles and their AccessRules to the database.
    Creates global template roles (mandateId=None) if they don't exist;
    existing roles are left untouched apart from ensuring their AccessRules.
    Errors are logged and swallowed (returns 0).

    Returns:
        Number of roles created/updated
    """
    try:
        # Imported lazily to avoid circular imports at module load time.
        from modules.interfaces.interfaceDbApp import getRootInterface
        from modules.datamodels.datamodelRbac import Role, AccessRule, AccessRuleContext
        rootInterface = getRootInterface()
        # Get existing template roles for this feature (Pydantic models)
        existingRoles = rootInterface.getRolesByFeatureCode(FEATURE_CODE)
        # Filter to template roles (mandateId is None)
        templateRoles = [r for r in existingRoles if r.mandateId is None]
        # Map roleLabel -> role ID for quick existence checks.
        existingRoleLabels = {r.roleLabel: str(r.id) for r in templateRoles}
        createdCount = 0
        for roleTemplate in TEMPLATE_ROLES:
            roleLabel = roleTemplate["roleLabel"]
            if roleLabel in existingRoleLabels:
                roleId = existingRoleLabels[roleLabel]
                # Ensure AccessRules exist for this role
                _ensureAccessRulesForRole(rootInterface, roleId, roleTemplate.get("accessRules", []))
            else:
                # Create new template role
                newRole = Role(
                    roleLabel=roleLabel,
                    description=roleTemplate.get("description", {}),
                    featureCode=FEATURE_CODE,
                    mandateId=None,  # Global template
                    featureInstanceId=None,
                    isSystemRole=False
                )
                createdRole = rootInterface.db.recordCreate(Role, newRole.model_dump())
                roleId = createdRole.get("id")
                # Create AccessRules for this role
                _ensureAccessRulesForRole(rootInterface, roleId, roleTemplate.get("accessRules", []))
                logger.info(f"Created template role '{roleLabel}' with ID {roleId}")
                createdCount += 1
        if createdCount > 0:
            logger.info(f"Feature '{FEATURE_CODE}': Created {createdCount} template roles")
        return createdCount
    except Exception as e:
        logger.error(f"Error syncing template roles for feature '{FEATURE_CODE}': {e}")
        return 0
def _ensureAccessRulesForRole(rootInterface, roleId: str, ruleTemplates: List[Dict[str, Any]]) -> int:
    """
    Ensure AccessRules exist for a role based on rule templates.

    Existing rules are never modified; only (context, item) combinations
    that are not yet present are created.

    Args:
        rootInterface: Root interface instance
        roleId: Role ID
        ruleTemplates: List of rule templates

    Returns:
        Number of rules created
    """
    from modules.datamodels.datamodelRbac import AccessRule, AccessRuleContext
    # Signatures of already-present rules, keyed by (context, item).
    # IMPORTANT: Use .value for enum comparison, not str() which gives
    # "AccessRuleContext.DATA" in Python 3.11+
    existingSignatures = {
        (rule.context.value if rule.context else None, rule.item)
        for rule in rootInterface.getAccessRulesByRole(roleId)
    }
    # String-to-enum dispatch; unknown strings fall through unchanged.
    contextMap = {
        "UI": AccessRuleContext.UI,
        "DATA": AccessRuleContext.DATA,
        "RESOURCE": AccessRuleContext.RESOURCE,
    }
    createdCount = 0
    for template in ruleTemplates:
        context = template.get("context", "UI")
        item = template.get("item")
        if (context, item) in existingSignatures:
            continue
        newRule = AccessRule(
            roleId=roleId,
            context=contextMap.get(context, context),
            item=item,
            view=template.get("view", False),
            read=template.get("read"),
            create=template.get("create"),
            update=template.get("update"),
            delete=template.get("delete"),
        )
        rootInterface.db.recordCreate(AccessRule, newRule.model_dump())
        createdCount += 1
    if createdCount > 0:
        logger.debug(f"Created {createdCount} AccessRules for role {roleId}")
    return createdCount
def _migrateExistingTemplates() -> None:
    """
    Migration: Mark existing templates that have no isSystem/featureInstanceId fields
    as system templates (isSystem=True). This runs idempotently during feature registration.

    Runs with the root user's privileges; any failure is logged as a warning
    and swallowed (non-critical).
    """
    try:
        # Imported lazily to avoid circular imports at module load time.
        from modules.features.automation.interfaceFeatureAutomation import getInterface
        from modules.security.rootAccess import getRootUser
        from modules.features.automation.datamodelFeatureAutomation import AutomationTemplate
        rootUser = getRootUser()
        automationInterface = getInterface(rootUser)
        # Get all templates from DB
        allTemplates = automationInterface.db.getRecordset(AutomationTemplate)
        migratedCount = 0
        for template in allTemplates:
            templateId = template.get("id")
            isSystem = template.get("isSystem")
            featureInstanceId = template.get("featureInstanceId")
            # Templates without isSystem set (old templates) → mark as system
            if isSystem is None and featureInstanceId is None:
                automationInterface.db.recordModify(
                    AutomationTemplate,
                    templateId,
                    {"isSystem": True, "featureInstanceId": None}
                )
                migratedCount += 1
        if migratedCount > 0:
            logger.info(f"Migrated {migratedCount} existing templates to isSystem=True")
    except Exception as e:
        logger.warning(f"Template migration check failed (non-critical): {e}")

File diff suppressed because it is too large Load diff

View file

@ -1,433 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Automation templates for workflow definitions.
Contains predefined workflow templates that can be used to create automation definitions.
"""
from typing import Dict, Any, List
# Automation templates structure
AUTOMATION_TEMPLATES: Dict[str, Any] = {
"sets": [
{
"template": {
"overview": "SharePoint Themen Zusammenfassung",
"tasks": [
{
"id": "Task01",
"title": "SharePoint Themen Zusammenfassung",
"description": "Erstellt eine Zusammenfassung aller SharePoint Sites und deren Inhalte",
"objective": "Erstelle eine Zusammenfassung aller SharePoint Themen (Sites) und deren Inhalte als Word-Dokument",
"actionList": [
{
"execMethod": "sharepoint",
"execAction": "findDocumentPath",
"execParameters": {
"connectionReference": "{{KEY:connectionName}}",
"searchQuery": "*",
"maxResults": 100
},
"execResultLabel": "sharepoint_sites_found"
},
{
"execMethod": "sharepoint",
"execAction": "listDocuments",
"execParameters": {
"connectionReference": "{{KEY:connectionName}}",
"pathQuery": "{{KEY:sharepointBasePath}}",
"includeSubfolders": True
},
"execResultLabel": "sharepoint_structure"
},
{
"execMethod": "ai",
"execAction": "process",
"execParameters": {
"aiPrompt": "{{KEY:summaryPrompt}}",
"documentList": ["sharepoint_sites_found", "sharepoint_structure"],
"resultType": "docx"
},
"execResultLabel": "sharepoint_summary"
},
{
"execMethod": "sharepoint",
"execAction": "uploadDocument",
"execParameters": {
"connectionReference": "{{KEY:connectionName}}",
"documentList": ["sharepoint_summary"],
"pathQuery": "{{KEY:sharepointFolderNameDestination}}"
},
"execResultLabel": "sharepoint_upload_result"
}
]
}
]
},
"parameters": {
"connectionName": "connection:msft:p.motsch@valueon.ch",
"sharepointBasePath": "/sites/company-share",
"sharepointFolderNameDestination": "/sites/company-share/Freigegebene Dokumente/15. Persoenliche Ordner/Patrick Motsch/output",
"summaryPrompt": "Erstelle eine umfassende Zusammenfassung aller SharePoint Sites und deren Inhalte. Strukturiere das Dokument nach Sites und fasse für jede Site die wichtigsten Themen, Ordnerstrukturen und Dokumente zusammen. Erstelle ein professionelles Word-Dokument mit Überschriften, Abschnitten und einer klaren Gliederung. Berücksichtige alle gefundenen Sites, deren Ordnerstrukturen und dokumentiere die wichtigsten Inhalte pro Site."
}
},
{
"template": {
"overview": "Immobilienrecherche Zürich",
"tasks": [
{
"id": "Task02",
"title": "Immobilienrecherche Zürich",
"description": "Webrecherche nach Immobilien im Kanton Zürich und Speicherung in Excel",
"objective": "Immobilienrecherche im Kanton Zürich zum Verkauf (5-20 Mio. CHF) und speichere Ergebnisse in Excel-Liste auf SharePoint",
"actionList": [
{
"execMethod": "ai",
"execAction": "webResearch",
"execParameters": {
"prompt": "{{KEY:immobilienResearchPrompt}}",
"urlList": ["{{KEY:immobilienResearchUrl}}"]
},
"execResultLabel": "immobilien_research_results"
},
{
"execMethod": "ai",
"execAction": "process",
"execParameters": {
"aiPrompt": "{{KEY:excelFormatPrompt}}",
"documentList": ["immobilien_research_results"],
"resultType": "xlsx"
},
"execResultLabel": "immobilien_excel_list"
},
{
"execMethod": "sharepoint",
"execAction": "uploadDocument",
"execParameters": {
"connectionReference": "{{KEY:connectionName}}",
"documentList": ["immobilien_excel_list"],
"pathQuery": "{{KEY:sharepointFolderNameDestination}}"
},
"execResultLabel": "immobilien_upload_result"
}
]
}
]
},
"parameters": {
"connectionName": "connection:msft:p.motsch@valueon.ch",
"sharepointFolderNameDestination": "/sites/company-share/Freigegebene Dokumente/15. Persoenliche Ordner/Patrick Motsch/output",
"immobilienResearchUrl": ["https://www.homegate.ch", "https://www.immoscout24.ch", "https://www.immowelt.ch"],
"immobilienResearchPrompt": "Suche nach Immobilien zum Verkauf im Kanton Zürich, Schweiz, im Preisbereich von 5-20 Millionen CHF. Sammle Informationen zu: Ort, Preis, Beschreibung, URL zu Bildern, Verkäufer/Kontaktinformationen.",
"excelFormatPrompt": "Erstelle eine Excel-Datei mit den recherchierten Immobilien. Jede Immobilie soll eine Zeile sein mit den folgenden Spalten: Ort, Preis (in CHF), Beschreibung, URL zu Bild, Verkäufer. Verwende die Daten aus der Webrecherche."
}
},
{
"template": {
"overview": "Spesenbelege Zusammenfassung",
"tasks": [
{
"id": "Task03",
"title": "Spesenbelege CSV Zusammenfassung",
"description": "Liest PDF-Spesenbelege aus SharePoint-Ordner und erstellt CSV-Zusammenfassung",
"objective": "Extrahiere alle PDF-Spesenbelege aus einem SharePoint-Ordner und erstelle eine CSV-Datei mit allen Spesendaten im selben Ordner",
"actionList": [
{
"execMethod": "sharepoint",
"execAction": "findDocumentPath",
"execParameters": {
"connectionReference": "{{KEY:connectionName}}",
"searchQuery": "{{KEY:sharepointFolderNameSource}}:files:.pdf",
"maxResults": 100
},
"execResultLabel": "sharepoint_pdf_files"
},
{
"execMethod": "sharepoint",
"execAction": "readDocuments",
"execParameters": {
"connectionReference": "{{KEY:connectionName}}",
"pathObject": "sharepoint_pdf_files"
},
"execResultLabel": "spesenbelege_documents"
},
{
"execMethod": "ai",
"execAction": "process",
"execParameters": {
"aiPrompt": "{{KEY:expenseExtractionPrompt}}",
"documentList": ["spesenbelege_documents"],
"resultType": "csv"
},
"execResultLabel": "spesenbelege_csv"
},
{
"execMethod": "sharepoint",
"execAction": "uploadDocument",
"execParameters": {
"connectionReference": "{{KEY:connectionName}}",
"documentList": ["spesenbelege_csv"],
"pathQuery": "{{KEY:sharepointFolderNameDestination}}"
},
"execResultLabel": "spesenbelege_upload_result"
}
]
}
]
},
"parameters": {
"connectionName": "connection:msft:p.motsch@valueon.ch",
"sharepointFolderNameSource": "/sites/company-share/Freigegebene Dokumente/15. Persoenliche Ordner/Patrick Motsch/expenses",
"sharepointFolderNameDestination": "/sites/company-share/Freigegebene Dokumente/15. Persoenliche Ordner/Patrick Motsch/output",
"expenseExtractionPrompt": "Verarbeite alle bereitgestellten Dokumente, aber extrahiere nur Daten aus PDF-Spesenbelegen (ignoriere andere Dateitypen). Für jeden gefundenen PDF-Spesenbeleg extrahiere als separaten Datensatz: Datum, Betrag, MWST %, Währung, Kategorie, Beschreibung, Rechnungsnummer, Händler/Verkäufer, Steuerbetrag. Erstelle eine CSV-Datei mit einer Zeile pro Spesenbeleg. Verwende die folgenden Spaltenüberschriften: Datum, Betrag, Währung, Kategorie, Beschreibung, Rechnungsnummer, Händler, Steuerbetrag. Stelle sicher, dass alle Beträge numerisch sind und Datumswerte im Format YYYY-MM-DD vorliegen. Wenn ein Dokument kein Spesenbeleg ist, ignoriere es."
}
},
{
"template": {
"overview": "Preprocessing Server Data Update",
"tasks": [
{
"id": "Task04",
"title": "Trigger Preprocessing Server",
"description": "Triggers the preprocessing server at customer tenant to update database with configuration",
"objective": "Call preprocessing server endpoint to update database with provided configuration JSON",
"actionList": [
{
"execMethod": "context",
"execAction": "triggerPreprocessingServer",
"execParameters": {
"endpoint": "{{KEY:endpoint}}",
"configJson": "{{KEY:configJson}}",
"authSecretConfigKey": "{{KEY:authSecretConfigKey}}"
},
"execResultLabel": "preprocessing_server_result"
}
]
}
]
},
"parameters": {
"endpoint": "https://poweron-althaus-preprocess-prod-e3fegaatc7faency.switzerlandnorth-01.azurewebsites.net/api/v1/dataprocessor/update-db-with-config",
"authSecretConfigKey": "PREPROCESS_ALTHAUS_CHAT_SECRET",
"configJson": "{\"tables\":[{\"name\":\"Artikel\",\"powerbi_table_name\":\"Artikel\",\"steps\":[{\"keep\":{\"columns\":[\"I_ID\",\"Artikelbeschrieb\",\"Artikelbezeichnung\",\"Artikelgruppe\",\"Artikelkategorie\",\"Artikelkürzel\",\"Artikelnummer\",\"Einheit\",\"Gesperrt\",\"Keywords\",\"Lieferant\",\"Warengruppe\"]}},{\"fillna\":{\"column\":\"Lieferant\",\"value\":\"Unbekannt\"}}]},{\"name\":\"Einkaufspreis\",\"powerbi_table_name\":\"Einkaufspreis\",\"steps\":[{\"to_numeric\":{\"column\":\"EP_CHF\",\"errors\":\"coerce\"}},{\"dropna\":{\"subset\":[\"EP_CHF\"]}}]}]}"
}
},
{
"template": {
"overview": "JIRA to SharePoint Ticket Synchronization",
"tasks": [
{
"id": "Task01",
"title": "Sync JIRA Tickets to SharePoint",
"description": "Export JIRA tickets, merge with SharePoint file, upload back, and import changes to JIRA",
"objective": "Synchronize JIRA tickets with SharePoint file (bidirectional sync)",
"actionList": [
{
"execMethod": "sharepoint",
"execAction": "findSiteByUrl",
"execParameters": {
"connectionReference": "{{KEY:sharepointConnection}}",
"hostname": "{{KEY:sharepointHostname}}",
"sitePath": "{{KEY:sharepointSitePath}}"
},
"execResultLabel": "sharepoint_site"
},
{
"execMethod": "jira",
"execAction": "connectJira",
"execParameters": {
"apiUsername": "{{KEY:jiraUsername}}",
"apiTokenConfigKey": "{{KEY:jiraTokenConfigKey}}",
"apiUrl": "{{KEY:jiraUrl}}",
"projectCode": "{{KEY:jiraProjectCode}}",
"issueType": "{{KEY:jiraIssueType}}",
"taskSyncDefinition": "{{KEY:taskSyncDefinition}}"
},
"execResultLabel": "jira_connection"
},
{
"execMethod": "jira",
"execAction": "exportTicketsAsJson",
"execParameters": {
"connectionId": "jira_connection",
"taskSyncDefinition": "{{KEY:taskSyncDefinition}}"
},
"execResultLabel": "jira_exported_tickets"
},
{
"execMethod": "sharepoint",
"execAction": "downloadFileByPath",
"execParameters": {
"connectionReference": "{{KEY:sharepointConnection}}",
"siteId": "sharepoint_site",
"filePath": "{{KEY:sharepointMainFolder}}/{{KEY:syncFileName}}"
},
"execResultLabel": "existing_file_content"
},
{
"execMethod": "jira",
"execAction": "parseExcelContent",
"execParameters": {
"excelContent": "existing_file_content",
"skipRows": 3,
"hasCustomHeaders": True
},
"execResultLabel": "existing_parsed_data"
},
{
"execMethod": "jira",
"execAction": "mergeTicketData",
"execParameters": {
"jiraData": "jira_exported_tickets",
"existingData": "existing_parsed_data",
"taskSyncDefinition": "{{KEY:taskSyncDefinition}}",
"idField": "ID"
},
"execResultLabel": "merged_ticket_data"
},
{
"execMethod": "sharepoint",
"execAction": "copyFile",
"execParameters": {
"connectionReference": "{{KEY:sharepointConnection}}",
"siteId": "sharepoint_site",
"sourceFolder": "{{KEY:sharepointMainFolder}}",
"sourceFile": "{{KEY:syncFileName}}",
"destFolder": "{{KEY:sharepointBackupFolder}}",
"destFile": "backup_{{TIMESTAMP}}_{{KEY:syncFileName}}"
},
"execResultLabel": "file_backup"
},
{
"execMethod": "jira",
"execAction": "createExcelContent",
"execParameters": {
"data": "merged_ticket_data",
"headers": "existing_parsed_data",
"taskSyncDefinition": "{{KEY:taskSyncDefinition}}"
},
"execResultLabel": "new_file_content"
},
{
"execMethod": "sharepoint",
"execAction": "uploadFile",
"execParameters": {
"connectionReference": "{{KEY:sharepointConnection}}",
"siteId": "sharepoint_site",
"folderPath": "{{KEY:sharepointMainFolder}}",
"fileName": "{{KEY:syncFileName}}",
"content": "new_file_content"
},
"execResultLabel": "uploaded_file"
},
{
"execMethod": "sharepoint",
"execAction": "downloadFileByPath",
"execParameters": {
"connectionReference": "{{KEY:sharepointConnection}}",
"siteId": "sharepoint_site",
"filePath": "{{KEY:sharepointMainFolder}}/{{KEY:syncFileName}}"
},
"execResultLabel": "uploaded_file_content"
},
{
"execMethod": "jira",
"execAction": "parseExcelContent",
"execParameters": {
"excelContent": "uploaded_file_content",
"skipRows": 3,
"hasCustomHeaders": True
},
"execResultLabel": "import_data"
},
{
"execMethod": "jira",
"execAction": "importTicketsFromJson",
"execParameters": {
"connectionId": "jira_connection",
"ticketData": "import_data",
"taskSyncDefinition": "{{KEY:taskSyncDefinition}}"
},
"execResultLabel": "import_result"
}
]
}
]
},
"parameters": {
"sharepointConnection": "connection:msft:patrick.motsch@delta.ch",
"sharepointHostname": "deltasecurityag.sharepoint.com",
"sharepointSitePath": "SteeringBPM",
"sharepointMainFolder": "/General/50 Docs hosted by SELISE",
"sharepointBackupFolder": "/General/50 Docs hosted by SELISE/SyncHistory",
"syncFileName": "DELTAgroup x SELISE Ticket Exchange List.xlsx",
"jiraUsername": "p.motsch@valueon.ch",
"jiraTokenConfigKey": "Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET",
"jiraUrl": "https://deltasecurity.atlassian.net",
"jiraProjectCode": "DCS",
"jiraIssueType": "Task",
"taskSyncDefinition": "{\"ID\":[\"get\",[\"key\"]],\"Module Category\":[\"get\",[\"fields\",\"customfield_10058\",\"value\"]],\"Summary\":[\"get\",[\"fields\",\"summary\"]],\"Description\":[\"get\",[\"fields\",\"description\"]],\"References\":[\"get\",[\"fields\",\"customfield_10066\"]],\"Priority\":[\"get\",[\"fields\",\"priority\",\"name\"]],\"Issue Status\":[\"get\",[\"fields\",\"status\",\"name\"]],\"Assignee\":[\"get\",[\"fields\",\"assignee\",\"displayName\"]],\"Issue Created\":[\"get\",[\"fields\",\"created\"]],\"Due Date\":[\"get\",[\"fields\",\"duedate\"]],\"DELTA Comments\":[\"get\",[\"fields\",\"customfield_10167\"]],\"SELISE Ticket References\":[\"put\",[\"fields\",\"customfield_10067\"]],\"SELISE Status Values\":[\"put\",[\"fields\",\"customfield_10065\"]],\"SELISE Comments\":[\"put\",[\"fields\",\"customfield_10168\"]]}"
}
},
{
"template": {
"overview": "Expenses PDF to Trustee Position",
"tasks": [
{
"id": "Task01",
"title": "Run trustee pipeline on SharePoint files",
"description": "Extract expenses from SharePoint PDFs, create positions + documents, sync to accounting",
"objective": "End-to-end: SharePoint folder → AI extraction → Trustee DB → Accounting sync",
"actionList": [
{
"execMethod": "trustee",
"execAction": "extractFromFiles",
"execParameters": {
"connectionReference": "{{KEY:connectionName}}",
"sharepointFolder": "{{KEY:sharepointFolder}}",
"featureInstanceId": "{{KEY:featureInstanceId}}"
},
"execResultLabel": "extract_result"
},
{
"execMethod": "trustee",
"execAction": "processDocuments",
"execParameters": {
"documentList": "docList:{{PREV_MESSAGE_ID}}:extract_result",
"featureInstanceId": "{{KEY:featureInstanceId}}"
},
"execResultLabel": "process_result"
},
{
"execMethod": "trustee",
"execAction": "syncToAccounting",
"execParameters": {
"documentList": "docList:{{PREV_MESSAGE_ID}}:process_result",
"featureInstanceId": "{{KEY:featureInstanceId}}"
},
"execResultLabel": "sync_result"
}
]
}
]
},
"parameters": {
"connectionName": "",
"sharepointFolder": "",
"featureInstanceId": ""
}
}
]
}
def getAutomationTemplates() -> Dict[str, Any]:
    """Expose the module-level automation template registry.

    Returns:
        The template structure, a dict keyed by 'sets' containing all
        predefined automation templates and their default parameters.
    """
    templates = AUTOMATION_TEMPLATES
    return templates

View file

@ -1,118 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Utility functions for automation feature.
Moved from interfaces/interfaceDbChat.py.
"""
import json
from typing import Dict, Any
from datetime import datetime, UTC
def parseScheduleToCron(schedule: str) -> Dict[str, Any]:
    """Translate a 5-field cron string into APScheduler cron-trigger kwargs.

    Args:
        schedule: Standard cron expression, e.g. "0 5 * * 1"
            (minute hour day month day_of_week).

    Returns:
        Dict with keys minute, hour, day, month, day_of_week.

    Raises:
        ValueError: If the expression does not have exactly five fields.
    """
    fields = schedule.split()
    if len(fields) != 5:
        raise ValueError(f"Invalid schedule format: {schedule}")
    names = ("minute", "hour", "day", "month", "day_of_week")
    return dict(zip(names, fields))
def planToPrompt(plan: Dict) -> str:
    """Derive the execution prompt string from a plan structure.

    Prefers the plan's "userMessage", falls back to "overview", and finally
    to a generic default when neither key is present.
    """
    fallback = plan.get("overview", "Execute automation workflow")
    return plan.get("userMessage", fallback)
def replacePlaceholders(template: str, placeholders: Dict[str, str]) -> str:
"""Replace placeholders in template with actual values. Placeholder format: {{KEY:PLACEHOLDER_NAME}} or {{TIMESTAMP}}"""
result = template
# Replace TIMESTAMP placeholder first (calculated placeholder, not from parameters)
timestampPattern = "{{TIMESTAMP}}"
if timestampPattern in result:
timestamp = datetime.now(UTC).strftime("%Y%m%d_%H%M%S")
result = result.replace(timestampPattern, timestamp)
for placeholderName, value in placeholders.items():
pattern = f"{{{{KEY:{placeholderName}}}}}"
# Check if placeholder is in an array context like ["{{KEY:...}}"]
# If value is a JSON array/dict, we should replace the entire ["{{KEY:...}}"] with the array
arrayPattern = f'["{pattern}"]'
if arrayPattern in result:
# Check if value is a JSON array/dict
isArrayValue = False
arrayValue = None
if isinstance(value, (list, dict)):
isArrayValue = True
arrayValue = json.dumps(value)
elif isinstance(value, str):
try:
parsed = json.loads(value)
if isinstance(parsed, (list, dict)):
isArrayValue = True
arrayValue = value # Already valid JSON string
except (json.JSONDecodeError, ValueError):
pass
if isArrayValue:
# Replace ["{{KEY:...}}"] with the array value
result = result.replace(arrayPattern, arrayValue)
continue # Skip the regular replacement below
# Regular replacement - check if in quoted context
patternStart = result.find(pattern)
isQuoted = False
if patternStart > 0:
charBefore = result[patternStart - 1] if patternStart > 0 else None
patternEnd = patternStart + len(pattern)
charAfter = result[patternEnd] if patternEnd < len(result) else None
if charBefore == '"' and charAfter == '"':
isQuoted = True
# Handle different value types
if isinstance(value, (list, dict)):
# Python list/dict - convert to JSON
replacement = json.dumps(value)
elif isinstance(value, str):
# String value - check if it's a JSON string representing list/dict
try:
parsed = json.loads(value)
if isinstance(parsed, (list, dict)):
# It's a JSON string of a list/dict
if isQuoted:
# In quoted context, escape the JSON string
escaped = json.dumps(value)
replacement = escaped[1:-1] # Remove outer quotes
else:
# In unquoted context, use JSON directly
replacement = value
else:
# It's a JSON string of a primitive
if isQuoted:
escaped = json.dumps(value)
replacement = escaped[1:-1]
else:
replacement = value
except (json.JSONDecodeError, ValueError):
# Not valid JSON - treat as plain string
if isQuoted:
escaped = json.dumps(value)
replacement = escaped[1:-1]
else:
replacement = value
else:
# Numbers, booleans, None - convert to string
replacement = str(value)
result = result.replace(pattern, replacement)
return result

View file

@ -1,2 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# Automation2 feature - n8n-style flow automation (backup/parallel to legacy automation)

View file

@ -1,166 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Automation2 models: Automation2Workflow, Automation2WorkflowRun, Automation2HumanTask."""
from typing import Dict, Any, List, Optional
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
from modules.shared.attributeUtils import registerModelLabels
import uuid
class Automation2Workflow(BaseModel):
    """Automation2 workflow definition: node/connection graph plus entry points.

    Scoped to a mandate and feature instance. The graph carries the node
    parameters; ``invocations`` lists starts configured outside the canvas
    (manual, form, schedule, webhook, ...).

    NOTE(review): extends BaseModel while the other models in this module
    extend PowerOnModel — confirm this is intentional.
    """

    # Server-generated UUID primary key.
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    mandateId: str = Field(
        description="Mandate ID",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    featureInstanceId: str = Field(
        description="Feature instance ID",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    label: str = Field(
        description="User-friendly workflow name",
        json_schema_extra={"frontend_type": "text", "frontend_required": True},
    )
    graph: Dict[str, Any] = Field(
        default_factory=dict,
        description="Graph with nodes and connections (incl. node parameters)",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": True},
    )
    active: bool = Field(
        default=True,
        description="Whether workflow is active",
        json_schema_extra={"frontend_type": "checkbox", "frontend_required": False},
    )
    invocations: List[Dict[str, Any]] = Field(
        default_factory=list,
        description="Entry points / starts (manual, form, schedule, webhook, …) configured outside the canvas",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
    )


# Register translated (en/de/fr) display labels for the model and its fields.
registerModelLabels(
    "Automation2Workflow",
    {"en": "Automation2 Workflow", "de": "Automation2 Workflow", "fr": "Workflow Automation2"},
    {
        "id": {"en": "ID", "de": "ID", "fr": "ID"},
        "mandateId": {"en": "Mandate ID", "de": "Mandanten-ID", "fr": "ID du mandat"},
        "featureInstanceId": {"en": "Feature Instance ID", "de": "Feature-Instanz-ID", "fr": "ID instance"},
        "label": {"en": "Label", "de": "Bezeichnung", "fr": "Libellé"},
        "graph": {"en": "Graph", "de": "Graph", "fr": "Graphe"},
        "active": {"en": "Active", "de": "Aktiv", "fr": "Actif"},
        "invocations": {"en": "Starts / Entry points", "de": "Starts / Einstiegspunkte", "fr": "Points d'entrée"},
    },
)
class Automation2WorkflowRun(PowerOnModel):
    """Execution record of an Automation2 workflow.

    Tracks status, per-node outputs, and — when paused on a human task —
    the node at which execution stopped plus the context needed to resume.
    """

    # Server-generated UUID primary key.
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    workflowId: str = Field(
        description="Workflow ID",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    status: str = Field(
        default="running",
        description="Status: running|paused|completed|failed",
        json_schema_extra={"frontend_type": "text", "frontend_required": False},
    )
    nodeOutputs: Dict[str, Any] = Field(
        default_factory=dict,
        description="Outputs from executed nodes",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
    )
    currentNodeId: Optional[str] = Field(
        default=None,
        description="Node ID when paused (human task)",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    context: Dict[str, Any] = Field(
        default_factory=dict,
        description="Context for resume (connectionMap, inputSources, etc.)",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
    )


# Register translated (en/de/fr) display labels for the model and its fields.
registerModelLabels(
    "Automation2WorkflowRun",
    {"en": "Automation2 Workflow Run", "de": "Automation2 Workflow-Ausführung", "fr": "Exécution workflow"},
    {
        "id": {"en": "ID", "de": "ID", "fr": "ID"},
        "workflowId": {"en": "Workflow ID", "de": "Workflow-ID", "fr": "ID workflow"},
        "status": {"en": "Status", "de": "Status", "fr": "Statut"},
        "nodeOutputs": {"en": "Node Outputs", "de": "Node-Ausgaben", "fr": "Sorties nœuds"},
        "currentNodeId": {"en": "Current Node", "de": "Aktueller Knoten", "fr": "Nœud actuel"},
        "context": {"en": "Context", "de": "Kontext", "fr": "Contexte"},
    },
)
class Automation2HumanTask(PowerOnModel):
    """Human-in-the-loop task created when a run pauses on an interactive node.

    Links back to its run/workflow/node and stores the node config needed to
    render the task, the optional assignee, and the result once completed.
    """

    # Server-generated UUID primary key.
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    runId: str = Field(
        description="Workflow run ID",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    workflowId: str = Field(
        description="Workflow ID",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    nodeId: str = Field(
        description="Node ID in the graph",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    nodeType: str = Field(
        description="Node type: form|approval|upload|comment|review|selection|confirmation",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    config: Dict[str, Any] = Field(
        default_factory=dict,
        description="Node config (form schema, approval text, etc.)",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
    )
    assigneeId: Optional[str] = Field(
        default=None,
        description="User ID assigned to complete the task",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False},
    )
    status: str = Field(
        default="pending",
        description="Status: pending|completed|rejected",
        json_schema_extra={"frontend_type": "text", "frontend_required": False},
    )
    result: Optional[Dict[str, Any]] = Field(
        default=None,
        description="Task result (form data, approval decision, etc.)",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
    )


# Register translated (en/de/fr) display labels for the model and its fields.
registerModelLabels(
    "Automation2HumanTask",
    {"en": "Automation2 Human Task", "de": "Automation2 Benutzer-Aufgabe", "fr": "Tâche utilisateur"},
    {
        "id": {"en": "ID", "de": "ID", "fr": "ID"},
        "runId": {"en": "Run ID", "de": "Lauf-ID", "fr": "ID exécution"},
        "workflowId": {"en": "Workflow ID", "de": "Workflow-ID", "fr": "ID workflow"},
        "nodeId": {"en": "Node ID", "de": "Knoten-ID", "fr": "ID nœud"},
        "nodeType": {"en": "Node Type", "de": "Knotentyp", "fr": "Type nœud"},
        "config": {"en": "Config", "de": "Konfiguration", "fr": "Configuration"},
        "assigneeId": {"en": "Assignee", "de": "Zugewiesen an", "fr": "Assigné à"},
        "status": {"en": "Status", "de": "Status", "fr": "Statut"},
        "result": {"en": "Result", "de": "Ergebnis", "fr": "Résultat"},
    },
)

View file

@ -0,0 +1,2 @@
# Copyright (c) 2025 Patrick Motsch
# GraphicalEditor feature - n8n-style flow automation with visual editor

View file

@ -0,0 +1,482 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""GraphicalEditor models with Auto-prefix: AutoWorkflow, AutoVersion, AutoRun, AutoStepLog, AutoTask."""
from enum import Enum
from typing import Dict, Any, List, Optional
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
from modules.shared.attributeUtils import registerModelLabels
import uuid
# ---------------------------------------------------------------------------
# Enums
# ---------------------------------------------------------------------------
class AutoWorkflowStatus(str, Enum):
    """Lifecycle status of a workflow (and of an AutoVersion)."""

    DRAFT = "draft"
    PUBLISHED = "published"
    ARCHIVED = "archived"
class AutoRunStatus(str, Enum):
    """Overall status of a workflow run."""

    RUNNING = "running"
    PAUSED = "paused"
    COMPLETED = "completed"
    FAILED = "failed"
    CANCELLED = "cancelled"
class AutoStepStatus(str, Enum):
    """Status of a single step (node execution) within a run."""

    PENDING = "pending"
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"
    SKIPPED = "skipped"
class AutoTaskStatus(str, Enum):
    """Status of a human task."""

    PENDING = "pending"
    COMPLETED = "completed"
    CANCELLED = "cancelled"
    EXPIRED = "expired"
class AutoTemplateScope(str, Enum):
    """Sharing scope of a workflow template."""

    USER = "user"
    INSTANCE = "instance"
    MANDATE = "mandate"
    SYSTEM = "system"
# ---------------------------------------------------------------------------
# AutoWorkflow
# ---------------------------------------------------------------------------
class AutoWorkflow(PowerOnModel):
    """GraphicalEditor workflow head record.

    Holds identity, template/sharing metadata, and a pointer to the currently
    published AutoVersion. The ``graph``/``invocations`` fields here are
    legacy carriers kept during the transition to versioned graphs
    (see AutoVersion).
    """

    # Server-generated UUID primary key.
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    mandateId: str = Field(
        description="Mandate ID",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    featureInstanceId: str = Field(
        description="Feature instance ID",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    label: str = Field(
        description="User-friendly workflow name",
        json_schema_extra={"frontend_type": "text", "frontend_required": True},
    )
    description: Optional[str] = Field(
        default=None,
        description="Workflow description",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
    )
    tags: List[str] = Field(
        default_factory=list,
        description="Tags for categorization",
        json_schema_extra={"frontend_type": "tags", "frontend_required": False},
    )
    # Template metadata: a workflow can itself be a template, or be derived
    # from one (templateSourceId), with a sharing scope per AutoTemplateScope.
    isTemplate: bool = Field(
        default=False,
        description="Whether this workflow is a template",
        json_schema_extra={"frontend_type": "checkbox", "frontend_required": False},
    )
    templateSourceId: Optional[str] = Field(
        default=None,
        description="ID of the template this workflow was created from",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    templateScope: Optional[str] = Field(
        default=None,
        description="Template scope: user, instance, mandate, system (AutoTemplateScope)",
        json_schema_extra={"frontend_type": "select", "frontend_required": False},
    )
    sharedReadOnly: bool = Field(
        default=False,
        description="If true, shared template is read-only for non-owners",
        json_schema_extra={"frontend_type": "checkbox", "frontend_required": False},
    )
    currentVersionId: Optional[str] = Field(
        default=None,
        description="ID of the currently published AutoVersion",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    active: bool = Field(
        default=True,
        description="Whether workflow is active",
        json_schema_extra={"frontend_type": "checkbox", "frontend_required": False},
    )
    eventId: Optional[str] = Field(
        default=None,
        description="Scheduler event ID for incremental sync",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    notifyOnFailure: bool = Field(
        default=True,
        description="Send notification (in-app + email) when a run fails",
        json_schema_extra={"frontend_type": "checkbox", "frontend_required": False},
    )
    # Legacy fields kept for backward compatibility during transition
    graph: Dict[str, Any] = Field(
        default_factory=dict,
        description="Graph with nodes and connections (legacy; prefer AutoVersion.graph)",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
    )
    invocations: List[Dict[str, Any]] = Field(
        default_factory=list,
        description="Entry points / starts (manual, form, schedule, webhook, ...)",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
    )


# Register translated (en/de/fr) display labels for the model and its fields.
# NOTE(review): no label entry for notifyOnFailure — confirm the frontend
# falls back gracefully for unlabeled fields.
registerModelLabels(
    "AutoWorkflow",
    {"en": "Workflow", "de": "Workflow", "fr": "Workflow"},
    {
        "id": {"en": "ID", "de": "ID", "fr": "ID"},
        "mandateId": {"en": "Mandate ID", "de": "Mandanten-ID", "fr": "ID du mandat"},
        "featureInstanceId": {"en": "Feature Instance ID", "de": "Feature-Instanz-ID", "fr": "ID instance"},
        "label": {"en": "Label", "de": "Bezeichnung", "fr": "Libellé"},
        "description": {"en": "Description", "de": "Beschreibung", "fr": "Description"},
        "tags": {"en": "Tags", "de": "Tags", "fr": "Tags"},
        "isTemplate": {"en": "Is Template", "de": "Ist Vorlage", "fr": "Est modèle"},
        "templateSourceId": {"en": "Template Source", "de": "Vorlagen-Quelle", "fr": "Source du modèle"},
        "templateScope": {"en": "Template Scope", "de": "Vorlagen-Bereich", "fr": "Portée du modèle"},
        "sharedReadOnly": {"en": "Shared Read-Only", "de": "Freigabe nur-lesen", "fr": "Partage lecture seule"},
        "currentVersionId": {"en": "Current Version", "de": "Aktuelle Version", "fr": "Version actuelle"},
        "active": {"en": "Active", "de": "Aktiv", "fr": "Actif"},
        "eventId": {"en": "Event ID", "de": "Event-ID", "fr": "ID événement"},
        "graph": {"en": "Graph", "de": "Graph", "fr": "Graphe"},
        "invocations": {"en": "Starts / Entry points", "de": "Starts / Einstiegspunkte", "fr": "Points d'entrée"},
    },
)
# ---------------------------------------------------------------------------
# AutoVersion
# ---------------------------------------------------------------------------
class AutoVersion(PowerOnModel):
    """Immutable-by-convention snapshot of a workflow's graph and entry points.

    Versions are numbered per workflow; publication stamps time and user.
    Status values follow AutoWorkflowStatus (draft/published/archived).
    """

    # Server-generated UUID primary key.
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    workflowId: str = Field(
        description="FK -> AutoWorkflow",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    versionNumber: int = Field(
        default=1,
        description="Incrementing version number",
        json_schema_extra={"frontend_type": "number", "frontend_readonly": True, "frontend_required": False},
    )
    status: str = Field(
        default=AutoWorkflowStatus.DRAFT.value,
        description="Version status: draft, published, archived",
        json_schema_extra={"frontend_type": "select", "frontend_required": False},
    )
    graph: Dict[str, Any] = Field(
        default_factory=dict,
        description="Graph with nodes and connections (incl. node parameters)",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": True},
    )
    invocations: List[Dict[str, Any]] = Field(
        default_factory=list,
        description="Entry points / starts for this version",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
    )
    # Publication timestamp; float per annotation — presumably epoch seconds,
    # TODO confirm against the writer.
    publishedAt: Optional[float] = Field(
        default=None,
        description="Timestamp when version was published",
        json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False},
    )
    publishedBy: Optional[str] = Field(
        default=None,
        description="User ID who published this version",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )


# Register translated (en/de/fr) display labels for the model and its fields.
registerModelLabels(
    "AutoVersion",
    {"en": "Workflow Version", "de": "Workflow-Version", "fr": "Version workflow"},
    {
        "id": {"en": "ID", "de": "ID", "fr": "ID"},
        "workflowId": {"en": "Workflow ID", "de": "Workflow-ID", "fr": "ID workflow"},
        "versionNumber": {"en": "Version", "de": "Version", "fr": "Version"},
        "status": {"en": "Status", "de": "Status", "fr": "Statut"},
        "graph": {"en": "Graph", "de": "Graph", "fr": "Graphe"},
        "invocations": {"en": "Entry Points", "de": "Einstiegspunkte", "fr": "Points d'entrée"},
        "publishedAt": {"en": "Published At", "de": "Veröffentlicht am", "fr": "Publié le"},
        "publishedBy": {"en": "Published By", "de": "Veröffentlicht von", "fr": "Publié par"},
    },
)
# ---------------------------------------------------------------------------
# AutoRun
# ---------------------------------------------------------------------------
class AutoRun(PowerOnModel):
    """Execution record of a workflow version.

    Tracks trigger info, lifecycle timestamps, per-node outputs, pause/resume
    state (human task or email wait), failure details, and cost accounting
    (tokens and credits consumed by AI nodes).
    """

    # Server-generated UUID primary key.
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    workflowId: str = Field(
        description="Workflow ID",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    versionId: Optional[str] = Field(
        default=None,
        description="AutoVersion ID used for this run",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    status: str = Field(
        default=AutoRunStatus.RUNNING.value,
        description="Status: running, paused, completed, failed, cancelled",
        json_schema_extra={"frontend_type": "text", "frontend_required": False},
    )
    trigger: Dict[str, Any] = Field(
        default_factory=dict,
        description="Trigger info (type, entryPointId, payload, etc.)",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
    )
    # Timestamps are floats — presumably epoch seconds, TODO confirm.
    startedAt: Optional[float] = Field(
        default=None,
        description="Run start timestamp",
        json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False},
    )
    completedAt: Optional[float] = Field(
        default=None,
        description="Run completion timestamp",
        json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False},
    )
    nodeOutputs: Dict[str, Any] = Field(
        default_factory=dict,
        description="Outputs from executed nodes",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
    )
    currentNodeId: Optional[str] = Field(
        default=None,
        description="Node ID when paused (human task / email wait)",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    resumeContext: Dict[str, Any] = Field(
        default_factory=dict,
        description="Context for resume (connectionMap, inputSources, etc.)",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
    )
    error: Optional[str] = Field(
        default=None,
        description="Error message if failed",
        json_schema_extra={"frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False},
    )
    # Cost accounting for AI usage within the run.
    costTokens: int = Field(
        default=0,
        description="Total tokens consumed by AI nodes",
        json_schema_extra={"frontend_type": "number", "frontend_readonly": True, "frontend_required": False},
    )
    costCredits: float = Field(
        default=0.0,
        description="Total credits consumed",
        json_schema_extra={"frontend_type": "number", "frontend_readonly": True, "frontend_required": False},
    )


# Register translated (en/de/fr) display labels for the model and its fields.
registerModelLabels(
    "AutoRun",
    {"en": "Workflow Run", "de": "Workflow-Ausführung", "fr": "Exécution workflow"},
    {
        "id": {"en": "ID", "de": "ID", "fr": "ID"},
        "workflowId": {"en": "Workflow ID", "de": "Workflow-ID", "fr": "ID workflow"},
        "versionId": {"en": "Version ID", "de": "Versions-ID", "fr": "ID version"},
        "status": {"en": "Status", "de": "Status", "fr": "Statut"},
        "trigger": {"en": "Trigger", "de": "Auslöser", "fr": "Déclencheur"},
        "startedAt": {"en": "Started At", "de": "Gestartet am", "fr": "Démarré le"},
        "completedAt": {"en": "Completed At", "de": "Abgeschlossen am", "fr": "Terminé le"},
        "nodeOutputs": {"en": "Node Outputs", "de": "Node-Ausgaben", "fr": "Sorties nœuds"},
        "currentNodeId": {"en": "Current Node", "de": "Aktueller Knoten", "fr": "Nœud actuel"},
        "resumeContext": {"en": "Resume Context", "de": "Wiederaufnahme-Kontext", "fr": "Contexte reprise"},
        "error": {"en": "Error", "de": "Fehler", "fr": "Erreur"},
        "costTokens": {"en": "Tokens Used", "de": "Verbrauchte Tokens", "fr": "Tokens utilisés"},
        "costCredits": {"en": "Credits Used", "de": "Verbrauchte Credits", "fr": "Crédits utilisés"},
    },
)
# ---------------------------------------------------------------------------
# AutoStepLog
# ---------------------------------------------------------------------------
class AutoStepLog(PowerOnModel):
    """Per-node execution log entry for a run.

    Captures the input snapshot, output, error, timing, token usage and
    retry count of a single node execution within an AutoRun.
    """

    # Server-generated UUID primary key.
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    runId: str = Field(
        description="FK -> AutoRun",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    nodeId: str = Field(
        description="Node ID in the graph",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    nodeType: str = Field(
        description="Node type (e.g. ai.chat, email.send)",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    status: str = Field(
        default=AutoStepStatus.PENDING.value,
        description="Step status: pending, running, completed, failed, skipped",
        json_schema_extra={"frontend_type": "text", "frontend_required": False},
    )
    # Inputs are snapshotted at execution time so the log stays meaningful
    # even if upstream node outputs change on a later retry/resume.
    inputSnapshot: Dict[str, Any] = Field(
        default_factory=dict,
        description="Snapshot of inputs at execution time",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
    )
    output: Dict[str, Any] = Field(
        default_factory=dict,
        description="Node output",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
    )
    error: Optional[str] = Field(
        default=None,
        description="Error message if step failed",
        json_schema_extra={"frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False},
    )
    # Timestamps are floats — presumably epoch seconds, TODO confirm.
    startedAt: Optional[float] = Field(
        default=None,
        description="Step start timestamp",
        json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False},
    )
    completedAt: Optional[float] = Field(
        default=None,
        description="Step completion timestamp",
        json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False},
    )
    durationMs: Optional[int] = Field(
        default=None,
        description="Execution duration in milliseconds",
        json_schema_extra={"frontend_type": "number", "frontend_readonly": True, "frontend_required": False},
    )
    tokensUsed: int = Field(
        default=0,
        description="Tokens consumed by this step",
        json_schema_extra={"frontend_type": "number", "frontend_readonly": True, "frontend_required": False},
    )
    retryCount: int = Field(
        default=0,
        description="Number of retries executed",
        json_schema_extra={"frontend_type": "number", "frontend_readonly": True, "frontend_required": False},
    )


# Register translated (en/de/fr) display labels for the model and its fields.
registerModelLabels(
    "AutoStepLog",
    {"en": "Step Log", "de": "Schritt-Protokoll", "fr": "Journal d'étape"},
    {
        "id": {"en": "ID", "de": "ID", "fr": "ID"},
        "runId": {"en": "Run ID", "de": "Lauf-ID", "fr": "ID exécution"},
        "nodeId": {"en": "Node ID", "de": "Knoten-ID", "fr": "ID nœud"},
        "nodeType": {"en": "Node Type", "de": "Knotentyp", "fr": "Type nœud"},
        "status": {"en": "Status", "de": "Status", "fr": "Statut"},
        "inputSnapshot": {"en": "Input Snapshot", "de": "Eingabe-Snapshot", "fr": "Snapshot entrée"},
        "output": {"en": "Output", "de": "Ausgabe", "fr": "Sortie"},
        "error": {"en": "Error", "de": "Fehler", "fr": "Erreur"},
        "startedAt": {"en": "Started At", "de": "Gestartet am", "fr": "Démarré le"},
        "completedAt": {"en": "Completed At", "de": "Abgeschlossen am", "fr": "Terminé le"},
        "durationMs": {"en": "Duration (ms)", "de": "Dauer (ms)", "fr": "Durée (ms)"},
        "tokensUsed": {"en": "Tokens Used", "de": "Verbrauchte Tokens", "fr": "Tokens utilisés"},
        "retryCount": {"en": "Retry Count", "de": "Wiederholungen", "fr": "Nombre de tentatives"},
    },
)
# ---------------------------------------------------------------------------
# AutoTask
# ---------------------------------------------------------------------------
class AutoTask(PowerOnModel):
    """Human task record for a workflow run (aliased as Automation2HumanTask).

    One row per human-interaction node instance (form, approval, upload,
    comment, review, selection, confirmation). The ``json_schema_extra``
    metadata on each field carries frontend rendering hints (widget type,
    read-only flag, required flag).
    """

    # Primary key (UUID4, generated on creation).
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    # Foreign key to the AutoRun this task belongs to.
    runId: str = Field(
        description="FK -> AutoRun",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    # Workflow the run was started from.
    workflowId: str = Field(
        description="Workflow ID",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    # Graph node that produced this task.
    nodeId: str = Field(
        description="Node ID in the graph",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    nodeType: str = Field(
        description="Node type: form, approval, upload, comment, review, selection, confirmation",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    # Snapshot of the node's configuration (form schema, approval text, ...).
    config: Dict[str, Any] = Field(
        default_factory=dict,
        description="Node config (form schema, approval text, etc.)",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
    )
    # User expected to complete the task; None means unassigned.
    assigneeId: Optional[str] = Field(
        default=None,
        description="User ID assigned to complete the task",
        json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False},
    )
    # Lifecycle state; defaults to the enum's pending value.
    status: str = Field(
        default=AutoTaskStatus.PENDING.value,
        description="Status: pending, completed, cancelled, expired",
        json_schema_extra={"frontend_type": "text", "frontend_required": False},
    )
    # Outcome payload once completed (form data, approval decision, ...).
    result: Optional[Dict[str, Any]] = Field(
        default=None,
        description="Task result (form data, approval decision, etc.)",
        json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
    )
    # Timestamp after which the task counts as expired; None = no expiry.
    expiresAt: Optional[float] = Field(
        default=None,
        description="Expiration timestamp for the task",
        json_schema_extra={"frontend_type": "datetime", "frontend_required": False},
    )
# Register human-readable labels (en/de/fr) for the AutoTask model and each
# of its fields.
registerModelLabels(
    "AutoTask",
    {"en": "Task", "de": "Aufgabe", "fr": "Tâche"},
    {
        "id": {"en": "ID", "de": "ID", "fr": "ID"},
        "runId": {"en": "Run ID", "de": "Lauf-ID", "fr": "ID exécution"},
        "workflowId": {"en": "Workflow ID", "de": "Workflow-ID", "fr": "ID workflow"},
        "nodeId": {"en": "Node ID", "de": "Knoten-ID", "fr": "ID nœud"},
        "nodeType": {"en": "Node Type", "de": "Knotentyp", "fr": "Type nœud"},
        "config": {"en": "Config", "de": "Konfiguration", "fr": "Configuration"},
        "assigneeId": {"en": "Assignee", "de": "Zugewiesen an", "fr": "Assigné à"},
        "status": {"en": "Status", "de": "Status", "fr": "Statut"},
        "result": {"en": "Result", "de": "Ergebnis", "fr": "Résultat"},
        "expiresAt": {"en": "Expires At", "de": "Läuft ab am", "fr": "Expire le"},
    },
)
# ---------------------------------------------------------------------------
# Backward-compatible aliases: legacy code still imports the Automation2*
# names; they resolve to the renamed Auto* models during the transition.
# ---------------------------------------------------------------------------
Automation2Workflow = AutoWorkflow
Automation2WorkflowRun = AutoRun
Automation2HumanTask = AutoTask

View file

@ -25,8 +25,8 @@ async def _pollEmailWaits(eventUser) -> None:
Stops the poller when no runs are waiting.
"""
try:
from modules.features.automation2.interfaceFeatureAutomation2 import getAutomation2Interface
from modules.features.automation2.mainAutomation2 import getAutomation2Services
from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface as getAutomation2Interface
from modules.features.graphicalEditor.mainGraphicalEditor import getGraphicalEditorServices as getAutomation2Services
from modules.workflows.automation2.executionEngine import executeGraph
from modules.workflows.processing.shared.methodDiscovery import discoverMethods
from modules.interfaces.interfaceDbApp import getRootInterface

View file

@ -1,8 +1,8 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Interface for Automation2 feature - Workflows, Runs, Human Tasks.
Uses PostgreSQL poweron_automation2 database.
Interface for GraphicalEditor feature - Workflows, Runs, Human Tasks.
Uses PostgreSQL poweron_graphicaleditor database (Greenfield).
"""
import base64
@ -25,38 +25,50 @@ def _make_json_serializable(obj: Any) -> Any:
return obj
from modules.datamodels.datamodelUam import User
from modules.features.automation2.datamodelFeatureAutomation2 import (
Automation2Workflow,
Automation2WorkflowRun,
Automation2HumanTask,
from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import (
AutoWorkflow,
AutoVersion,
AutoRun,
AutoStepLog,
AutoTask,
AutoWorkflow as Automation2Workflow,
AutoRun as Automation2WorkflowRun,
AutoTask as Automation2HumanTask,
)
from modules.features.automation2.entryPoints import normalize_invocations_list
from modules.features.graphicalEditor.entryPoints import normalize_invocations_list
from modules.connectors.connectorDbPostgre import DatabaseConnector
from modules.shared.configuration import APP_CONFIG
logger = logging.getLogger(__name__)
_GREENFIELD_DB = "poweron_graphicaleditor"
_CALLBACK_WORKFLOW_CHANGED = "graphicalEditor.workflow.changed"
def getAutomation2Interface(
def getGraphicalEditorInterface(
currentUser: User,
mandateId: str,
featureInstanceId: str,
) -> "Automation2Objects":
"""Factory for Automation2 interface with user context."""
return Automation2Objects(
) -> "GraphicalEditorObjects":
"""Factory for GraphicalEditor interface with user context."""
return GraphicalEditorObjects(
currentUser=currentUser,
mandateId=mandateId,
featureInstanceId=featureInstanceId,
)
# Backward-compatible alias used by workflows/automation2/ execution engine
getAutomation2Interface = getGraphicalEditorInterface
def getAllWorkflowsForScheduling() -> List[Dict[str, Any]]:
"""
Get all active Automation2 workflows that have a schedule entry point (primary invocation).
Get all active workflows that have a schedule entry point (primary invocation).
Used by the scheduler to register cron jobs. Does not filter by mandate/instance.
"""
dbHost = APP_CONFIG.get("DB_HOST", "localhost")
dbDatabase = "poweron_automation2"
dbDatabase = _GREENFIELD_DB
dbUser = APP_CONFIG.get("DB_USER")
dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD")
dbPort = int(APP_CONFIG.get("DB_PORT", 5432))
@ -69,10 +81,8 @@ def getAllWorkflowsForScheduling() -> List[Dict[str, Any]]:
userId=None,
)
if not connector._ensureTableExists(Automation2Workflow):
logger.warning("Automation2 schedule: table Automation2Workflow does not exist")
logger.warning("GraphicalEditor schedule: table Automation2Workflow does not exist yet")
return []
# Don't filter by active in SQL: existing workflows may have active=NULL.
# Treat NULL as active; skip only when active is explicitly False.
records = connector.getRecordset(
Automation2Workflow,
recordFilter=None,
@ -89,7 +99,6 @@ def getAllWorkflowsForScheduling() -> List[Dict[str, Any]]:
if not isinstance(primary, dict):
primary = {}
# Cron comes from graph start node params (trigger.schedule)
graph = wf.get("graph") or {}
nodes = graph.get("nodes") or []
cron = None
@ -103,7 +112,6 @@ def getAllWorkflowsForScheduling() -> List[Dict[str, Any]]:
if not cron or not isinstance(cron, str) or not cron.strip():
continue
# Prefer invocations; if graph has trigger.schedule but invocations say manual, still schedule
if primary.get("kind") == "schedule" and primary.get("enabled", True):
entry_point_id = primary.get("id")
elif invocations and isinstance(invocations[0], dict) and invocations[0].get("id"):
@ -120,15 +128,15 @@ def getAllWorkflowsForScheduling() -> List[Dict[str, Any]]:
"workflow": wf,
})
logger.info(
"Automation2 schedule: DB has %d workflow(s), %d active with trigger.schedule+cron",
"GraphicalEditor schedule: DB has %d workflow(s), %d active with trigger.schedule+cron",
raw_count,
len(result),
)
return result
class Automation2Objects:
"""Interface for Automation2 database operations."""
class GraphicalEditorObjects:
"""Interface for GraphicalEditor database operations (Greenfield DB)."""
def __init__(
self,
@ -145,9 +153,9 @@ class Automation2Objects:
self.db.updateContext(self.userId)
def _init_db(self):
"""Initialize database connection to poweron_automation2."""
"""Initialize database connection to poweron_graphicaleditor (Greenfield)."""
dbHost = APP_CONFIG.get("DB_HOST", "localhost")
dbDatabase = "poweron_automation2"
dbDatabase = _GREENFIELD_DB
dbUser = APP_CONFIG.get("DB_USER")
dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD")
dbPort = int(APP_CONFIG.get("DB_PORT", 5432))
@ -159,16 +167,14 @@ class Automation2Objects:
dbPort=dbPort,
userId=self.userId,
)
logger.debug("Automation2 database initialized for user %s", self.userId)
logger.debug("GraphicalEditor database initialized for user %s", self.userId)
# -------------------------------------------------------------------------
# Workflow CRUD
# -------------------------------------------------------------------------
def getWorkflows(self, active: Optional[bool] = None) -> List[Dict[str, Any]]:
"""Get all workflows for this mandate and feature instance.
Optional active filter: True=only active, False=only inactive, None=all.
"""
"""Get all workflows for this mandate and feature instance."""
if not self.db._ensureTableExists(Automation2Workflow):
return []
rf: Dict[str, Any] = {
@ -218,7 +224,7 @@ class Automation2Objects:
out["invocations"] = normalize_invocations_list(out.get("invocations"))
try:
from modules.shared.callbackRegistry import callbackRegistry
callbackRegistry.trigger("automation2.workflow.changed")
callbackRegistry.trigger(_CALLBACK_WORKFLOW_CHANGED)
except Exception:
pass
return out
@ -228,7 +234,6 @@ class Automation2Objects:
existing = self.getWorkflow(workflowId)
if not existing:
return None
# Don't overwrite mandateId/featureInstanceId
data.pop("mandateId", None)
data.pop("featureInstanceId", None)
if "invocations" in data:
@ -238,7 +243,7 @@ class Automation2Objects:
out["invocations"] = normalize_invocations_list(out.get("invocations"))
try:
from modules.shared.callbackRegistry import callbackRegistry
callbackRegistry.trigger("automation2.workflow.changed")
callbackRegistry.trigger(_CALLBACK_WORKFLOW_CHANGED)
except Exception:
pass
return out
@ -251,7 +256,7 @@ class Automation2Objects:
self.db.recordDelete(Automation2Workflow, workflowId)
try:
from modules.shared.callbackRegistry import callbackRegistry
callbackRegistry.trigger("automation2.workflow.changed")
callbackRegistry.trigger(_CALLBACK_WORKFLOW_CHANGED)
except Exception:
pass
return True
@ -322,7 +327,7 @@ class Automation2Objects:
return [dict(r) for r in records] if records else []
def getRecentCompletedRuns(self, limit: int = 20) -> List[Dict[str, Any]]:
"""Get recently completed runs for workflows in this instance (for output display)."""
"""Get recently completed runs for workflows in this instance."""
if not self.db._ensureTableExists(Automation2WorkflowRun):
return []
workflows = self.getWorkflows()
@ -426,10 +431,7 @@ class Automation2Objects:
status: str = None,
assigneeId: str = None,
) -> List[Dict[str, Any]]:
"""Get tasks with optional filters.
When assigneeId is set: returns tasks assigned to that user OR unassigned (so schedule tasks show up).
When assigneeId is None: returns all tasks.
"""
"""Get tasks with optional filters."""
if not self.db._ensureTableExists(Automation2HumanTask):
return []
base_rf: Dict[str, Any] = {}
@ -461,3 +463,171 @@ class Automation2Objects:
workflows = {w["id"]: w for w in self.getWorkflows()}
filtered = [t for t in items if t.get("workflowId") in workflows]
return filtered
# -------------------------------------------------------------------------
# Versions (AutoVersion Lifecycle)
# -------------------------------------------------------------------------
def getVersions(self, workflowId: str) -> List[Dict[str, Any]]:
    """Return all versions of *workflowId*, newest ``versionNumber`` first."""
    if not self.db._ensureTableExists(AutoVersion):
        return []
    rows = self.db.getRecordset(AutoVersion, recordFilter={"workflowId": workflowId})
    if not rows:
        return []
    # Missing versionNumber sorts as 0, i.e. oldest.
    return sorted(
        (dict(row) for row in rows),
        key=lambda rec: rec.get("versionNumber", 0),
        reverse=True,
    )
def getVersion(self, versionId: str) -> Optional[Dict[str, Any]]:
    """Fetch a single version row by primary key; None when absent."""
    if not self.db._ensureTableExists(AutoVersion):
        return None
    row = self.db.getRecord(AutoVersion, versionId)
    if not row:
        return None
    return dict(row)
def createDraftVersion(self, workflowId: str) -> Optional[Dict[str, Any]]:
    """Create a new draft AutoVersion snapshotting the workflow's current state.

    The new version gets ``versionNumber = max(existing) + 1`` (1 for the first
    version), status ``"draft"``, and copies of the workflow's current graph
    and invocations.

    Args:
        workflowId: ID of the workflow to snapshot.

    Returns:
        The created version record as a dict, or None when the workflow
        does not exist.
    """
    wf = self.getWorkflow(workflowId)
    if not wf:
        return None
    existing = self.getVersions(workflowId)
    # max(..., default=0) handles the first-version case (no prior rows).
    nextNumber = max((v.get("versionNumber", 0) for v in existing), default=0) + 1
    # NOTE(review): no createdAt timestamp is recorded here (the original
    # imported `time` but never used it) — confirm whether AutoVersion
    # tracks creation time elsewhere.
    data = {
        "id": str(uuid.uuid4()),
        "workflowId": workflowId,
        "versionNumber": nextNumber,
        "status": "draft",
        "graph": wf.get("graph", {}),
        "invocations": wf.get("invocations", []),
    }
    created = self.db.recordCreate(AutoVersion, data)
    return dict(created)
def publishVersion(self, versionId: str, userId: Optional[str] = None) -> Optional[Dict[str, Any]]:
    """Publish a draft version and archive the previously published one.

    Args:
        versionId: ID of the draft version to publish.
        userId: Recorded as ``publishedBy`` on the version row.

    Returns:
        The updated version record as a dict, or None when the version is
        missing or not in "draft" status.
    """
    # Only drafts may be published; anything else is rejected.
    version = self.getVersion(versionId)
    if not version or version.get("status") != "draft":
        return None
    workflowId = version.get("workflowId")
    existing = self.getVersions(workflowId)
    # Invariant: at most one published version per workflow — demote the
    # current one (if any) to "archived" before promoting this draft.
    for v in existing:
        if v.get("status") == "published" and v.get("id") != versionId:
            self.db.recordModify(AutoVersion, v["id"], {"status": "archived"})
    import time
    updated = self.db.recordModify(AutoVersion, versionId, {
        "status": "published",
        "publishedAt": time.time(),
        "publishedBy": userId,
    })
    # Mirror the published graph/invocations onto the workflow row so other
    # readers of the workflow see the published state.
    if workflowId:
        self.db.recordModify(AutoWorkflow, workflowId, {
            "currentVersionId": versionId,
            "graph": version.get("graph", {}),
            "invocations": version.get("invocations", []),
        })
    return dict(updated)
def unpublishVersion(self, versionId: str) -> Optional[Dict[str, Any]]:
    """Demote a published version back to draft and clear publish metadata."""
    version = self.getVersion(versionId)
    if not version or version.get("status") != "published":
        return None
    reverted = self.db.recordModify(
        AutoVersion,
        versionId,
        {"status": "draft", "publishedAt": None, "publishedBy": None},
    )
    workflowId = version.get("workflowId")
    if workflowId:
        # The workflow no longer points at any published version.
        self.db.recordModify(AutoWorkflow, workflowId, {"currentVersionId": None})
    return dict(reverted)
def archiveVersion(self, versionId: str) -> Optional[Dict[str, Any]]:
    """Mark a version as archived; returns the updated row, or None if missing."""
    if not self.getVersion(versionId):
        return None
    archived = self.db.recordModify(AutoVersion, versionId, {"status": "archived"})
    return dict(archived)
# -------------------------------------------------------------------------
# Templates
# -------------------------------------------------------------------------
def getTemplates(self, scope: str = None) -> List[Dict[str, Any]]:
    """List workflow templates for this mandate/instance, optionally by scope."""
    if not self.db._ensureTableExists(AutoWorkflow):
        return []
    recordFilter: Dict[str, Any] = {
        "mandateId": self.mandateId,
        "featureInstanceId": self.featureInstanceId,
        "isTemplate": True,
    }
    if scope:
        recordFilter["templateScope"] = scope
    rows = self.db.getRecordset(AutoWorkflow, recordFilter=recordFilter)
    if not rows:
        return []
    return [dict(row) for row in rows]
def createTemplateFromWorkflow(self, workflowId: str, scope: str = "user") -> Optional[Dict[str, Any]]:
    """Snapshot a workflow into a template.

    Prefers the graph/invocations of the workflow's currentVersionId (when
    set and resolvable); otherwise falls back to the live workflow graph.
    The template is created inactive.
    """
    wf = self.getWorkflow(workflowId)
    if not wf:
        return None
    graph = wf.get("graph", {})
    invocations = wf.get("invocations", [])
    versionId = wf.get("currentVersionId")
    if versionId:
        version = self.getVersion(versionId)
        if version:
            # Per-field fallback: keep the workflow value when the version
            # row lacks the key.
            graph = version.get("graph", graph)
            invocations = version.get("invocations", invocations)
    template = {
        "id": str(uuid.uuid4()),
        "mandateId": self.mandateId,
        "featureInstanceId": self.featureInstanceId,
        "label": f"{wf.get('label', 'Workflow')} (Template)",
        "graph": graph,
        "invocations": invocations,
        "isTemplate": True,
        "templateScope": scope,
        "templateSourceId": workflowId,
        "active": False,
    }
    created = self.db.recordCreate(AutoWorkflow, template)
    return dict(created)
def copyTemplateToUser(self, templateId: str) -> Optional[Dict[str, Any]]:
    """Clone a template into a new active, user-scoped workflow."""
    template = self.getWorkflow(templateId)
    if not template or not template.get("isTemplate"):
        return None
    # Strip the " (Template)" suffix added by createTemplateFromWorkflow.
    label = template.get("label", "Workflow").replace(" (Template)", "")
    clone = {
        "id": str(uuid.uuid4()),
        "mandateId": self.mandateId,
        "featureInstanceId": self.featureInstanceId,
        "label": label,
        "graph": template.get("graph", {}),
        "invocations": template.get("invocations", []),
        "isTemplate": False,
        "templateSourceId": templateId,
        "templateScope": "user",
        "active": True,
    }
    created = self.db.recordCreate(AutoWorkflow, clone)
    return dict(created)
def shareTemplate(self, templateId: str, scope: str) -> Optional[Dict[str, Any]]:
    """Change a template's scope and mark it read-only for shared users."""
    template = self.getWorkflow(templateId)
    if not template or not template.get("isTemplate"):
        return None
    changes = {"templateScope": scope, "sharedReadOnly": True}
    updated = self.db.recordModify(AutoWorkflow, templateId, changes)
    return dict(updated)
# Backward-compatible alias: legacy callers import Automation2Objects while
# the class migrates to its GraphicalEditor name.
Automation2Objects = GraphicalEditorObjects

View file

@ -1,7 +1,7 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Automation2 Feature - n8n-style flow automation.
GraphicalEditor Feature - n8n-style flow automation.
Minimal bootstrap for feature instance creation. Build from here.
"""
@ -10,9 +10,8 @@ from typing import Dict, List, Any, Optional
logger = logging.getLogger(__name__)
FEATURE_CODE = "automation2"
FEATURE_CODE = "graphicalEditor"
# Services required for automation2 (methodDiscovery, ActionExecutor, etc.)
REQUIRED_SERVICES = [
{"serviceKey": "chat", "meta": {"usage": "Interfaces, RBAC"}},
{"serviceKey": "utils", "meta": {"usage": "Timestamps, utilities"}},
@ -22,82 +21,96 @@ REQUIRED_SERVICES = [
{"serviceKey": "clickup", "meta": {"usage": "ClickUp actions"}},
{"serviceKey": "generation", "meta": {"usage": "file.create document rendering"}},
]
FEATURE_LABEL = {"en": "Automation 2", "de": "Automatisierung 2", "fr": "Automatisation 2"}
FEATURE_LABEL = {"en": "Graphical Editor", "de": "Grafischer Editor", "fr": "Éditeur graphique"}
FEATURE_ICON = "mdi-sitemap"
UI_OBJECTS = [
{
"objectKey": "ui.feature.automation2.editor",
"objectKey": "ui.feature.graphicalEditor.dashboard",
"label": {"en": "Dashboard", "de": "Dashboard", "fr": "Tableau de bord"},
"meta": {"area": "dashboard"}
},
{
"objectKey": "ui.feature.graphicalEditor.editor",
"label": {"en": "Editor", "de": "Editor", "fr": "Éditeur"},
"meta": {"area": "editor"}
},
{
"objectKey": "ui.feature.automation2.workflows",
"objectKey": "ui.feature.graphicalEditor.workflows",
"label": {"en": "Workflows", "de": "Workflows", "fr": "Workflows"},
"meta": {"area": "workflows"}
},
{
"objectKey": "ui.feature.automation2.workflows-tasks",
"objectKey": "ui.feature.graphicalEditor.workflows-tasks",
"label": {"en": "Tasks", "de": "Tasks", "fr": "Tâches"},
"meta": {"area": "tasks"}
},
{
"objectKey": "ui.feature.graphicalEditor.templates",
"label": {"en": "Templates", "de": "Vorlagen", "fr": "Modèles"},
"meta": {"area": "templates"}
},
]
RESOURCE_OBJECTS = [
{
"objectKey": "resource.feature.automation2.dashboard",
"objectKey": "resource.feature.graphicalEditor.dashboard",
"label": {"en": "Access Dashboard", "de": "Dashboard aufrufen", "fr": "Acceder au tableau de bord"},
"meta": {"endpoint": "/api/automation2/{instanceId}/info", "method": "GET"}
"meta": {"endpoint": "/api/workflows/{instanceId}/info", "method": "GET"}
},
{
"objectKey": "resource.feature.automation2.node-types",
"objectKey": "resource.feature.graphicalEditor.node-types",
"label": {"en": "Get Node Types", "de": "Node-Typen abrufen", "fr": "Obtenir types de nœuds"},
"meta": {"endpoint": "/api/automation2/{instanceId}/node-types", "method": "GET"}
"meta": {"endpoint": "/api/workflows/{instanceId}/node-types", "method": "GET"}
},
{
"objectKey": "resource.feature.automation2.execute",
"objectKey": "resource.feature.graphicalEditor.execute",
"label": {"en": "Execute Workflow", "de": "Workflow ausführen", "fr": "Exécuter le workflow"},
"meta": {"endpoint": "/api/automation2/{instanceId}/execute", "method": "POST"}
"meta": {"endpoint": "/api/workflows/{instanceId}/execute", "method": "POST"}
},
]
TEMPLATE_ROLES = [
{
"roleLabel": "automation2-viewer",
"roleLabel": "graphicalEditor-viewer",
"description": {
"en": "Automation2 Viewer - View workflows (read-only)",
"de": "Automation2 Betrachter - Workflows ansehen (nur lesen)",
"fr": "Visualiseur Automation2 - Consulter les workflows (lecture seule)",
"en": "GraphicalEditor Viewer - View workflows (read-only)",
"de": "Grafischer Editor Betrachter - Workflows ansehen (nur lesen)",
"fr": "Visualiseur Éditeur graphique - Consulter les workflows (lecture seule)",
},
"accessRules": [
{"context": "UI", "item": "ui.feature.automation2.workflows", "view": True},
{"context": "UI", "item": "ui.feature.automation2.workflows-tasks", "view": True},
{"context": "UI", "item": "ui.feature.graphicalEditor.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.graphicalEditor.workflows", "view": True},
{"context": "UI", "item": "ui.feature.graphicalEditor.workflows-tasks", "view": True},
{"context": "UI", "item": "ui.feature.graphicalEditor.templates", "view": True},
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"},
],
},
{
"roleLabel": "automation2-user",
"roleLabel": "graphicalEditor-user",
"description": {
"en": "Automation2 User - Use automation2 flow builder",
"de": "Automation2 Benutzer - Flow-Builder nutzen",
"fr": "Utilisateur Automation2 - Utiliser le flow builder",
"en": "GraphicalEditor User - Use flow builder",
"de": "Grafischer Editor Benutzer - Flow-Builder nutzen",
"fr": "Utilisateur Éditeur graphique - Utiliser le flow builder",
},
"accessRules": [
{"context": "UI", "item": "ui.feature.automation2.editor", "view": True},
{"context": "UI", "item": "ui.feature.automation2.workflows", "view": True},
{"context": "UI", "item": "ui.feature.automation2.workflows-tasks", "view": True},
{"context": "RESOURCE", "item": "resource.feature.automation2.dashboard", "view": True},
{"context": "RESOURCE", "item": "resource.feature.automation2.node-types", "view": True},
{"context": "RESOURCE", "item": "resource.feature.automation2.execute", "view": True},
{"context": "UI", "item": "ui.feature.graphicalEditor.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.graphicalEditor.editor", "view": True},
{"context": "UI", "item": "ui.feature.graphicalEditor.workflows", "view": True},
{"context": "UI", "item": "ui.feature.graphicalEditor.workflows-tasks", "view": True},
{"context": "UI", "item": "ui.feature.graphicalEditor.templates", "view": True},
{"context": "RESOURCE", "item": "resource.feature.graphicalEditor.dashboard", "view": True},
{"context": "RESOURCE", "item": "resource.feature.graphicalEditor.node-types", "view": True},
{"context": "RESOURCE", "item": "resource.feature.graphicalEditor.execute", "view": True},
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "m"},
],
},
{
"roleLabel": "automation2-admin",
"roleLabel": "graphicalEditor-admin",
"description": {
"en": "Automation2 Admin - Full UI and API for the instance; data remains user-scoped (MY)",
"de": "Automation2 Admin - Volle UI und API für die Instanz; Daten weiterhin benutzerspezifisch (MY)",
"fr": "Administrateur Automation2 - UI et API complets pour l'instance; donnees limitees a l'utilisateur (MY)",
"en": "GraphicalEditor Admin - Full UI and API for the instance; data remains user-scoped (MY)",
"de": "Grafischer Editor Admin - Volle UI und API für die Instanz; Daten weiterhin benutzerspezifisch (MY)",
"fr": "Administrateur Éditeur graphique - UI et API complets pour l'instance; donnees limitees a l'utilisateur (MY)",
},
"accessRules": [
{"context": "UI", "item": None, "view": True},
@ -113,14 +126,14 @@ def getRequiredServiceKeys() -> List[str]:
return [s["serviceKey"] for s in REQUIRED_SERVICES]
def getAutomation2Services(
def getGraphicalEditorServices(
user,
mandateId: Optional[str] = None,
featureInstanceId: Optional[str] = None,
workflow=None,
) -> "_Automation2ServiceHub":
) -> "_GraphicalEditorServiceHub":
"""
Get a service hub for automation2 using the service center.
Get a service hub for graphicalEditor using the service center.
Used for methodDiscovery (I/O nodes) and execution (ActionExecutor).
"""
from modules.serviceCenter import getService
@ -141,7 +154,7 @@ def getAutomation2Services(
workflow=_workflow,
)
hub = _Automation2ServiceHub()
hub = _GraphicalEditorServiceHub()
hub.user = user
hub.mandateId = mandateId
hub.featureInstanceId = featureInstanceId
@ -155,7 +168,7 @@ def getAutomation2Services(
svc = getService(key, ctx)
setattr(hub, key, svc)
except Exception as e:
logger.warning(f"Could not resolve service '{key}' for automation2: {e}")
logger.warning(f"Could not resolve service '{key}' for graphicalEditor: {e}")
setattr(hub, key, None)
if hub.chat:
@ -167,8 +180,12 @@ def getAutomation2Services(
return hub
class _Automation2ServiceHub:
"""Lightweight hub for automation2 (methodDiscovery, execution)."""
# Backward-compatible alias used by workflows/automation2/ execution engine
getAutomation2Services = getGraphicalEditorServices
class _GraphicalEditorServiceHub:
"""Lightweight hub for graphicalEditor (methodDiscovery, execution)."""
user = None
mandateId = None
@ -190,12 +207,14 @@ class _Automation2ServiceHub:
async def onStart(eventUser) -> None:
"""Feature startup. Email poller is started on-demand when a run pauses for email.checkEmail."""
"""Feature startup: start consolidated scheduler."""
from modules.workflows.scheduler.mainScheduler import start as startScheduler
startScheduler(eventUser)
async def onStop(eventUser) -> None:
"""Feature shutdown - remove email poller if running."""
from modules.features.automation2.emailPoller import stop as stopEmailPoller
from modules.features.graphicalEditor.emailPoller import stop as stopEmailPoller
stopEmailPoller(eventUser)
@ -283,7 +302,6 @@ def _syncTemplateRolesToDb() -> int:
_ensureAccessRulesForRole(rootInterface, roleId, template.get("accessRules", []))
# Sync same rules to mandate-specific roles (so Workflows & Tasks etc. appear in sidebar)
for r in existingRoles:
if r.mandateId and r.roleLabel == roleLabel:
added = _ensureAccessRulesForRole(

View file

@ -1,14 +1,14 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Node Type Registry for automation2 - static node definitions (ai, email, sharepoint, trigger, flow, data, input).
Node Type Registry for graphicalEditor - static node definitions (ai, email, sharepoint, trigger, flow, data, input).
Nodes are defined first; IO/method actions are used at execution time.
"""
import logging
from typing import Dict, List, Any
from modules.features.automation2.nodeDefinitions import STATIC_NODE_TYPES
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
logger = logging.getLogger(__name__)

View file

@ -1,26 +1,31 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Automation2 routes - node-types, execute, workflows, runs, tasks, connections, browse.
GraphicalEditor routes - node-types, execute, workflows, runs, tasks, connections, browse.
"""
import asyncio
import json
import logging
from typing import Any, Dict, Optional
import math
from typing import Any, Dict, List, Optional
from fastapi import APIRouter, Depends, Path, Query, Body, Request, HTTPException
from fastapi.responses import JSONResponse
from fastapi.responses import JSONResponse, StreamingResponse
from modules.auth import limiter, getRequestContext, RequestContext
from modules.datamodels.datamodelPagination import PaginationParams, PaginationMetadata, normalize_pagination_dict
from modules.routes.routeDataUsers import _applyFiltersAndSort
from modules.features.automation2.mainAutomation2 import getAutomation2Services
from modules.features.automation2.nodeRegistry import getNodeTypesForApi
from modules.features.automation2.interfaceFeatureAutomation2 import getAutomation2Interface
from modules.features.graphicalEditor.mainGraphicalEditor import getGraphicalEditorServices
from modules.features.graphicalEditor.nodeRegistry import getNodeTypesForApi
from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
from modules.workflows.automation2.executionEngine import executeGraph
from modules.workflows.automation2.runEnvelope import (
default_run_envelope,
merge_run_envelope,
normalize_run_envelope,
)
from modules.features.automation2.entryPoints import find_invocation
from modules.features.graphicalEditor.entryPoints import find_invocation
logger = logging.getLogger(__name__)
@ -85,14 +90,14 @@ def _build_execute_run_envelope(
return env
router = APIRouter(
prefix="/api/automation2",
tags=["Automation2"],
prefix="/api/workflows",
tags=["GraphicalEditor"],
responses={404: {"description": "Not found"}, 403: {"description": "Forbidden"}},
)
def _validateInstanceAccess(instanceId: str, context: RequestContext) -> str:
"""Validate user has access to the automation2 feature instance. Returns mandateId."""
"""Validate user has access to the graphicalEditor feature instance. Returns mandateId."""
from fastapi import HTTPException
from modules.interfaces.interfaceDbApp import getRootInterface
@ -108,7 +113,7 @@ def _validateInstanceAccess(instanceId: str, context: RequestContext) -> str:
@router.get("/{instanceId}/info")
@limiter.limit("60/minute")
def get_automation2_info(
def get_info(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
context: RequestContext = Depends(getRequestContext),
@ -116,10 +121,10 @@ def get_automation2_info(
"""Minimal info endpoint - proves the feature works."""
_validateInstanceAccess(instanceId, context)
return {
"featureCode": "automation2",
"featureCode": "graphicalEditor",
"instanceId": instanceId,
"status": "ok",
"message": "Automation2 feature ready. Build from here.",
"message": "GraphicalEditor feature ready.",
}
@ -152,16 +157,16 @@ def get_node_types(
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Return node types for the flow builder: static + I/O from methodDiscovery."""
logger.info("automation2 node-types request: instanceId=%s language=%s", instanceId, language)
logger.info("graphicalEditor node-types request: instanceId=%s language=%s", instanceId, language)
mandateId = _validateInstanceAccess(instanceId, context)
services = getAutomation2Services(
services = getGraphicalEditorServices(
context.user,
mandateId=mandateId,
featureInstanceId=instanceId,
)
result = getNodeTypesForApi(services, language=language)
logger.info(
"automation2 node-types response: %d nodeTypes %d categories",
"graphicalEditor node-types response: %d nodeTypes %d categories",
len(result.get("nodeTypes", [])),
len(result.get("categories", [])),
)
@ -176,21 +181,20 @@ async def post_execute(
body: dict = Body(..., description="{ workflowId?, graph: { nodes, connections } }"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Execute automation2 graph. Body: { workflowId?, graph: { nodes, connections } }."""
"""Execute workflow graph. Body: { workflowId?, graph: { nodes, connections } }."""
userId = str(context.user.id) if context.user else None
logger.info(
"automation2 execute request: instanceId=%s userId=%s body_keys=%s",
"graphicalEditor execute request: instanceId=%s userId=%s body_keys=%s",
instanceId,
userId,
list(body.keys()),
)
mandateId = _validateInstanceAccess(instanceId, context)
services = getAutomation2Services(
services = getGraphicalEditorServices(
context.user,
mandateId=mandateId,
featureInstanceId=instanceId,
)
# Ensure workflow methods (outlook, ai, sharepoint, etc.) are discovered for ActionExecutor
from modules.workflows.processing.shared.methodDiscovery import discoverMethods
discoverMethods(services)
@ -199,27 +203,23 @@ async def post_execute(
req_nodes = graph.get("nodes") or []
workflow_for_envelope: Optional[Dict[str, Any]] = None
if workflowId and not str(workflowId).startswith("transient-"):
a2_pre = getAutomation2Interface(context.user, mandateId, instanceId)
workflow_for_envelope = a2_pre.getWorkflow(workflowId)
# When workflowId is set: prefer graph from request (current editor state) if it has nodes.
# Only fall back to stored workflow graph when request graph is empty (e.g. resume from email).
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
workflow_for_envelope = iface.getWorkflow(workflowId)
if workflowId and len(req_nodes) == 0:
a2 = getAutomation2Interface(context.user, mandateId, instanceId)
wf = a2.getWorkflow(workflowId)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
wf = iface.getWorkflow(workflowId)
if wf and wf.get("graph"):
graph = wf["graph"]
logger.info("automation2 execute: loaded graph from workflow %s", workflowId)
logger.info("graphicalEditor execute: loaded graph from workflow %s", workflowId)
workflow_for_envelope = wf
# Use transient workflowId when none provided (e.g. execute from editor without save)
# Required for email.checkEmail pause/resume - run must be created
if not workflowId:
import uuid
workflowId = f"transient-{uuid.uuid4().hex[:12]}"
logger.info("automation2 execute: using transient workflowId=%s", workflowId)
logger.info("graphicalEditor execute: using transient workflowId=%s", workflowId)
nodes_count = len(graph.get("nodes") or [])
connections_count = len(graph.get("connections") or [])
logger.info(
"automation2 execute: graph nodes=%d connections=%d workflowId=%s mandateId=%s",
"graphicalEditor execute: graph nodes=%d connections=%d workflowId=%s mandateId=%s",
nodes_count,
connections_count,
workflowId,
@ -227,7 +227,7 @@ async def post_execute(
)
run_env = _build_execute_run_envelope(body, workflow_for_envelope, userId)
a2_interface = getAutomation2Interface(context.user, mandateId, instanceId)
ge_interface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
result = await executeGraph(
graph=graph,
services=services,
@ -235,11 +235,11 @@ async def post_execute(
instanceId=instanceId,
userId=userId,
mandateId=mandateId,
automation2_interface=a2_interface,
automation2_interface=ge_interface,
run_envelope=run_env,
)
logger.info(
"automation2 execute result: success=%s error=%s nodeOutputs_keys=%s failedNode=%s paused=%s",
"graphicalEditor execute result: success=%s error=%s nodeOutputs_keys=%s failedNode=%s paused=%s",
result.get("success"),
result.get("error"),
list(result.get("nodeOutputs", {}).keys()) if result.get("nodeOutputs") else [],
@ -250,7 +250,368 @@ async def post_execute(
# -------------------------------------------------------------------------
# Connections and Browse (for Email/SharePoint node config - like workspace)
# Versions (AutoVersion Lifecycle)
# -------------------------------------------------------------------------
@router.get("/{instanceId}/workflows/{workflowId}/versions")
@limiter.limit("60/minute")
def get_versions(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
workflowId: str = Path(..., description="Workflow ID"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""List all versions for a workflow."""
mandateId = _validateInstanceAccess(instanceId, context)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
versions = iface.getVersions(workflowId)
return {"versions": versions}
@router.post("/{instanceId}/workflows/{workflowId}/versions/draft")
@limiter.limit("30/minute")
def create_draft_version(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
workflowId: str = Path(..., description="Workflow ID"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Create a new draft version from the workflow's current graph."""
mandateId = _validateInstanceAccess(instanceId, context)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
version = iface.createDraftVersion(workflowId)
if not version:
raise HTTPException(status_code=404, detail="Workflow not found")
return version
@router.post("/{instanceId}/versions/{versionId}/publish")
@limiter.limit("30/minute")
def publish_version(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
versionId: str = Path(..., description="Version ID"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Publish a draft version. Archives the previously published version."""
mandateId = _validateInstanceAccess(instanceId, context)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
userId = str(context.user.id) if context.user else None
version = iface.publishVersion(versionId, userId=userId)
if not version:
raise HTTPException(status_code=400, detail="Version not found or not in draft status")
return version
@router.post("/{instanceId}/versions/{versionId}/unpublish")
@limiter.limit("30/minute")
def unpublish_version(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
versionId: str = Path(..., description="Version ID"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Unpublish a version (revert to draft)."""
mandateId = _validateInstanceAccess(instanceId, context)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
version = iface.unpublishVersion(versionId)
if not version:
raise HTTPException(status_code=400, detail="Version not found or not published")
return version
@router.post("/{instanceId}/versions/{versionId}/archive")
@limiter.limit("30/minute")
def archive_version(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
versionId: str = Path(..., description="Version ID"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Archive a version."""
mandateId = _validateInstanceAccess(instanceId, context)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
version = iface.archiveVersion(versionId)
if not version:
raise HTTPException(status_code=404, detail="Version not found")
return version
# -------------------------------------------------------------------------
# Templates
# -------------------------------------------------------------------------
@router.get("/{instanceId}/templates")
@limiter.limit("60/minute")
def get_templates(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
scope: Optional[str] = Query(None, description="Filter by scope: user, instance, mandate, system"),
pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
context: RequestContext = Depends(getRequestContext),
):
"""List workflow templates with optional pagination."""
mandateId = _validateInstanceAccess(instanceId, context)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
templates = iface.getTemplates(scope=scope)
paginationParams = None
if pagination:
try:
paginationDict = json.loads(pagination)
if paginationDict:
paginationDict = normalize_pagination_dict(paginationDict)
paginationParams = PaginationParams(**paginationDict)
except (json.JSONDecodeError, ValueError) as e:
raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
if paginationParams:
filtered = _applyFiltersAndSort(templates, paginationParams)
totalItems = len(filtered)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
startIdx = (paginationParams.page - 1) * paginationParams.pageSize
endIdx = startIdx + paginationParams.pageSize
return {
"items": filtered[startIdx:endIdx],
"pagination": PaginationMetadata(
currentPage=paginationParams.page, pageSize=paginationParams.pageSize,
totalItems=totalItems, totalPages=totalPages,
sort=paginationParams.sort, filters=paginationParams.filters,
).model_dump(),
}
return {"templates": templates}
@router.post("/{instanceId}/templates/from-workflow")
@limiter.limit("30/minute")
def create_template_from_workflow(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
body: dict = Body(..., description="{ workflowId, scope? }"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Create a template from an existing workflow."""
mandateId = _validateInstanceAccess(instanceId, context)
workflowId = body.get("workflowId")
scope = body.get("scope", "user")
if not workflowId:
raise HTTPException(status_code=400, detail="workflowId required")
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
template = iface.createTemplateFromWorkflow(workflowId, scope=scope)
if not template:
raise HTTPException(status_code=404, detail="Workflow not found")
return template
@router.post("/{instanceId}/templates/{templateId}/copy")
@limiter.limit("30/minute")
def copy_template(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
templateId: str = Path(..., description="Template ID"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Copy a template to a new user-owned workflow."""
mandateId = _validateInstanceAccess(instanceId, context)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
workflow = iface.copyTemplateToUser(templateId)
if not workflow:
raise HTTPException(status_code=404, detail="Template not found")
return workflow
@router.post("/{instanceId}/templates/{templateId}/share")
@limiter.limit("30/minute")
def share_template(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
templateId: str = Path(..., description="Template ID"),
body: dict = Body(..., description="{ scope }"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Share a template by changing its scope."""
mandateId = _validateInstanceAccess(instanceId, context)
scope = body.get("scope")
if not scope or scope not in ("user", "instance", "mandate", "system"):
raise HTTPException(status_code=400, detail="scope must be user, instance, mandate, or system")
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
template = iface.shareTemplate(templateId, scope=scope)
if not template:
raise HTTPException(status_code=404, detail="Template not found")
return template
# -------------------------------------------------------------------------
# AI Chat for Editor
# -------------------------------------------------------------------------
@router.post("/{instanceId}/{workflowId}/chat/stream")
@limiter.limit("30/minute")
async def post_editor_chat(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
workflowId: str = Path(..., description="Workflow ID"),
body: dict = Body(..., description="{ message, conversationHistory?, userLanguage? }"),
context: RequestContext = Depends(getRequestContext),
):
"""AI chat endpoint for the editor with SSE streaming. Uses workflow tools to mutate the graph."""
mandateId = _validateInstanceAccess(instanceId, context)
message = body.get("message", "")
if not message:
raise HTTPException(status_code=400, detail="message required")
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
wf = iface.getWorkflow(workflowId)
if not wf:
raise HTTPException(status_code=404, detail="Workflow not found")
userLanguage = body.get("userLanguage", "de")
conversationHistory = body.get("conversationHistory") or []
from modules.serviceCenter.core.serviceStreaming import get_event_manager
sseEventManager = get_event_manager()
queueId = f"ge-chat-{workflowId}-{id(request)}"
sseEventManager.create_queue(queueId)
agentTask = asyncio.ensure_future(
_runEditorAgent(
workflowId=workflowId,
queueId=queueId,
prompt=message,
instanceId=instanceId,
user=context.user,
mandateId=mandateId,
sseEventManager=sseEventManager,
userLanguage=userLanguage,
conversationHistory=conversationHistory,
)
)
sseEventManager.register_agent_task(queueId, agentTask)
async def _sseGenerator():
queue = sseEventManager.get_queue(queueId)
if not queue:
return
while True:
try:
event = await asyncio.wait_for(queue.get(), timeout=120)
except asyncio.TimeoutError:
yield "data: {\"type\": \"keepalive\"}\n\n"
continue
if event is None:
break
ssePayload = event.get("data", event) if isinstance(event, dict) else event
yield f"data: {json.dumps(ssePayload, default=str)}\n\n"
eventType = ssePayload.get("type", "") if isinstance(ssePayload, dict) else ""
if eventType in ("complete", "error", "stopped"):
break
await sseEventManager.cleanup(queueId, delay=30)
return StreamingResponse(
_sseGenerator(),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no",
},
)
async def _runEditorAgent(
    workflowId: str,
    queueId: str,
    prompt: str,
    instanceId: str,
    user=None,
    mandateId: str = "",
    sseEventManager=None,
    userLanguage: str = "de",
    conversationHistory: Optional[List[Dict[str, Any]]] = None,
):
    """Run the serviceAgent loop with the workflow toolbox and forward its
    events to the SSE queue identified by ``queueId``.

    Always emits a terminal event ("complete", "stopped" on cancellation, or
    "error") so the SSE generator on the consumer side can close the stream.

    Args:
        workflowId: Workflow the agent operates on.
        queueId: SSE queue to forward events into.
        prompt: User message driving the agent.
        instanceId: Feature instance id, passed into the service context.
        user: Acting user (project type); forwarded to the service context.
        mandateId: Mandate id for the service context.
        sseEventManager: Event manager owning the queue; expected non-None
            in practice (the caller always passes one).
        userLanguage: Language hint for agent responses.
        conversationHistory: Prior chat turns; defaults to an empty list.
    """
    try:
        # Imported lazily to avoid import cycles at module load time.
        from modules.serviceCenter import getService
        from modules.serviceCenter.context import ServiceCenterContext
        from modules.serviceCenter.services.serviceAgent.datamodelAgent import AgentEventTypeEnum
        ctx = ServiceCenterContext(
            user=user,
            mandate_id=mandateId,
            feature_instance_id=instanceId,
            workflow_id=workflowId,
            feature_code="graphicalEditor",
        )
        agentService = getService("agent", ctx)
        systemPrompt = (
            "You are a workflow editor assistant. The user describes changes to a workflow graph. "
            "Use the available workflow tools (readWorkflowGraph, addNode, removeNode, connectNodes, "
            "setNodeParameter, listAvailableNodeTypes, validateGraph) to modify the graph. "
            "Always read the current graph first before making changes. "
            "Respond concisely and confirm what you changed."
        )
        async for event in agentService.runAgent(
            prompt=prompt,
            workflowId=workflowId,
            userLanguage=userLanguage,
            conversationHistory=conversationHistory or [],
            toolSet="core",
            additionalTools=None,
            systemPromptOverride=systemPrompt,
        ):
            # Stop forwarding as soon as the client requested cancellation.
            if sseEventManager.is_cancelled(queueId):
                logger.info("Editor chat agent cancelled for workflow %s", workflowId)
                break
            sseEvent = {
                "type": event.type.value if hasattr(event.type, "value") else event.type,
                "workflowId": workflowId,
            }
            if event.content:
                sseEvent["content"] = event.content
            if event.data:
                sseEvent["item"] = event.data
            await sseEventManager.emit_event(queueId, sseEvent["type"], sseEvent)
            if event.type in (AgentEventTypeEnum.FINAL, AgentEventTypeEnum.ERROR):
                break
        await sseEventManager.emit_event(queueId, "complete", {
            "type": "complete",
            "workflowId": workflowId,
        })
    except asyncio.CancelledError:
        logger.info("Editor chat agent task cancelled for workflow %s", workflowId)
        await sseEventManager.emit_event(queueId, "stopped", {
            "type": "stopped",
            "workflowId": workflowId,
        })
    except Exception as e:
        logger.error("Editor chat agent error: %s", e, exc_info=True)
        await sseEventManager.emit_event(queueId, "error", {
            "type": "error",
            "content": str(e),
            "workflowId": workflowId,
        })
    finally:
        # sseEventManager defaults to None — guard so cleanup itself can
        # never raise and mask the original exception.
        if sseEventManager is not None:
            sseEventManager._unregister_agent_task(queueId)
# -------------------------------------------------------------------------
# Connections and Browse (for Email/SharePoint node config)
# -------------------------------------------------------------------------
@ -273,7 +634,7 @@ def _buildResolverDbInterface(chatService):
@router.get("/{instanceId}/connections")
@limiter.limit("300/minute")
def list_automation2_connections(
def list_connections(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
context: RequestContext = Depends(getRequestContext),
@ -441,20 +802,17 @@ def get_workflows(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
active: Optional[bool] = Query(None, description="Filter by active: true|false"),
pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""List all workflows for this feature instance.
Enriches each workflow with runCount, isRunning, stuckAtNodeId, stuckAtNodeLabel,
createdAt, lastStartedAt.
Query param active: filter by active status (true|false).
"""
):
"""List all workflows for this feature instance."""
mandateId = _validateInstanceAccess(instanceId, context)
a2 = getAutomation2Interface(context.user, mandateId, instanceId)
items = a2.getWorkflows(active=active)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
items = iface.getWorkflows(active=active)
enriched = []
for wf in items:
wf_id = wf.get("id")
runs = a2.getRunsByWorkflow(wf_id) if wf_id else []
runs = iface.getRunsByWorkflow(wf_id) if wf_id else []
run_count = len(runs)
active_run = None
last_started_at = None
@ -478,6 +836,31 @@ def get_workflows(
"createdAt": wf.get("sysCreatedAt"),
"lastStartedAt": last_started_at,
})
paginationParams = None
if pagination:
try:
paginationDict = json.loads(pagination)
if paginationDict:
paginationDict = normalize_pagination_dict(paginationDict)
paginationParams = PaginationParams(**paginationDict)
except (json.JSONDecodeError, ValueError) as e:
raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
if paginationParams:
filtered = _applyFiltersAndSort(enriched, paginationParams)
totalItems = len(filtered)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
startIdx = (paginationParams.page - 1) * paginationParams.pageSize
endIdx = startIdx + paginationParams.pageSize
return {
"items": filtered[startIdx:endIdx],
"pagination": PaginationMetadata(
currentPage=paginationParams.page, pageSize=paginationParams.pageSize,
totalItems=totalItems, totalPages=totalPages,
sort=paginationParams.sort, filters=paginationParams.filters,
).model_dump(),
}
return {"workflows": enriched}
@ -491,8 +874,8 @@ def get_workflow(
) -> dict:
"""Get a single workflow by ID."""
mandateId = _validateInstanceAccess(instanceId, context)
a2 = getAutomation2Interface(context.user, mandateId, instanceId)
wf = a2.getWorkflow(workflowId)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
wf = iface.getWorkflow(workflowId)
if not wf:
raise HTTPException(status_code=404, detail="Workflow not found")
return wf
@ -508,8 +891,8 @@ def create_workflow(
) -> dict:
"""Create a new workflow."""
mandateId = _validateInstanceAccess(instanceId, context)
a2 = getAutomation2Interface(context.user, mandateId, instanceId)
created = a2.createWorkflow(body)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
created = iface.createWorkflow(body)
return created
@ -524,8 +907,8 @@ def update_workflow(
) -> dict:
"""Update a workflow."""
mandateId = _validateInstanceAccess(instanceId, context)
a2 = getAutomation2Interface(context.user, mandateId, instanceId)
updated = a2.updateWorkflow(workflowId, body)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
updated = iface.updateWorkflow(workflowId, body)
if not updated:
raise HTTPException(status_code=404, detail="Workflow not found")
return updated
@ -541,8 +924,8 @@ def delete_workflow(
) -> dict:
"""Delete a workflow."""
mandateId = _validateInstanceAccess(instanceId, context)
a2 = getAutomation2Interface(context.user, mandateId, instanceId)
if not a2.deleteWorkflow(workflowId):
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
if not iface.deleteWorkflow(workflowId):
raise HTTPException(status_code=404, detail="Workflow not found")
return {"success": True}
@ -557,14 +940,11 @@ async def post_workflow_webhook(
body: dict = Body(default_factory=dict),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""
Invoke a workflow via a webhook entry point. Optional shared secret in
X-Automation2-Webhook-Secret or X-Webhook-Secret when config.webhookSecret is set.
"""
"""Invoke a workflow via a webhook entry point."""
mandateId = _validateInstanceAccess(instanceId, context)
userId = str(context.user.id) if context.user else None
a2 = getAutomation2Interface(context.user, mandateId, instanceId)
wf = a2.getWorkflow(workflowId)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
wf = iface.getWorkflow(workflowId)
if not wf or not wf.get("graph"):
raise HTTPException(status_code=404, detail="Workflow not found")
inv = find_invocation(wf, entryPointId)
@ -577,19 +957,16 @@ async def post_workflow_webhook(
cfg = inv.get("config") or {}
secret = cfg.get("webhookSecret")
if secret:
hdr = request.headers.get("X-Automation2-Webhook-Secret") or request.headers.get(
"X-Webhook-Secret"
)
hdr = request.headers.get("X-Webhook-Secret")
if hdr != str(secret):
raise HTTPException(status_code=403, detail="Invalid webhook secret")
services = getAutomation2Services(
services = getGraphicalEditorServices(
context.user,
mandateId=mandateId,
featureInstanceId=instanceId,
)
from modules.workflows.processing.shared.methodDiscovery import discoverMethods
discoverMethods(services)
title = inv.get("title") or {}
@ -615,7 +992,7 @@ async def post_workflow_webhook(
instanceId=instanceId,
userId=userId,
mandateId=mandateId,
automation2_interface=a2,
automation2_interface=iface,
run_envelope=run_env,
)
return result
@ -634,8 +1011,8 @@ async def post_workflow_form_submit(
"""Form-style submit: same as execute with trigger.type form and payload from body."""
mandateId = _validateInstanceAccess(instanceId, context)
userId = str(context.user.id) if context.user else None
a2 = getAutomation2Interface(context.user, mandateId, instanceId)
wf = a2.getWorkflow(workflowId)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
wf = iface.getWorkflow(workflowId)
if not wf or not wf.get("graph"):
raise HTTPException(status_code=404, detail="Workflow not found")
inv = find_invocation(wf, entryPointId)
@ -646,13 +1023,12 @@ async def post_workflow_form_submit(
if not inv.get("enabled", True):
raise HTTPException(status_code=400, detail="Entry point is disabled")
services = getAutomation2Services(
services = getGraphicalEditorServices(
context.user,
mandateId=mandateId,
featureInstanceId=instanceId,
)
from modules.workflows.processing.shared.methodDiscovery import discoverMethods
discoverMethods(services)
title = inv.get("title") or {}
@ -678,7 +1054,7 @@ async def post_workflow_form_submit(
instanceId=instanceId,
userId=userId,
mandateId=mandateId,
automation2_interface=a2,
automation2_interface=iface,
run_envelope=run_env,
)
return result
@ -697,10 +1073,10 @@ def get_completed_runs(
limit: int = Query(20, ge=1, le=50),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Get recently completed runs with output (for Tasks page output section)."""
"""Get recently completed runs with output."""
mandateId = _validateInstanceAccess(instanceId, context)
a2 = getAutomation2Interface(context.user, mandateId, instanceId)
runs = a2.getRecentCompletedRuns(limit=limit)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
runs = iface.getRecentCompletedRuns(limit=limit)
return {"runs": runs}
@ -714,13 +1090,33 @@ def get_workflow_runs(
) -> dict:
"""Get runs for a workflow."""
mandateId = _validateInstanceAccess(instanceId, context)
a2 = getAutomation2Interface(context.user, mandateId, instanceId)
if not a2.getWorkflow(workflowId):
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
if not iface.getWorkflow(workflowId):
raise HTTPException(status_code=404, detail="Workflow not found")
runs = a2.getRunsByWorkflow(workflowId)
runs = iface.getRunsByWorkflow(workflowId)
return {"runs": runs}
@router.get("/{instanceId}/runs/{runId}/steps")
@limiter.limit("60/minute")
def get_run_steps(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
runId: str = Path(..., description="Run ID"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Get step logs for a run (AutoStepLog entries)."""
mandateId = _validateInstanceAccess(instanceId, context)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoStepLog
if not iface.db._ensureTableExists(AutoStepLog):
return {"steps": []}
records = iface.db.getRecordset(AutoStepLog, recordFilter={"runId": runId})
steps = [dict(r) for r in records] if records else []
steps.sort(key=lambda s: s.get("startedAt") or 0)
return {"steps": steps}
@router.post("/{instanceId}/runs/{runId}/resume")
@limiter.limit("30/minute")
async def resume_run(
@ -732,32 +1128,29 @@ async def resume_run(
) -> dict:
"""Resume a paused run after task completion."""
mandateId = _validateInstanceAccess(instanceId, context)
a2 = getAutomation2Interface(context.user, mandateId, instanceId)
run = a2.getRun(runId)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
run = iface.getRun(runId)
if not run:
raise HTTPException(status_code=404, detail="Run not found")
taskId = body.get("taskId")
result = body.get("result")
if not taskId or result is None:
raise HTTPException(status_code=400, detail="taskId and result required")
task = a2.getTask(taskId)
task = iface.getTask(taskId)
if not task or task.get("runId") != runId:
raise HTTPException(status_code=404, detail="Task not found")
if task.get("status") != "pending":
raise HTTPException(status_code=400, detail="Task already completed")
a2.updateTask(taskId, status="completed", result=result)
iface.updateTask(taskId, status="completed", result=result)
nodeId = task.get("nodeId")
nodeOutputs = dict(run.get("nodeOutputs") or {})
nodeOutputs[nodeId] = result
runContext = run.get("context") or {}
connectionMap = runContext.get("connectionMap", {})
inputSources = runContext.get("inputSources", {})
workflowId = run.get("workflowId")
wf = a2.getWorkflow(workflowId) if workflowId else None
wf = iface.getWorkflow(workflowId) if workflowId else None
if not wf or not wf.get("graph"):
raise HTTPException(status_code=400, detail="Workflow graph not found")
graph = wf["graph"]
services = getAutomation2Services(context.user, mandateId=mandateId, featureInstanceId=instanceId)
services = getGraphicalEditorServices(context.user, mandateId=mandateId, featureInstanceId=instanceId)
resume_result = await executeGraph(
graph=graph,
services=services,
@ -765,7 +1158,7 @@ async def resume_run(
instanceId=instanceId,
userId=str(context.user.id) if context.user else None,
mandateId=mandateId,
automation2_interface=a2,
automation2_interface=iface,
initialNodeOutputs=nodeOutputs,
startAfterNodeId=nodeId,
runId=runId,
@ -787,14 +1180,12 @@ def get_tasks(
status: str = Query(None, description="Filter: pending, completed, rejected"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Get tasks - by default those assigned to current user, or all if no assignee filter.
Enriches each task with workflowLabel and createdAt (from sysCreatedAt).
"""
"""Get tasks assigned to current user."""
mandateId = _validateInstanceAccess(instanceId, context)
a2 = getAutomation2Interface(context.user, mandateId, instanceId)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
assigneeId = str(context.user.id) if context.user else None
items = a2.getTasks(workflowId=workflowId, status=status, assigneeId=assigneeId)
workflows = {w["id"]: w for w in a2.getWorkflows()}
items = iface.getTasks(workflowId=workflowId, status=status, assigneeId=assigneeId)
workflows = {w["id"]: w for w in iface.getWorkflows()}
enriched = []
for t in items:
wf = workflows.get(t.get("workflowId") or "")
@ -817,29 +1208,29 @@ async def complete_task(
) -> dict:
"""Complete a task and resume the run."""
mandateId = _validateInstanceAccess(instanceId, context)
a2 = getAutomation2Interface(context.user, mandateId, instanceId)
task = a2.getTask(taskId)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
task = iface.getTask(taskId)
if not task:
raise HTTPException(status_code=404, detail="Task not found")
runId = task.get("runId")
result = body.get("result")
if result is None:
raise HTTPException(status_code=400, detail="result required")
run = a2.getRun(runId)
run = iface.getRun(runId)
if not run:
raise HTTPException(status_code=404, detail="Run not found")
if task.get("status") != "pending":
raise HTTPException(status_code=400, detail="Task already completed")
a2.updateTask(taskId, status="completed", result=result)
iface.updateTask(taskId, status="completed", result=result)
nodeId = task.get("nodeId")
nodeOutputs = dict(run.get("nodeOutputs") or {})
nodeOutputs[nodeId] = result
workflowId = run.get("workflowId")
wf = a2.getWorkflow(workflowId) if workflowId else None
wf = iface.getWorkflow(workflowId) if workflowId else None
if not wf or not wf.get("graph"):
raise HTTPException(status_code=400, detail="Workflow graph not found")
graph = wf["graph"]
services = getAutomation2Services(context.user, mandateId=mandateId, featureInstanceId=instanceId)
services = getGraphicalEditorServices(context.user, mandateId=mandateId, featureInstanceId=instanceId)
return await executeGraph(
graph=graph,
services=services,
@ -847,8 +1238,64 @@ async def complete_task(
instanceId=instanceId,
userId=str(context.user.id) if context.user else None,
mandateId=mandateId,
automation2_interface=a2,
automation2_interface=iface,
initialNodeOutputs=nodeOutputs,
startAfterNodeId=nodeId,
runId=runId,
)
# -------------------------------------------------------------------------
# Monitoring / Metrics
# -------------------------------------------------------------------------
@router.get("/{instanceId}/metrics")
@limiter.limit("60/minute")
def get_metrics(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Aggregated metrics for the monitoring dashboard."""
mandateId = _validateInstanceAccess(instanceId, context)
iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import (
AutoWorkflow, AutoRun, AutoStepLog, AutoTask,
)
workflows = iface.db.getRecordset(AutoWorkflow, recordFilter={
"mandateId": mandateId, "featureInstanceId": instanceId, "isTemplate": False,
}) or []
runs = iface.db.getRecordset(AutoRun, recordFilter={
"workflowId": {"$in": [w.get("id") for w in workflows]} if workflows else "__none__",
}) or []
tasks = iface.db.getRecordset(AutoTask, recordFilter={
"workflowId": {"$in": [w.get("id") for w in workflows]} if workflows else "__none__",
}) or []
runsByStatus = {}
totalTokens = 0
totalCredits = 0.0
for r in runs:
s = r.get("status", "unknown")
runsByStatus[s] = runsByStatus.get(s, 0) + 1
totalTokens += r.get("costTokens", 0) or 0
totalCredits += r.get("costCredits", 0.0) or 0.0
tasksByStatus = {}
for t in tasks:
s = t.get("status", "unknown")
tasksByStatus[s] = tasksByStatus.get(s, 0) + 1
return {
"workflowCount": len(workflows),
"activeWorkflows": sum(1 for w in workflows if w.get("active")),
"totalRuns": len(runs),
"runsByStatus": runsByStatus,
"totalTasks": len(tasks),
"tasksByStatus": tasksByStatus,
"totalTokens": totalTokens,
"totalCredits": round(totalCredits, 4),
}

View file

@ -88,9 +88,6 @@ def initBootstrap(db: DatabaseConnector) -> None:
# Apply multi-tenant database optimizations (indexes, triggers, FKs)
_applyDatabaseOptimizations(db)
# Seed automation templates (after admin user exists)
initAutomationTemplates(db, adminUserId)
# Run root-user migration (one-time, sets completion flag)
migrationDone = False
try:
@ -148,86 +145,6 @@ def initBootstrap(db: DatabaseConnector) -> None:
logger.warning(f"Mandate retention purge failed: {e}")
def initAutomationTemplates(dbApp: DatabaseConnector, adminUserId: Optional[str] = None) -> None:
    """
    Seed initial automation templates from subAutomationTemplates.py.

    Only runs if no templates exist yet (bootstrap idempotency).
    Creates templates with sysCreatedBy = admin user (SysAdmin privilege).

    NOTE: AutomationTemplate lives in the poweron_automation database,
    not poweron_app!

    Args:
        dbApp: Database connector for poweron_app (used to get admin user if needed)
        adminUserId: Admin user ID for sysCreatedBy field
    """
    import json
    from modules.features.automation.subAutomationTemplates import AUTOMATION_TEMPLATES
    from modules.features.automation.datamodelFeatureAutomation import AutomationTemplate
    from modules.shared.configuration import APP_CONFIG
    # Templates live in the dedicated poweron_automation database, so a
    # separate connector is created here instead of reusing dbApp (poweron_app).
    dbHost = APP_CONFIG.get("DB_HOST", "_no_config_default_data")
    dbDatabase = "poweron_automation"
    dbUser = APP_CONFIG.get("DB_USER")
    dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET")
    dbPort = int(APP_CONFIG.get("DB_PORT", 5432))
    dbAutomation = DatabaseConnector(
        dbHost=dbHost,
        dbDatabase=dbDatabase,
        dbUser=dbUser,
        dbPassword=dbPassword,
        dbPort=dbPort,
        userId=adminUserId,
    )
    dbAutomation.initDbSystem()
    # Idempotency guard: bail out if templates already exist in poweron_automation.
    existing = dbAutomation.getRecordset(AutomationTemplate)
    if existing:
        logger.info(f"Automation templates already seeded ({len(existing)} templates)")
        return
    # Resolve the admin user from poweron_app when not passed in.
    if not adminUserId:
        adminUsers = dbApp.getRecordset(UserInDB, recordFilter={"email": APP_CONFIG.ADMIN_EMAIL})
        adminUserId = adminUsers[0]["id"] if adminUsers else None
    # Re-bind the connector context so sysCreatedBy is attributed to the admin.
    if adminUserId:
        dbAutomation.updateContext(adminUserId)
    templates = AUTOMATION_TEMPLATES.get("sets", [])
    createdCount = 0
    for i, templateSet in enumerate(templates):
        templateContent = templateSet.get("template", {})
        overview = templateContent.get("overview", f"Template {i+1}")
        # Create multilingual label from overview (used as German since the
        # current templates are German; English is required by TextMultilingual,
        # so the same value is reused for both languages).
        labelDict = {"en": overview, "ge": overview}
        overviewDict = {"en": overview, "ge": overview}
        # Create template WITHOUT parameters (no sharp values).
        templateData = {
            "label": labelDict,
            "overview": overviewDict,
            "template": json.dumps(templateContent),  # Store entire template JSON
            "isSystem": True,  # Seeded templates are system-level, visible to all users
        }
        try:
            dbAutomation.recordCreate(AutomationTemplate, templateData)
            createdCount += 1
            logger.debug(f"Created automation template: {overview}")
        except Exception as e:
            # A single failed template must not abort the remaining seeds.
            logger.error(f"Failed to create automation template '{overview}': {e}")
    logger.info(f"Seeded {createdCount} automation templates in poweron_automation database")
logger.info("System bootstrap completed")
def initRootMandateFeatures(db: DatabaseConnector, mandateId: str) -> None:
"""
Create feature instances for root mandate.
@ -517,7 +434,7 @@ def initRoles(db: DatabaseConnector) -> None:
# Check specifically for system template roles:
# mandateId=NULL, isSystemRole=True, featureCode=NULL
# Feature templates (e.g. automation admin) share the same labels but have featureCode set!
# Feature templates share the same labels but have featureCode set!
allTemplates = db.getRecordset(
Role,
recordFilter={"mandateId": None, "isSystemRole": True}
@ -549,7 +466,7 @@ def _deduplicateRoles(db: DatabaseConnector) -> None:
# Group by (roleLabel, mandateId, featureInstanceId, featureCode)
# featureCode is essential: system template ('admin', None, None, None)
# must NOT be grouped with feature template ('admin', None, None, 'automation')
# must NOT be grouped with feature template ('admin', None, None, '<featureCode>')
groups: dict = {}
for role in allRoles:
key = (role.get("roleLabel"), role.get("mandateId"), role.get("featureInstanceId"), role.get("featureCode"))
@ -986,12 +903,11 @@ def _createTableSpecificRules(db: DatabaseConnector) -> None:
# - data.uam.* → User Access Management (mandantenübergreifend)
# - data.chat.* → Chat/AI-Daten (benutzer-eigen, kein Mandantenkontext)
# - data.files.* → Dateien (benutzer-eigen)
# - data.automation.* → Automation (benutzer-eigen)
# - data.feature.* → Mandanten-/Feature-spezifische Daten (dynamisch)
#
# GROUP-Berechtigung:
# - data.uam.*: GROUP filtert nach Mandant (via UserMandate)
# - data.chat.*, data.files.*, data.automation.*: GROUP = MY (benutzer-eigen)
# - data.chat.*, data.files.*: GROUP = MY (benutzer-eigen)
# ==========================================================================
# -------------------------------------------------------------------------
@ -1248,70 +1164,6 @@ def _createTableSpecificRules(db: DatabaseConnector) -> None:
delete=AccessLevel.NONE,
))
# -------------------------------------------------------------------------
# Automation Namespace - User-owned, no mandate context
# -------------------------------------------------------------------------
# AutomationDefinition: Only MY-level access (user-owned)
for roleId in [adminId, userId]:
if roleId:
tableRules.append(AccessRule(
roleId=roleId,
context=AccessRuleContext.DATA,
item="data.automation.AutomationDefinition",
view=True,
read=AccessLevel.MY,
create=AccessLevel.MY,
update=AccessLevel.MY,
delete=AccessLevel.MY,
))
if viewerId:
tableRules.append(AccessRule(
roleId=viewerId,
context=AccessRuleContext.DATA,
item="data.automation.AutomationDefinition",
view=True,
read=AccessLevel.MY,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
# AutomationTemplate: Admin sees ALL (system templates), User sees only MY
if adminId:
tableRules.append(AccessRule(
roleId=adminId,
context=AccessRuleContext.DATA,
item="data.automation.AutomationTemplate",
view=True,
read=AccessLevel.ALL, # SysAdmin sees all templates
create=AccessLevel.ALL,
update=AccessLevel.ALL,
delete=AccessLevel.ALL,
))
if userId:
tableRules.append(AccessRule(
roleId=userId,
context=AccessRuleContext.DATA,
item="data.automation.AutomationTemplate",
view=True,
read=AccessLevel.MY,
create=AccessLevel.MY,
update=AccessLevel.MY,
delete=AccessLevel.MY,
))
if viewerId:
tableRules.append(AccessRule(
roleId=viewerId,
context=AccessRuleContext.DATA,
item="data.automation.AutomationTemplate",
view=True,
read=AccessLevel.ALL, # Viewer can see all templates (read-only)
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
# -------------------------------------------------------------------------
# Billing Namespace - Billing accounts and transactions
# -------------------------------------------------------------------------
@ -1623,12 +1475,6 @@ def _ensureDataContextRules(db: DatabaseConnector) -> None:
# Users can only manage their own records (MY-level access)
tablesNeedingMyRules = [
"data.chat.ChatWorkflow",
"data.automation.AutomationDefinition",
]
# Tables where admin sees ALL (system-wide templates)
tablesNeedingAllRulesForAdmin = [
"data.automation.AutomationTemplate",
]
# Billing tables: read-only for all roles, scoped by role level
@ -1681,47 +1527,6 @@ def _ensureDataContextRules(db: DatabaseConnector) -> None:
delete=AccessLevel.NONE,
))
# Admin rules for system templates (read ALL, write GROUP-scoped)
for objectKey in tablesNeedingAllRulesForAdmin:
# Admin: read ALL templates, create/update/delete within GROUP (mandate-scoped)
if adminId and (adminId, objectKey) not in existingCombinations:
missingRules.append(AccessRule(
roleId=adminId,
context=AccessRuleContext.DATA,
item=objectKey,
view=True,
read=AccessLevel.ALL,
create=AccessLevel.GROUP,
update=AccessLevel.GROUP,
delete=AccessLevel.GROUP,
))
# User: MY-level access
if userId and (userId, objectKey) not in existingCombinations:
missingRules.append(AccessRule(
roleId=userId,
context=AccessRuleContext.DATA,
item=objectKey,
view=True,
read=AccessLevel.MY,
create=AccessLevel.MY,
update=AccessLevel.MY,
delete=AccessLevel.MY,
))
# Viewer: ALL read-only (can see all templates)
if viewerId and (viewerId, objectKey) not in existingCombinations:
missingRules.append(AccessRule(
roleId=viewerId,
context=AccessRuleContext.DATA,
item=objectKey,
view=True,
read=AccessLevel.ALL,
create=AccessLevel.NONE,
update=AccessLevel.NONE,
delete=AccessLevel.NONE,
))
# Billing read-only rules: Admin=GROUP, User/Viewer=MY (own accounts/transactions)
for objectKey in billingReadOnlyTables:
# Admin: GROUP-level read (sees all accounts in their mandates)
@ -1806,59 +1611,6 @@ def _ensureDataContextRules(db: DatabaseConnector) -> None:
logger.info(f"Created {len(missingRules)} missing DATA context rules")
# All DATA context rules already exist (nothing to create)
# Update existing AutomationTemplate rules for admin/viewer to ALL access
_updateAutomationTemplateRulesToAll(db, adminId, viewerId)
def _updateAutomationTemplateRulesToAll(db: DatabaseConnector, adminId: Optional[str], viewerId: Optional[str]) -> None:
    """
    Update existing AutomationTemplate RBAC rules to correct levels.

    - Admin: read=ALL, create/update/delete=GROUP (mandate-scoped writes)
    - Viewer: read=ALL (read-only)

    Args:
        db: Database connector instance
        adminId: Role id of the admin role (may be None)
        viewerId: Role id of the viewer role (may be None)
    """
    # Nothing to migrate without at least one target role.
    if not adminId and not viewerId:
        return
    templateObjectKey = "data.automation.AutomationTemplate"
    # Find existing rules for AutomationTemplate
    existingRules = db.getRecordset(
        AccessRule,
        recordFilter={
            "context": AccessRuleContext.DATA.value,
            "item": templateObjectKey
        }
    )
    updatedCount = 0
    for rule in existingRules:
        ruleId = rule.get("id")
        roleId = rule.get("roleId")
        currentReadLevel = rule.get("read")
        if roleId == adminId:
            # Admin: read ALL, write GROUP
            updates = {}
            if currentReadLevel != AccessLevel.ALL.value:
                updates["read"] = AccessLevel.ALL.value
            currentCreate = rule.get("create")
            # Only downgrade writes that were previously ALL; other write
            # levels are left untouched.
            if currentCreate == AccessLevel.ALL.value:
                updates["create"] = AccessLevel.GROUP.value
                updates["update"] = AccessLevel.GROUP.value
                updates["delete"] = AccessLevel.GROUP.value
            if updates:
                db.recordModify(AccessRule, ruleId, updates)
                updatedCount += 1
                logger.debug(f"Updated AutomationTemplate rule {ruleId} for admin to read=ALL, write=GROUP")
        elif roleId == viewerId and currentReadLevel == AccessLevel.MY.value:
            # Viewer: read ALL (read-only)
            db.recordModify(AccessRule, ruleId, {"read": AccessLevel.ALL.value})
            updatedCount += 1
            logger.debug(f"Updated AutomationTemplate rule {ruleId} for viewer to read=ALL")
    if updatedCount > 0:
        logger.info(f"Updated {updatedCount} AutomationTemplate RBAC rules")
def _createResourceContextRules(db: DatabaseConnector) -> None:
@ -2002,7 +1754,6 @@ def _createStoreResourceRules(db: DatabaseConnector) -> None:
db: Database connector instance
"""
storeResources = [
"resource.store.automation",
"resource.store.teamsbot",
"resource.store.workspace",
"resource.store.commcoach",

View file

@ -74,10 +74,16 @@ TABLE_NAMESPACE = {
# Automation - benutzer-eigen
"AutomationDefinition": "automation",
"AutomationTemplate": "automation",
# Automation2 - feature-scoped
"Automation2Workflow": "automation2",
"Automation2WorkflowRun": "automation2",
"Automation2HumanTask": "automation2",
# GraphicalEditor - Greenfield DB poweron_graphicaleditor (Auto-prefix models)
"AutoWorkflow": "feature.graphicalEditor",
"AutoVersion": "feature.graphicalEditor",
"AutoRun": "feature.graphicalEditor",
"AutoStepLog": "feature.graphicalEditor",
"AutoTask": "feature.graphicalEditor",
# Legacy aliases (backward compat)
"Automation2Workflow": "feature.graphicalEditor",
"Automation2WorkflowRun": "feature.graphicalEditor",
"Automation2HumanTask": "feature.graphicalEditor",
# Knowledge Store - benutzer-eigen
"FileContentIndex": "knowledge",
"ContentChunk": "knowledge",

View file

@ -1,285 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Admin automation events routes for the backend API.
Sysadmin-only endpoints for viewing and controlling scheduler events.
"""
from fastapi import APIRouter, HTTPException, Depends, Path, Request, Response, Query
from typing import List, Dict, Any, Optional
from fastapi import status
import logging
import json
import math
# Import interfaces and models from feature containers
import modules.features.automation.interfaceFeatureAutomation as interfaceAutomation
from modules.auth import limiter, getRequestContext, requireSysAdminRole, RequestContext
from modules.datamodels.datamodelUam import User
from modules.datamodels.datamodelPagination import PaginationParams, PaginationMetadata, normalize_pagination_dict
from modules.routes.routeDataUsers import _applyFiltersAndSort, _extractDistinctValues
# Configure logger
logger = logging.getLogger(__name__)
# Create router for admin automation events endpoints
# Sysadmin-only router: every handler below depends on requireSysAdminRole.
router = APIRouter(
    prefix="/api/admin/automation-events",
    tags=["Admin Automation Events"],
    responses={
        # Documented error responses for the OpenAPI schema.
        404: {"description": "Not found"},
        400: {"description": "Bad request"},
        401: {"description": "Unauthorized"},
        403: {"description": "Forbidden - Sysadmin only"},
        500: {"description": "Internal server error"}
    }
)
def _buildEnrichedAutomationEvents(currentUser: User) -> List[Dict[str, Any]]:
    """Build the full enriched automation events list.

    Collects all scheduler jobs whose id starts with "automation." and
    enriches each job with the label, creator, mandate and feature instance
    of the matching AutomationDefinition (loaded via the system "event"
    user). Enrichment failures are logged and degrade gracefully to the
    bare job list.

    Args:
        currentUser: The authenticated user requesting the list.

    Returns:
        List of job dicts (eventId, id, automationId, nextRunTime, trigger,
        name, createdBy, mandate, featureInstance). Empty when the
        scheduler is not running.
    """
    from modules.shared.eventManagement import eventManager
    from modules.interfaces.interfaceDbApp import getRootInterface
    from modules.features.automation.mainAutomation import getAutomationServices
    if not eventManager.scheduler:
        return []
    jobPrefix = "automation."
    jobs = []
    for job in eventManager.scheduler.get_jobs():
        if job.id.startswith(jobPrefix):
            # Strip only the leading prefix. The previous str.replace() call
            # removed EVERY occurrence of "automation." in the id, which would
            # corrupt ids containing that substring more than once.
            automationId = job.id[len(jobPrefix):]
            jobs.append({
                "eventId": job.id,
                "id": job.id,
                "automationId": automationId,
                "nextRunTime": str(job.next_run_time) if job.next_run_time else None,
                "trigger": str(job.trigger) if job.trigger else None,
                "name": "",
                "createdBy": "",
                "mandate": "",
                "featureInstance": ""
            })
    if jobs:
        try:
            rootInterface = getRootInterface()
            eventUser = rootInterface.getUserByUsername("event")
            if eventUser:
                services = getAutomationServices(currentUser, mandateId=None, featureInstanceId=None)
                allAutomations = services.interfaceDbAutomation.getAllAutomationDefinitionsWithRBAC(eventUser)
                # Index automations by id; records may be dicts or model objects.
                automationLookup = {}
                for a in allAutomations:
                    aId = a.get("id", "") if isinstance(a, dict) else getattr(a, "id", "")
                    automationLookup[aId] = a
                # Per-call caches so each user/mandate/feature id is resolved
                # at most once, even when many jobs share the same context.
                _userCache: Dict[str, str] = {}
                _mandateCache: Dict[str, str] = {}
                _featureCache: Dict[str, str] = {}

                def _resolveUsername(userId):
                    # Falls back to a shortened id when lookup fails.
                    if not userId: return ""
                    if userId not in _userCache:
                        try:
                            user = rootInterface.getUser(userId)
                            _userCache[userId] = user.username if user else userId[:8]
                        except Exception:
                            _userCache[userId] = userId[:8]
                    return _userCache[userId]

                def _resolveMandateLabel(mandateId):
                    if not mandateId: return ""
                    if mandateId not in _mandateCache:
                        try:
                            mandate = rootInterface.getMandate(mandateId)
                            _mandateCache[mandateId] = getattr(mandate, "label", None) or mandateId[:8]
                        except Exception:
                            _mandateCache[mandateId] = mandateId[:8]
                    return _mandateCache[mandateId]

                def _resolveFeatureLabel(featureInstanceId):
                    if not featureInstanceId: return ""
                    if featureInstanceId not in _featureCache:
                        try:
                            instance = rootInterface.getFeatureInstance(featureInstanceId)
                            _featureCache[featureInstanceId] = getattr(instance, "label", None) or getattr(instance, "featureCode", None) or featureInstanceId[:8]
                        except Exception:
                            _featureCache[featureInstanceId] = featureInstanceId[:8]
                    return _featureCache[featureInstanceId]

                for job in jobs:
                    automation = automationLookup.get(job["automationId"])
                    if automation:
                        # Records may be dicts or model objects.
                        if isinstance(automation, dict):
                            job["name"] = automation.get("label", "")
                            job["createdBy"] = _resolveUsername(automation.get("sysCreatedBy", ""))
                            job["mandate"] = _resolveMandateLabel(automation.get("mandateId", ""))
                            job["featureInstance"] = _resolveFeatureLabel(automation.get("featureInstanceId", ""))
                        else:
                            job["name"] = getattr(automation, "label", "")
                            job["createdBy"] = _resolveUsername(getattr(automation, "sysCreatedBy", ""))
                            job["mandate"] = _resolveMandateLabel(getattr(automation, "mandateId", ""))
                            job["featureInstance"] = _resolveFeatureLabel(getattr(automation, "featureInstanceId", ""))
                    else:
                        # Scheduler job without a matching definition.
                        job["name"] = "(orphaned)"
        except Exception as e:
            # Enrichment is best-effort; return the bare job list on failure.
            logger.warning(f"Could not enrich automation events with context: {e}")
    return jobs
@router.get("")
@limiter.limit("30/minute")
def get_all_automation_events(
request: Request,
pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams"),
currentUser: User = Depends(requireSysAdminRole),
):
"""Get all active scheduler jobs with pagination support (sysadmin only)."""
try:
paginationParams: Optional[PaginationParams] = None
if pagination:
try:
paginationDict = json.loads(pagination)
if paginationDict:
paginationDict = normalize_pagination_dict(paginationDict)
paginationParams = PaginationParams(**paginationDict)
except (json.JSONDecodeError, ValueError) as e:
raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
enriched = _buildEnrichedAutomationEvents(currentUser)
filtered = _applyFiltersAndSort(enriched, paginationParams)
if paginationParams:
totalItems = len(filtered)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
startIdx = (paginationParams.page - 1) * paginationParams.pageSize
endIdx = startIdx + paginationParams.pageSize
return {
"items": filtered[startIdx:endIdx],
"pagination": PaginationMetadata(
currentPage=paginationParams.page,
pageSize=paginationParams.pageSize,
totalItems=totalItems,
totalPages=totalPages,
sort=paginationParams.sort,
filters=paginationParams.filters,
).model_dump(),
}
return {"items": enriched, "pagination": None}
except HTTPException:
raise
except Exception as e:
logger.error(f"Error getting automation events: {str(e)}")
raise HTTPException(status_code=500, detail=f"Error getting automation events: {str(e)}")
@router.get("/filter-values")
@limiter.limit("60/minute")
def get_automation_event_filter_values(
request: Request,
column: str = Query(..., description="Column key"),
pagination: Optional[str] = Query(None, description="JSON-encoded current filters"),
currentUser: User = Depends(requireSysAdminRole),
):
"""Return distinct filter values for a column in automation events."""
try:
crossFilterParams: Optional[PaginationParams] = None
if pagination:
try:
paginationDict = json.loads(pagination)
if paginationDict:
paginationDict = normalize_pagination_dict(paginationDict)
filters = paginationDict.get("filters", {})
filters.pop(column, None)
paginationDict["filters"] = filters
paginationDict.pop("sort", None)
crossFilterParams = PaginationParams(**paginationDict)
except (json.JSONDecodeError, ValueError):
pass
enriched = _buildEnrichedAutomationEvents(currentUser)
crossFiltered = _applyFiltersAndSort(enriched, crossFilterParams)
return _extractDistinctValues(crossFiltered, column)
except Exception as e:
logger.error(f"Error getting filter values: {str(e)}")
raise HTTPException(status_code=500, detail=str(e))
@router.post("/sync")
@limiter.limit("5/minute")
async def sync_all_automation_events(
request: Request,
currentUser: User = Depends(requireSysAdminRole)
) -> Dict[str, Any]:
"""
Manually trigger sync for all automations (sysadmin only).
This will register/remove events based on active flags.
"""
try:
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.workflows.automation import syncAutomationEvents
# Get event user for sync operation (routes can import from interfaces)
rootInterface = getRootInterface()
eventUser = rootInterface.getUserByUsername("event")
if not eventUser:
raise HTTPException(
status_code=500,
detail="Event user not available"
)
from modules.features.automation.mainAutomation import getAutomationServices
services = getAutomationServices(currentUser, mandateId=None, featureInstanceId=None)
result = syncAutomationEvents(services, eventUser)
return {
"success": True,
"synced": result.get("synced", 0),
"events": result.get("events", {})
}
except HTTPException:
raise
except Exception as e:
logger.error(f"Error syncing automation events: {str(e)}")
raise HTTPException(
status_code=500,
detail=f"Error syncing automation events: {str(e)}"
)
@router.post("/{eventId}/remove")
@limiter.limit("10/minute")
def remove_event(
request: Request,
eventId: str = Path(..., description="Event ID to remove"),
currentUser: User = Depends(requireSysAdminRole)
) -> Dict[str, Any]:
"""
Remove a scheduler job (sysadmin only).
Removes the job from the scheduler and clears the eventId on the automation definition.
Does NOT delete the automation definition itself.
"""
try:
from modules.shared.eventManagement import eventManager
# Remove scheduler job
eventManager.remove(eventId)
# Clear eventId on the automation definition (so it can be re-synced later)
if eventId.startswith("automation."):
automationId = eventId.replace("automation.", "")
automationInterface = interfaceAutomation.getInterface(currentUser)
automation = automationInterface.getAutomationDefinition(automationId)
if automation and getattr(automation, "eventId", None) == eventId:
automationInterface.updateAutomationDefinition(automationId, {"eventId": None})
return {
"success": True,
"eventId": eventId,
"message": f"Event {eventId} removed successfully"
}
except Exception as e:
logger.error(f"Error removing event: {str(e)}")
raise HTTPException(
status_code=500,
detail=f"Error removing event: {str(e)}"
)

View file

@ -1,207 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Admin automation execution logs routes.
SysAdmin-only endpoints for viewing consolidated automation execution history
across all mandates and feature instances.
"""
from fastapi import APIRouter, HTTPException, Depends, Request, Query
from typing import List, Dict, Any, Optional
import logging
import json
import math
import uuid
from modules.auth import limiter, requireSysAdminRole
from modules.datamodels.datamodelUam import User
from modules.datamodels.datamodelPagination import PaginationParams, PaginationMetadata, normalize_pagination_dict
from modules.routes.routeDataUsers import _applyFiltersAndSort, _extractDistinctValues
logger = logging.getLogger(__name__)
# Sysadmin-only router: every handler below depends on requireSysAdminRole.
router = APIRouter(
    prefix="/api/admin/automation-logs",
    tags=["Admin Automation Logs"],
    responses={
        # Documented error responses for the OpenAPI schema.
        401: {"description": "Unauthorized"},
        403: {"description": "Forbidden - Sysadmin only"},
        500: {"description": "Internal server error"},
    },
)
def _buildFlattenedExecutionLogs(currentUser: User) -> List[Dict[str, Any]]:
    """Flatten executionLogs from all AutomationDefinitions across all mandates.

    Called from SysAdmin-only endpoints; bypasses RBAC and reads directly
    from the DB. User/mandate/feature ids are resolved to display labels
    (cached per call) and every log entry is flattened into one row.

    Args:
        currentUser: Authenticated sysadmin (used to build the services).

    Returns:
        Flat list of log rows sorted by timestamp descending.
    """
    from modules.interfaces.interfaceDbApp import getRootInterface
    from modules.features.automation.mainAutomation import getAutomationServices
    from modules.features.automation.datamodelFeatureAutomation import AutomationDefinition
    rootInterface = getRootInterface()
    services = getAutomationServices(currentUser, mandateId=None, featureInstanceId=None)
    # Direct DB read (no RBAC filter) across all mandates.
    allAutomations = services.interfaceDbAutomation.db.getRecordset(AutomationDefinition)
    # Per-call caches so each id is resolved at most once.
    userCache: Dict[str, str] = {}
    mandateCache: Dict[str, str] = {}
    featureCache: Dict[str, str] = {}

    def _resolveUsername(userId: str) -> str:
        # Falls back to a shortened id when lookup fails.
        if not userId:
            return ""
        if userId not in userCache:
            try:
                user = rootInterface.getUser(userId)
                userCache[userId] = user.username if user else userId[:8]
            except Exception:
                userCache[userId] = userId[:8]
        return userCache[userId]

    def _resolveMandateLabel(mandateId: str) -> str:
        if not mandateId:
            return ""
        if mandateId not in mandateCache:
            try:
                mandate = rootInterface.getMandate(mandateId)
                mandateCache[mandateId] = getattr(mandate, "label", None) or mandateId[:8]
            except Exception:
                mandateCache[mandateId] = mandateId[:8]
        return mandateCache[mandateId]

    def _resolveFeatureLabel(featureInstanceId: str) -> str:
        if not featureInstanceId:
            return ""
        if featureInstanceId not in featureCache:
            try:
                instance = rootInterface.getFeatureInstance(featureInstanceId)
                featureCache[featureInstanceId] = (
                    getattr(instance, "label", None)
                    or getattr(instance, "featureCode", None)
                    or featureInstanceId[:8]
                )
            except Exception:
                featureCache[featureInstanceId] = featureInstanceId[:8]
        return featureCache[featureInstanceId]

    flatLogs: List[Dict[str, Any]] = []
    for automation in allAutomations:
        # Records may come back as dicts or model objects; handle both.
        if isinstance(automation, dict):
            automationId = automation.get("id", "")
            automationLabel = automation.get("label", "")
            mandateId = automation.get("mandateId", "")
            featureInstanceId = automation.get("featureInstanceId", "")
            createdBy = automation.get("sysCreatedBy", "")
            logs = automation.get("executionLogs") or []
        else:
            automationId = getattr(automation, "id", "")
            automationLabel = getattr(automation, "label", "")
            mandateId = getattr(automation, "mandateId", "")
            featureInstanceId = getattr(automation, "featureInstanceId", "")
            createdBy = getattr(automation, "sysCreatedBy", "")
            logs = getattr(automation, "executionLogs", None) or []
        # Resolve labels once per automation, not once per log entry.
        mandateName = _resolveMandateLabel(mandateId)
        featureInstanceName = _resolveFeatureLabel(featureInstanceId)
        executedByName = _resolveUsername(createdBy)
        for log in logs:
            # Non-dict log entries degrade to empty/zero fields.
            timestamp = log.get("timestamp", 0) if isinstance(log, dict) else 0
            status = log.get("status", "") if isinstance(log, dict) else ""
            workflowId = log.get("workflowId", "") if isinstance(log, dict) else ""
            messages = log.get("messages", []) if isinstance(log, dict) else []
            flatLogs.append({
                "id": str(uuid.uuid4()),  # synthetic row id for the client grid
                "timestamp": timestamp,
                "automationId": automationId,
                "automationLabel": automationLabel,
                "mandateName": mandateName,
                "featureInstanceName": featureInstanceName,
                "executedBy": executedByName,
                "status": status,
                "workflowId": workflowId,
                "messages": "; ".join(messages) if messages else "",
            })
    # Newest entries first.
    flatLogs.sort(key=lambda x: x.get("timestamp", 0), reverse=True)
    return flatLogs
@router.get("")
@limiter.limit("30/minute")
def get_all_automation_logs(
request: Request,
pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams"),
currentUser: User = Depends(requireSysAdminRole),
):
"""Get consolidated execution logs from all automations (sysadmin only)."""
try:
paginationParams: Optional[PaginationParams] = None
if pagination:
try:
paginationDict = json.loads(pagination)
if paginationDict:
paginationDict = normalize_pagination_dict(paginationDict)
paginationParams = PaginationParams(**paginationDict)
except (json.JSONDecodeError, ValueError) as e:
raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
logs = _buildFlattenedExecutionLogs(currentUser)
filtered = _applyFiltersAndSort(logs, paginationParams)
if paginationParams:
totalItems = len(filtered)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
startIdx = (paginationParams.page - 1) * paginationParams.pageSize
endIdx = startIdx + paginationParams.pageSize
return {
"items": filtered[startIdx:endIdx],
"pagination": PaginationMetadata(
currentPage=paginationParams.page,
pageSize=paginationParams.pageSize,
totalItems=totalItems,
totalPages=totalPages,
sort=paginationParams.sort,
filters=paginationParams.filters,
).model_dump(),
}
return {"items": logs, "pagination": None}
except HTTPException:
raise
except Exception as e:
logger.error(f"Error getting automation logs: {str(e)}")
raise HTTPException(status_code=500, detail=f"Error getting automation logs: {str(e)}")
@router.get("/filter-values")
@limiter.limit("60/minute")
def get_automation_log_filter_values(
request: Request,
column: str = Query(..., description="Column key"),
pagination: Optional[str] = Query(None, description="JSON-encoded current filters"),
currentUser: User = Depends(requireSysAdminRole),
):
"""Return distinct filter values for a column in automation logs."""
try:
crossFilterParams: Optional[PaginationParams] = None
if pagination:
try:
paginationDict = json.loads(pagination)
if paginationDict:
paginationDict = normalize_pagination_dict(paginationDict)
filters = paginationDict.get("filters", {})
filters.pop(column, None)
paginationDict["filters"] = filters
paginationDict.pop("sort", None)
crossFilterParams = PaginationParams(**paginationDict)
except (json.JSONDecodeError, ValueError):
pass
logs = _buildFlattenedExecutionLogs(currentUser)
crossFiltered = _applyFiltersAndSort(logs, crossFilterParams)
return _extractDistinctValues(crossFiltered, column)
except Exception as e:
logger.error(f"Error getting filter values: {str(e)}")
raise HTTPException(status_code=500, detail=str(e))

View file

@ -397,20 +397,23 @@ def create_feature(
# Feature Instance Endpoints (Mandate-scoped)
# =============================================================================
@router.get("/instances", response_model=List[Dict[str, Any]])
@router.get("/instances")
@limiter.limit("60/minute")
def list_feature_instances(
request: Request,
featureCode: Optional[str] = Query(None, description="Filter by feature code"),
pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
context: RequestContext = Depends(getRequestContext)
) -> List[Dict[str, Any]]:
):
"""
List feature instances for the current mandate.
Returns instances the user has access to within the selected mandate.
Supports server-side pagination, filtering, sorting, and search.
Args:
featureCode: Optional filter by feature code
pagination: JSON-encoded PaginationParams (page, pageSize, sort, filters)
"""
if not context.mandateId:
raise HTTPException(
@ -419,6 +422,16 @@ def list_feature_instances(
)
try:
paginationParams = None
if pagination:
try:
paginationDict = json.loads(pagination)
if paginationDict:
paginationDict = normalize_pagination_dict(paginationDict)
paginationParams = PaginationParams(**paginationDict)
except (json.JSONDecodeError, ValueError) as e:
raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
rootInterface = getRootInterface()
featureInterface = getFeatureInterface(rootInterface.db)
@ -427,7 +440,27 @@ def list_feature_instances(
featureCode=featureCode
)
return [inst.model_dump() for inst in instances]
items = [inst.model_dump() for inst in instances]
if paginationParams:
filtered = _applyFiltersAndSort(items, paginationParams)
totalItems = len(filtered)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
startIdx = (paginationParams.page - 1) * paginationParams.pageSize
endIdx = startIdx + paginationParams.pageSize
return {
"items": filtered[startIdx:endIdx],
"pagination": PaginationMetadata(
currentPage=paginationParams.page,
pageSize=paginationParams.pageSize,
totalItems=totalItems,
totalPages=totalPages,
sort=paginationParams.sort,
filters=paginationParams.filters,
).model_dump(),
}
else:
return items
except HTTPException:
raise
@ -995,13 +1028,14 @@ class FeatureInstanceUserUpdate(BaseModel):
enabled: Optional[bool] = Field(None, description="Whether this user's access is active (omit to leave unchanged)")
@router.get("/instances/{instanceId}/users", response_model=List[FeatureInstanceUserResponse])
@router.get("/instances/{instanceId}/users")
@limiter.limit("60/minute")
def list_feature_instance_users(
request: Request,
instanceId: str,
pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
context: RequestContext = Depends(getRequestContext)
) -> List[FeatureInstanceUserResponse]:
):
"""
List all users with access to a specific feature instance.
@ -1061,7 +1095,33 @@ def list_feature_instance_users(
enabled=fa.enabled
))
return result
items = [r.model_dump() for r in result]
paginationParams = None
if pagination:
try:
paginationDict = json.loads(pagination)
if paginationDict:
paginationDict = normalize_pagination_dict(paginationDict)
paginationParams = PaginationParams(**paginationDict)
except (json.JSONDecodeError, ValueError) as e:
raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
if paginationParams:
filtered = _applyFiltersAndSort(items, paginationParams)
totalItems = len(filtered)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
startIdx = (paginationParams.page - 1) * paginationParams.pageSize
endIdx = startIdx + paginationParams.pageSize
return {
"items": filtered[startIdx:endIdx],
"pagination": PaginationMetadata(
currentPage=paginationParams.page, pageSize=paginationParams.pageSize,
totalItems=totalItems, totalPages=totalPages,
sort=paginationParams.sort, filters=paginationParams.filters,
).model_dump(),
}
return items
except HTTPException:
raise

View file

@ -14,10 +14,14 @@ from fastapi import APIRouter, HTTPException, Depends, Request, Query
from typing import List, Dict, Any, Optional
from fastapi import status
import logging
import json
import math
from pydantic import BaseModel, Field, model_validator
from modules.auth import limiter, getRequestContext, RequestContext, getCurrentUser
from modules.datamodels.datamodelUam import User
from modules.datamodels.datamodelPagination import PaginationParams, PaginationMetadata, normalize_pagination_dict
from modules.routes.routeDataUsers import _applyFiltersAndSort
from modules.datamodels.datamodelInvitation import Invitation
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.shared.timeUtils import getUtcTimestamp
@ -394,15 +398,16 @@ def create_invitation(
)
@router.get("/", response_model=List[Dict[str, Any]])
@router.get("/")
@limiter.limit("60/minute")
def list_invitations(
request: Request,
frontendUrl: str = Query(..., description="Frontend URL for building invite links (provided by frontend)"),
includeUsed: bool = Query(False, description="Include already used invitations"),
includeExpired: bool = Query(False, description="Include expired invitations"),
pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
context: RequestContext = Depends(getRequestContext)
) -> List[Dict[str, Any]]:
):
"""
List invitations for the current mandate.
@ -468,6 +473,30 @@ def list_invitations(
"isUsedUp": currentUses >= maxUses
})
paginationParams = None
if pagination:
try:
paginationDict = json.loads(pagination)
if paginationDict:
paginationDict = normalize_pagination_dict(paginationDict)
paginationParams = PaginationParams(**paginationDict)
except (json.JSONDecodeError, ValueError) as e:
raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
if paginationParams:
filtered = _applyFiltersAndSort(result, paginationParams)
totalItems = len(filtered)
totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
startIdx = (paginationParams.page - 1) * paginationParams.pageSize
endIdx = startIdx + paginationParams.pageSize
return {
"items": filtered[startIdx:endIdx],
"pagination": PaginationMetadata(
currentPage=paginationParams.page, pageSize=paginationParams.pageSize,
totalItems=totalItems, totalPages=totalPages,
sort=paginationParams.sort, filters=paginationParams.filters,
).model_dump(),
}
return result
except HTTPException:

View file

@ -102,11 +102,8 @@ def _getFeatureUiObjects(featureCode: str) -> List[Dict[str, Any]]:
elif featureCode == "realestate":
from modules.features.realEstate.mainRealEstate import UI_OBJECTS
return UI_OBJECTS
elif featureCode == "automation":
from modules.features.automation.mainAutomation import UI_OBJECTS
return UI_OBJECTS
elif featureCode == "automation2":
from modules.features.automation2.mainAutomation2 import UI_OBJECTS
elif featureCode == "graphicalEditor":
from modules.features.graphicalEditor.mainGraphicalEditor import UI_OBJECTS
return UI_OBJECTS
elif featureCode == "teamsbot":
from modules.features.teamsbot.mainTeamsbot import UI_OBJECTS

View file

@ -0,0 +1,7 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Core agent tools: registration of built-in ToolRegistry handlers."""
from .registerCore import registerCoreTools
__all__ = ["registerCoreTools"]

View file

@ -0,0 +1,193 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""External connection tools (list connections, upload, send mail)."""
import logging
from typing import Any, Dict, List, Optional
from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
from modules.serviceCenter.services.serviceAgent.coreTools._helpers import (
_getOrCreateTempFolder,
_looksLikeBinary,
_resolveFileScope,
_MAX_TOOL_RESULT_CHARS,
)
logger = logging.getLogger(__name__)
def _registerConnectionTools(registry: ToolRegistry, services):
    """Register external-connection tools on *registry*.

    Tools registered: ``listConnections`` (read-only), ``uploadToExternal``,
    and ``sendMail``. All handlers return a ToolResult and report failures
    via ``success=False`` rather than raising. Auto-extracted from
    registerCoreTools.
    """
    # ---- Connection tools (external data sources) ----
    def _buildResolverDb():
        """Build a DB adapter that ConnectorResolver can use to load UserConnections.
        interfaceDbApp has getUserConnectionById; ConnectorResolver expects getUserConnection."""
        chatService = services.chat
        appIf = getattr(chatService, "interfaceDbApp", None)
        if appIf and hasattr(appIf, "getUserConnectionById"):
            # Bridge the method-name mismatch with a minimal adapter object.
            class _Adapter:
                def __init__(self, app):
                    self._app = app

                def getUserConnection(self, connectionId: str):
                    return self._app.getUserConnectionById(connectionId)
            return _Adapter(appIf)
        # Fallback: hand the component DB interface (possibly None) to the resolver.
        return getattr(chatService, "interfaceDbComponent", None)

    async def _listConnections(args: Dict[str, Any], context: Dict[str, Any]):
        """List the user's external connections as a plain-text bullet list."""
        try:
            chatService = services.chat
            connections = chatService.getUserConnections() if hasattr(chatService, "getUserConnections") else []
            if not connections:
                return ToolResult(toolCallId="", toolName="listConnections", success=True, data="No connections available.")
            lines = []
            for conn in connections:
                # Connections may be dicts or model objects — read both shapes.
                connId = conn.get("id", "?") if isinstance(conn, dict) else getattr(conn, "id", "?")
                authority = conn.get("authority", "?") if isinstance(conn, dict) else getattr(conn, "authority", "?")
                email = conn.get("externalEmail", "") if isinstance(conn, dict) else getattr(conn, "externalEmail", "")
                lines.append(f"- {authority} ({email}) id: {connId}")
            return ToolResult(toolCallId="", toolName="listConnections", success=True, data="\n".join(lines))
        except Exception as e:
            return ToolResult(toolCallId="", toolName="listConnections", success=False, error=str(e))

    async def _uploadToExternal(args: Dict[str, Any], context: Dict[str, Any]):
        """Upload a workspace file (by fileId) to an external service path."""
        connectionId = args.get("connectionId", "")
        service = args.get("service", "")
        path = args.get("path", "")
        fileId = args.get("fileId", "")
        if not connectionId or not service or not path or not fileId:
            return ToolResult(toolCallId="", toolName="uploadToExternal", success=False, error="connectionId, service, path, and fileId are required")
        try:
            from modules.connectors.connectorResolver import ConnectorResolver
            resolver = ConnectorResolver(
                services.getService("security"),
                _buildResolverDb(),
            )
            adapter = await resolver.resolveService(connectionId, service)
            chatService = services.chat
            fileContent = chatService.getFileContent(fileId)
            if not fileContent:
                return ToolResult(toolCallId="", toolName="uploadToExternal", success=False, error="File not found")
            # getFileContent is expected to return a dict with "data"/"fileName";
            # anything else falls back to empty bytes / generic name.
            fileData = fileContent.get("data", b"") if isinstance(fileContent, dict) else b""
            if isinstance(fileData, str):
                # Text payloads are encoded before upload.
                fileData = fileData.encode("utf-8")
            fileName = fileContent.get("fileName", "file") if isinstance(fileContent, dict) else "file"
            result = await adapter.upload(path, fileData, fileName)
            return ToolResult(toolCallId="", toolName="uploadToExternal", success=True, data=str(result))
        except Exception as e:
            return ToolResult(toolCallId="", toolName="uploadToExternal", success=False, error=str(e))

    async def _sendMail(args: Dict[str, Any], context: Dict[str, Any]):
        """Send (or draft) an email via the 'outlook' service of a connection.

        Attachments are loaded from the workspace DB and base64-encoded into
        Graph-style fileAttachment dicts before resolving the adapter.
        """
        import base64 as _b64
        connectionId = args.get("connectionId", "")
        to = args.get("to", [])
        subject = args.get("subject", "")
        body = args.get("body", "")
        # Adapter expects "HTML"/"Text"; the tool accepts lowercase "html"/"text".
        bodyType = "HTML" if args.get("bodyType", "text").lower() == "html" else "Text"
        draft = args.get("draft", False)
        attachmentFileIds = args.get("attachmentFileIds") or []
        if not connectionId or not to or not subject:
            return ToolResult(toolCallId="", toolName="sendMail", success=False, error="connectionId, to, and subject are required")
        try:
            graphAttachments: List[Dict[str, Any]] = []
            if attachmentFileIds:
                chatService = services.chat
                dbMgmt = chatService.interfaceDbComponent
                for fid in attachmentFileIds:
                    fileRow = dbMgmt.getFile(fid)
                    if not fileRow:
                        return ToolResult(toolCallId="", toolName="sendMail", success=False, error=f"Attachment file not found: {fid}")
                    rawBytes = dbMgmt.getFileData(fid)
                    if not rawBytes:
                        return ToolResult(toolCallId="", toolName="sendMail", success=False, error=f"Attachment file has no data: {fid}")
                    graphAttachments.append({
                        "name": fileRow.fileName,
                        "contentBytes": _b64.b64encode(rawBytes).decode("ascii"),
                        "contentType": getattr(fileRow, "mimeType", "application/octet-stream"),
                    })
            from modules.connectors.connectorResolver import ConnectorResolver
            resolver = ConnectorResolver(
                services.getService("security"),
                _buildResolverDb(),
            )
            adapter = await resolver.resolveService(connectionId, "outlook")
            # Prefer a draft when requested and supported; otherwise send directly.
            if draft and hasattr(adapter, "createDraft"):
                result = await adapter.createDraft(
                    to=to, subject=subject, body=body, bodyType=bodyType,
                    cc=args.get("cc"), attachments=graphAttachments or None,
                )
                return ToolResult(toolCallId="", toolName="sendMail", success=True, data=str(result))
            if hasattr(adapter, "sendMail"):
                result = await adapter.sendMail(
                    to=to, subject=subject, body=body, bodyType=bodyType,
                    cc=args.get("cc"), attachments=graphAttachments or None,
                )
                return ToolResult(toolCallId="", toolName="sendMail", success=True, data=str(result))
            return ToolResult(toolCallId="", toolName="sendMail", success=False, error="Mail not supported by this adapter")
        except Exception as e:
            return ToolResult(toolCallId="", toolName="sendMail", success=False, error=str(e))

    # Shared schema fragment for connection-addressed tools.
    _connToolParams = {
        "connectionId": {"type": "string", "description": "UserConnection ID"},
        "service": {"type": "string", "description": "Service name (sharepoint, outlook, drive, etc.)"},
    }
    registry.register(
        "listConnections", _listConnections,
        description="List the user's external connections (SharePoint, OneDrive, Outlook, etc.) and their IDs. Use with browseDataSource/uploadToExternal.",
        parameters={"type": "object", "properties": {}},
        readOnly=True,
    )
    registry.register(
        "uploadToExternal", _uploadToExternal,
        description=(
            "Upload a local file to an external storage via connectionId+service. "
            "Use listConnections to find available connections."
        ),
        parameters={
            "type": "object",
            "properties": {
                **_connToolParams,
                "path": {"type": "string", "description": "Destination path on the external service"},
                "fileId": {"type": "string", "description": "Local file ID to upload"},
            },
            "required": ["connectionId", "service", "path", "fileId"],
        },
        readOnly=False,
    )
    registry.register(
        "sendMail", _sendMail,
        description=(
            "Send or draft an email via a connected mail service (Outlook). "
            "Supports HTML body and file attachments from the workspace. "
            "Set draft=true to save as draft without sending. "
            "Use listConnections to find the connectionId."
        ),
        parameters={
            "type": "object",
            "properties": {
                "connectionId": {"type": "string", "description": "UserConnection ID"},
                "to": {"type": "array", "items": {"type": "string"}, "description": "Recipient email addresses"},
                "subject": {"type": "string", "description": "Email subject"},
                "body": {"type": "string", "description": "Email body — plain text or HTML markup"},
                "bodyType": {"type": "string", "enum": ["text", "html"], "description": "Body format: 'text' (default) or 'html'"},
                "cc": {"type": "array", "items": {"type": "string"}, "description": "CC addresses"},
                "attachmentFileIds": {
                    "type": "array", "items": {"type": "string"},
                    "description": "File IDs from the workspace to attach (use listFiles to find IDs)",
                },
                "draft": {"type": "boolean", "description": "If true, save as draft in Drafts folder instead of sending"},
            },
            "required": ["connectionId", "to", "subject", "body"],
        },
        readOnly=False,
    )

View file

@ -0,0 +1,175 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Cross-workflow tools and core-only tool-set tagging."""
import logging
from typing import Any, Dict, List, Optional
from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
from modules.serviceCenter.services.serviceAgent.coreTools._helpers import (
_getOrCreateTempFolder,
_looksLikeBinary,
_resolveFileScope,
_MAX_TOOL_RESULT_CHARS,
)
logger = logging.getLogger(__name__)
def _registerCrossWorkflowTools(registry: ToolRegistry, services):
    """Register the cross-workflow tools and tag core-only tool names.

    Adds ``listWorkflowHistory`` and ``readWorkflowMessages`` to *registry*,
    then marks the built-in core tools with ``toolSet = "core"`` so that
    restricted tool sets exclude them.
    """
    # ---- Cross-workflow tools ----

    def _rowAsDict(row):
        # Rows may be plain dicts or pydantic models; normalize to a dict.
        if isinstance(row, dict):
            return row
        return row.model_dump() if hasattr(row, "model_dump") else {}

    async def _listWorkflowHistory(args: Dict[str, Any], context: Dict[str, Any]) -> ToolResult:
        """Return metadata for up to 50 most recent workflows as a JSON string."""
        import json as jsonlib
        try:
            chatInterface = services.chat.interfaceDbChat
            recent = sorted(
                chatInterface.getWorkflows() or [],
                key=lambda row: row.get("sysCreatedAt") or row.get("startedAt") or 0,
                reverse=True,
            )[:50]
            items = []
            for row in recent:
                workflowId = row.get("id", "")
                createdTs = row.get("sysCreatedAt") or row.get("startedAt") or 0
                messageRows = chatInterface.getMessages(workflowId) or []
                preview = ""
                if messageRows:
                    tail = _rowAsDict(messageRows[-1])
                    tailText = tail.get("message") or tail.get("content") or ""
                    preview = tailText[:150]
                items.append({
                    "id": workflowId,
                    "name": row.get("name") or "(unnamed)",
                    "createdAt": createdTs,
                    "lastActivity": row.get("lastActivity") or createdTs,
                    "messageCount": len(messageRows),
                    "lastMessagePreview": preview,
                })
            return ToolResult(
                toolCallId="", toolName="listWorkflowHistory",
                success=True, data=jsonlib.dumps(items, ensure_ascii=False),
            )
        except Exception as e:
            return ToolResult(
                toolCallId="", toolName="listWorkflowHistory",
                success=False, error=str(e),
            )

    registry.register(
        "listWorkflowHistory", _listWorkflowHistory,
        description=(
            "List all chat conversations/workflows in this workspace. "
            "Returns id, name, createdAt, lastActivity, messageCount, and a preview "
            "of the last message for each workflow. Use this to discover previous "
            "conversations when the user asks about past chats or wants a summary "
            "across conversations."
        ),
        parameters={"type": "object", "properties": {}},
        readOnly=True,
    )

    async def _readWorkflowMessages(args: Dict[str, Any], context: Dict[str, Any]) -> ToolResult:
        """Return a paginated window of messages from one workflow."""
        import json as jsonlib
        targetWorkflowId = args.get("workflowId", "")
        limit = int(args.get("limit", 20))
        offset = int(args.get("offset", 0))
        if not targetWorkflowId:
            return ToolResult(
                toolCallId="", toolName="readWorkflowMessages",
                success=False, error="workflowId is required",
            )
        try:
            chatInterface = services.chat.interfaceDbChat
            allMsgs = chatInterface.getMessages(targetWorkflowId) or []
            window = allMsgs[offset:offset + limit]
            items = []
            for row in window:
                raw = _rowAsDict(row)
                text = raw.get("message") or raw.get("content") or ""
                if len(text) > 2000:
                    # Long messages are truncated to keep tool output bounded.
                    text = text[:2000] + "..."
                items.append({
                    "role": raw.get("role", ""),
                    "message": text,
                    "publishedAt": raw.get("publishedAt") or raw.get("sysCreatedAt") or 0,
                })
            header = f"Workflow {targetWorkflowId}: {len(allMsgs)} total messages"
            if offset > 0 or len(allMsgs) > offset + limit:
                header += f" (showing {offset + 1}-{offset + len(window)})"
            return ToolResult(
                toolCallId="", toolName="readWorkflowMessages",
                success=True,
                data=header + "\n" + jsonlib.dumps(items, ensure_ascii=False),
            )
        except Exception as e:
            return ToolResult(
                toolCallId="", toolName="readWorkflowMessages",
                success=False, error=str(e),
            )

    registry.register(
        "readWorkflowMessages", _readWorkflowMessages,
        description=(
            "Read messages from a specific chat workflow/conversation. "
            "Use this after listWorkflowHistory to read the content of a "
            "specific past conversation. Supports pagination via offset/limit."
        ),
        parameters={
            "type": "object",
            "properties": {
                "workflowId": {"type": "string", "description": "ID of the workflow to read messages from"},
                "limit": {"type": "integer", "description": "Max messages to return (default 20)"},
                "offset": {"type": "integer", "description": "Skip first N messages (default 0)"},
            },
            "required": ["workflowId"],
        },
        readOnly=True,
    )

    # Tag core-only tools so restricted toolSets (e.g. "commcoach") exclude them.
    # Tools NOT in this set remain toolSet=None → available to ALL sets.
    _CORE_ONLY_TOOLS = {
        "listFiles", "listFolders", "tagFile", "moveFile", "createFolder",
        "writeFile", "deleteFile", "renameFile", "translateText",
        "deleteFolder", "renameFolder", "moveFolder", "copyFile", "replaceInFile",
        "listConnections", "uploadToExternal", "sendMail", "downloadFromDataSource",
        "browseContainer", "readContentObjects", "extractContainerItem",
        "summarizeContent", "describeImage", "renderDocument",
        "textToSpeech", "generateImage", "createChart",
        "speechToText", "detectLanguage", "neutralizeData", "executeCode",
        "listWorkflowHistory", "readWorkflowMessages",
    }
    for coreToolName in _CORE_ONLY_TOOLS:
        descriptor = registry.getTool(coreToolName)
        if descriptor:
            descriptor.toolSet = "core"

View file

@ -0,0 +1,258 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""DataSource convenience tools (browse, search, download from external sources)."""
import logging
from typing import Any, Dict, List, Optional
from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
from modules.serviceCenter.services.serviceAgent.coreTools._helpers import (
_getOrCreateTempFolder,
_looksLikeBinary,
_resolveFileScope,
_MAX_TOOL_RESULT_CHARS,
)
logger = logging.getLogger(__name__)
def _registerDataSourceTools(registry: ToolRegistry, services):
    """Register the DataSource convenience tools (browse, search, download).

    Tools registered: ``browseDataSource`` and ``searchDataSource``
    (read-only) plus ``downloadFromDataSource``, which copies an external
    file into local storage. Each tool accepts either a ``dataSourceId``
    (an attached data source) or a direct ``connectionId`` + ``service``
    pair.

    Fixes vs. the auto-extracted version:
    - ``_buildResolverDb`` was referenced here but defined only in the
      connection-tools module, so every tool call raised NameError; the
      helper is now defined locally and the triplicated resolver setup is
      hoisted into ``_resolveAdapter``.
    - the ``downloadFromDataSource`` schema required ``dataSourceId``,
      contradicting its documented connectionId+service alternative; it now
      requires only ``filePath`` (the handler still validates the either/or).
    """
    # ---- DataSource convenience tools ----
    # Maps a DataSource.sourceType to the connector service name; unknown
    # sourceTypes pass through unchanged.
    _SOURCE_TYPE_TO_SERVICE = {
        "sharepointFolder": "sharepoint",
        "onedriveFolder": "onedrive",
        "outlookFolder": "outlook",
        "googleDriveFolder": "drive",
        "gmailFolder": "gmail",
        "ftpFolder": "files",
        "clickupList": "clickup",
    }

    def _buildResolverDb():
        """Build a DB adapter that ConnectorResolver can use to load UserConnections.

        interfaceDbApp has getUserConnectionById; ConnectorResolver expects
        getUserConnection — a minimal adapter bridges the naming difference.
        """
        chatService = services.chat
        appIf = getattr(chatService, "interfaceDbApp", None)
        if appIf and hasattr(appIf, "getUserConnectionById"):
            class _Adapter:
                def __init__(self, app):
                    self._app = app

                def getUserConnection(self, connectionId: str):
                    return self._app.getUserConnectionById(connectionId)
            return _Adapter(appIf)
        # Fallback: component DB interface (may be None — resolver handles that).
        return getattr(chatService, "interfaceDbComponent", None)

    async def _resolveAdapter(connectionId: str, service: str):
        """Resolve the connector adapter for a connection/service pair."""
        from modules.connectors.connectorResolver import ConnectorResolver
        resolver = ConnectorResolver(
            services.getService("security"),
            _buildResolverDb(),
        )
        return await resolver.resolveService(connectionId, service)

    async def _resolveDataSource(dsId: str):
        """Resolve a DataSource record and return (connectionId, service, path, neutralize) or raise."""
        chatService = services.chat
        ds = chatService.getDataSource(dsId) if hasattr(chatService, "getDataSource") else None
        if not ds:
            raise ValueError(f"DataSource '{dsId}' not found")
        connectionId = ds.get("connectionId", "")
        sourceType = ds.get("sourceType", "")
        path = ds.get("path", "/")
        label = ds.get("label", "")
        neutralize = bool(ds.get("neutralize", False))
        service = _SOURCE_TYPE_TO_SERVICE.get(sourceType, sourceType)
        if not connectionId:
            raise ValueError(f"DataSource '{dsId}' has no connectionId")
        logger.info(f"Resolved DataSource '{dsId}' ({label}): sourceType={sourceType}, service={service}, connectionId={connectionId}, path={path[:80]}, neutralize={neutralize}")
        return connectionId, service, path, neutralize

    # Mail services only expose subjects via browse/search; full content needs download.
    _MAIL_SERVICES = {"outlook", "gmail"}

    async def _browseDataSource(args: Dict[str, Any], context: Dict[str, Any]):
        """List entries of a data source directory (optionally a sub-path)."""
        dsId = args.get("dataSourceId", "")
        subPath = args.get("subPath", "")
        directConnId = args.get("connectionId", "")
        directService = args.get("service", "")
        if not dsId and not (directConnId and directService):
            return ToolResult(toolCallId="", toolName="browseDataSource", success=False,
                              error="Provide either dataSourceId OR connectionId+service")
        try:
            if dsId:
                connectionId, service, basePath, _neutralize = await _resolveDataSource(dsId)
            else:
                connectionId, service, basePath = directConnId, directService, args.get("path", "/")
            # Absolute subPath replaces the base; relative subPath is appended.
            if subPath:
                if subPath.startswith("/"):
                    browsePath = subPath
                else:
                    browsePath = f"{basePath.rstrip('/')}/{subPath}"
            else:
                browsePath = basePath
            adapter = await _resolveAdapter(connectionId, service)
            entries = await adapter.browse(browsePath, filter=args.get("filter"))
            if not entries:
                return ToolResult(toolCallId="", toolName="browseDataSource", success=True, data="Empty directory.")
            lines = []
            for e in entries:
                prefix = "[DIR]" if e.isFolder else "[FILE]"
                sizeInfo = f" ({e.size} bytes)" if e.size else ""
                lines.append(f"- {prefix} {e.name}{sizeInfo} path: {e.path}")
            result = "\n".join(lines)
            if service in _MAIL_SERVICES:
                result += "\n\nIMPORTANT: These are email subjects only. To read the full email content, use downloadFromDataSource with the path, then readFile on the returned file ID."
            return ToolResult(toolCallId="", toolName="browseDataSource", success=True, data=result)
        except Exception as e:
            return ToolResult(toolCallId="", toolName="browseDataSource", success=False, error=str(e))

    async def _searchDataSource(args: Dict[str, Any], context: Dict[str, Any]):
        """Search a data source for files matching a query."""
        dsId = args.get("dataSourceId", "")
        directConnId = args.get("connectionId", "")
        directService = args.get("service", "")
        query = args.get("query", "")
        if not query:
            return ToolResult(toolCallId="", toolName="searchDataSource", success=False, error="query is required")
        if not dsId and not (directConnId and directService):
            return ToolResult(toolCallId="", toolName="searchDataSource", success=False,
                              error="Provide either dataSourceId OR connectionId+service")
        try:
            if dsId:
                connectionId, service, basePath, _neutralize = await _resolveDataSource(dsId)
            else:
                connectionId, service, basePath = directConnId, directService, args.get("path", "/")
            adapter = await _resolveAdapter(connectionId, service)
            entries = await adapter.search(query, path=basePath)
            if not entries:
                return ToolResult(toolCallId="", toolName="searchDataSource", success=True, data="No results found.")
            lines = [f"- {e.name} (path: {e.path})" for e in entries]
            result = "\n".join(lines)
            if service in _MAIL_SERVICES:
                result += "\n\nIMPORTANT: These are email subjects only. To read the full email content, use downloadFromDataSource with the path, then readFile on the returned file ID."
            return ToolResult(toolCallId="", toolName="searchDataSource", success=True, data=result)
        except Exception as e:
            return ToolResult(toolCallId="", toolName="searchDataSource", success=False, error=str(e))

    async def _downloadFromDataSource(args: Dict[str, Any], context: Dict[str, Any]):
        """Download one external file/email into local storage.

        Saves the bytes via the component DB, tags the new file with the
        feature instance, the source's neutralize flag, and the temp folder,
        then returns the local file ID for readFile.
        """
        dsId = args.get("dataSourceId", "")
        directConnId = args.get("connectionId", "")
        directService = args.get("service", "")
        filePath = args.get("filePath", "")
        fileName = args.get("fileName", "")
        if not filePath:
            return ToolResult(toolCallId="", toolName="downloadFromDataSource", success=False, error="filePath is required")
        if not dsId and not (directConnId and directService):
            return ToolResult(toolCallId="", toolName="downloadFromDataSource", success=False,
                              error="Provide either dataSourceId OR connectionId+service")
        try:
            from modules.connectors.connectorProviderBase import DownloadResult as _DR
            _sourceNeutralize = False
            if dsId:
                connectionId, service, basePath, _sourceNeutralize = await _resolveDataSource(dsId)
            else:
                connectionId, service, basePath = directConnId, directService, "/"
            fullPath = filePath if filePath.startswith("/") else f"{basePath.rstrip('/')}/{filePath}"
            adapter = await _resolveAdapter(connectionId, service)
            result = await adapter.download(fullPath)
            # Adapters may return a DownloadResult (bytes + name) or raw bytes.
            if isinstance(result, _DR):
                fileBytes = result.data
                fileName = result.fileName or fileName
            else:
                fileBytes = result
            if not fileBytes:
                return ToolResult(toolCallId="", toolName="downloadFromDataSource", success=False, error="Download returned empty")
            # Best-effort file-name recovery: path segment, then the browse
            # listing, then magic-byte sniffing for PDF/ZIP containers.
            if not fileName or "." not in fileName:
                pathSegment = fullPath.split("/")[-1] or "downloaded_file"
                fileName = fileName or pathSegment
                if "." not in fileName:
                    try:
                        entries = await adapter.browse(basePath)
                        for entry in entries:
                            if getattr(entry, "path", "") == filePath or getattr(entry, "path", "").endswith(filePath):
                                if "." in entry.name:
                                    fileName = entry.name
                                break
                    except Exception:
                        pass  # browse is only a hint; keep the fallback name
                if "." not in fileName:
                    if fileBytes[:4] == b"%PDF":
                        fileName = f"{fileName}.pdf"
                    elif fileBytes[:2] == b"PK":
                        fileName = f"{fileName}.zip"
            chatService = services.chat
            fileItem, _ = chatService.interfaceDbComponent.saveUploadedFile(fileBytes, fileName)
            fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
            if fiId:
                chatService.interfaceDbComponent.updateFile(fileItem.id, {"featureInstanceId": fiId})
            if _sourceNeutralize:
                # Propagate the data source's neutralize flag to the local copy.
                chatService.interfaceDbComponent.updateFile(fileItem.id, {"neutralize": True})
            tempFolderId = _getOrCreateTempFolder(chatService)
            if tempFolderId:
                chatService.interfaceDbComponent.updateFile(fileItem.id, {"folderId": tempFolderId})
            ext = fileName.rsplit(".", 1)[-1].lower() if "." in fileName else ""
            hint = "Use readFile to read the text content." if ext in ("doc", "docx", "txt", "csv", "json", "xml", "html", "md", "rtf", "odt", "xls", "xlsx", "pptx", "pdf", "eml", "msg") else "Use readFile to access the content."
            return ToolResult(
                toolCallId="", toolName="downloadFromDataSource", success=True,
                data=f"Downloaded '{fileName}' ({len(fileBytes)} bytes) → local file id: {fileItem.id}. {hint}"
            )
        except Exception as e:
            return ToolResult(toolCallId="", toolName="downloadFromDataSource", success=False, error=str(e))

    registry.register(
        "browseDataSource", _browseDataSource,
        description=(
            "Browse files and folders in a data source. Accepts either:\n"
            "- dataSourceId (for attached data sources shown in the prompt), OR\n"
            "- connectionId + service (for direct connection access via listConnections)."
        ),
        parameters={
            "type": "object",
            "properties": {
                "dataSourceId": {"type": "string", "description": "DataSource ID (from attached data sources)"},
                "connectionId": {"type": "string", "description": "UserConnection ID (alternative to dataSourceId)"},
                "service": {"type": "string", "description": "Service name (alternative to dataSourceId, e.g. sharepoint, onedrive)"},
                "path": {"type": "string", "description": "Root path (used with connectionId+service)"},
                "subPath": {"type": "string", "description": "Sub-path within the data source to browse"},
                "filter": {"type": "string", "description": "Filter pattern (e.g. '*.pdf')"},
            },
        },
        readOnly=True,
    )
    registry.register(
        "searchDataSource", _searchDataSource,
        description=(
            "Search for files within a data source. Accepts either dataSourceId OR connectionId+service."
        ),
        parameters={
            "type": "object",
            "properties": {
                "dataSourceId": {"type": "string", "description": "DataSource ID"},
                "connectionId": {"type": "string", "description": "UserConnection ID (alternative to dataSourceId)"},
                "service": {"type": "string", "description": "Service name (alternative to dataSourceId)"},
                "path": {"type": "string", "description": "Scope path (used with connectionId+service)"},
                "query": {"type": "string", "description": "Search query"},
            },
            "required": ["query"],
        },
        readOnly=True,
    )
    registry.register(
        "downloadFromDataSource", _downloadFromDataSource,
        description=(
            "Download a file or email from a data source into local storage. Returns a local file ID "
            "to read with readFile. Accepts either dataSourceId OR connectionId+service. "
            "For email sources (Outlook, Gmail), browse/search only return subjects -- use this to get full content."
        ),
        parameters={
            "type": "object",
            "properties": {
                "dataSourceId": {"type": "string", "description": "DataSource ID"},
                "connectionId": {"type": "string", "description": "UserConnection ID (alternative to dataSourceId)"},
                "service": {"type": "string", "description": "Service name (alternative to dataSourceId)"},
                "filePath": {"type": "string", "description": "Path of the file to download (from browseDataSource results)"},
                "fileName": {"type": "string", "description": "File name with extension (e.g. 'report.pdf')"},
            },
            # Only filePath is universally required: the handler accepts
            # either dataSourceId OR connectionId+service (validated above).
            "required": ["filePath"],
        },
        readOnly=False,
    )

View file

@ -0,0 +1,374 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Document and vision tools (containers, content objects, image description)."""
import logging
from typing import Any, Dict, List, Optional
from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
from modules.serviceCenter.services.serviceAgent.coreTools._helpers import (
_getOrCreateTempFolder,
_looksLikeBinary,
_resolveFileScope,
_MAX_TOOL_RESULT_CHARS,
)
logger = logging.getLogger(__name__)
def _registerDocumentTools(registry: ToolRegistry, services):
"""Auto-extracted from registerCoreTools."""
# ---- Document tools (Smart Documents / Container Handling) ----
async def _browseContainer(args: Dict[str, Any], context: Dict[str, Any]):
fileId = args.get("fileId", "")
if not fileId:
return ToolResult(toolCallId="", toolName="browseContainer", success=False, error="fileId is required")
try:
knowledgeService = services.getService("knowledge")
index = knowledgeService.getFileContentIndex(fileId)
if not index:
return ToolResult(toolCallId="", toolName="browseContainer", success=True, data="No content index available for this file. It may not have been indexed yet.")
structure = index.get("structure", {}) if isinstance(index, dict) else {}
objectSummary = index.get("objectSummary", []) if isinstance(index, dict) else []
totalObjects = index.get("totalObjects", 0) if isinstance(index, dict) else 0
result = f"File: {index.get('fileName', '?')} ({index.get('mimeType', '?')})\n"
result += f"Total content objects: {totalObjects}\n"
sections = structure.get("sections", [])
if sections:
result += "\nSections:\n"
for s in sections:
result += f" [{s.get('id', '?')}] {s.get('title', 'Untitled')} (pages {s.get('startPage', '?')}-{s.get('endPage', '?')})\n"
if structure.get("pageMap"):
pages = len(structure["pageMap"])
result += f"\nPages: {pages}\n"
imgCount = structure.get("imageCount", 0)
tableCount = structure.get("tableCount", 0)
if imgCount:
result += f"Images: {imgCount}\n"
if tableCount:
result += f"Tables: {tableCount}\n"
if structure.get("sheetMap"):
result += "\nSheets:\n"
for s in structure["sheetMap"]:
result += f" {s.get('sheetName', '?')} ({s.get('rows', '?')} rows x {s.get('columns', '?')} cols)\n"
if structure.get("slideMap"):
result += "\nSlides:\n"
for s in structure["slideMap"]:
result += f" Slide {s.get('slideIndex', 0) + 1}: {s.get('title', '(no title)')}\n"
return ToolResult(toolCallId="", toolName="browseContainer", success=True, data=result)
except Exception as e:
return ToolResult(toolCallId="", toolName="browseContainer", success=False, error=str(e))
async def _readContentObjects(args: Dict[str, Any], context: Dict[str, Any]):
fileId = args.get("fileId", "")
if not fileId:
return ToolResult(toolCallId="", toolName="readContentObjects", success=False, error="fileId is required")
try:
knowledgeService = services.getService("knowledge")
filterDict = {}
if args.get("pageIndex") is not None:
filterDict["pageIndex"] = args["pageIndex"]
if args.get("contentType"):
filterDict["contentType"] = args["contentType"]
if args.get("sectionId"):
filterDict["sectionId"] = args["sectionId"]
objects = await knowledgeService.readContentObjects(fileId, filterDict)
if not objects:
return ToolResult(toolCallId="", toolName="readContentObjects", success=True, data="No content objects found with the given filter.")
result = f"Found {len(objects)} content objects:\n\n"
for obj in objects[:20]:
data = obj.get("data", "")
cType = obj.get("contentType", "?")
ref = obj.get("contextRef", {})
location = ref.get("location", "") if isinstance(ref, dict) else ""
preview = data[:300] if cType == "text" else f"[{cType} data, {len(data)} chars]"
result += f"[{cType}] {location}: {preview}\n\n"
if len(objects) > 20:
result += f"... and {len(objects) - 20} more objects"
return ToolResult(toolCallId="", toolName="readContentObjects", success=True, data=result)
except Exception as e:
return ToolResult(toolCallId="", toolName="readContentObjects", success=False, error=str(e))
async def _extractContainerItem(args: Dict[str, Any], context: Dict[str, Any]):
    """Fetch a single item (by container path) out of an indexed container file."""
    targetFileId = args.get("fileId", "")
    itemPath = args.get("containerPath", "")
    if not (targetFileId and itemPath):
        return ToolResult(toolCallId="", toolName="extractContainerItem", success=False, error="fileId and containerPath are required")
    try:
        knowledge = services.getService("knowledge")
        extracted = await knowledge.extractContainerItem(targetFileId, itemPath)
        if not extracted:
            # Item is absent from the container index; nothing to return.
            return ToolResult(toolCallId="", toolName="extractContainerItem", success=False, error=f"Item '{itemPath}' not found in container index for file {targetFileId}. On-demand extraction is not yet implemented.")
        return ToolResult(toolCallId="", toolName="extractContainerItem", success=True, data=str(extracted))
    except Exception as e:
        return ToolResult(toolCallId="", toolName="extractContainerItem", success=False, error=str(e))
async def _summarizeContent(args: Dict[str, Any], context: Dict[str, Any]):
    """Produce an AI summary of a file's extracted content, honoring optional filters."""
    targetFileId = args.get("fileId", "")
    if not targetFileId:
        return ToolResult(toolCallId="", toolName="summarizeContent", success=False, error="fileId is required")
    try:
        knowledge = services.getService("knowledge")
        activeFilter: Dict[str, Any] = {}
        if args.get("sectionId"):
            activeFilter["sectionId"] = args["sectionId"]
        if args.get("pageIndex") is not None:
            activeFilter["pageIndex"] = args["pageIndex"]
        if args.get("contentType"):
            activeFilter["contentType"] = args["contentType"]
        found = await knowledge.readContentObjects(targetFileId, activeFilter)
        if not found:
            return ToolResult(toolCallId="", toolName="summarizeContent", success=True, data="No content found to summarize.")
        # Image chunks carry base64 payloads -- keep only textual chunks, capped at 6000 chars.
        textual = [entry.get("data", "") for entry in found if entry.get("contentType") != "image"]
        promptBody = "\n\n".join(textual)[:6000]
        from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum
        request = AiCallRequest(
            prompt=f"Summarize the following content concisely:\n\n{promptBody}",
            options=AiCallOptions(operationType=OperationTypeEnum.DATA_ANALYSE),
        )
        reply = await services.ai.callAi(request)
        return ToolResult(toolCallId="", toolName="summarizeContent", success=True, data=reply.content)
    except Exception as e:
        return ToolResult(toolCallId="", toolName="summarizeContent", success=False, error=str(e))
# Register the four knowledge-store tools defined above. All are marked
# read-only: they never mutate files or the index.
registry.register(
    "browseContainer", _browseContainer,
    description="Browse the structural index of a document (pages, sections, sheets, slides). Use before readContentObjects for targeted reading.",
    parameters={
        "type": "object",
        "properties": {"fileId": {"type": "string", "description": "The file ID to browse"}},
        "required": ["fileId"],
    },
    readOnly=True,
)
registry.register(
    "readContentObjects", _readContentObjects,
    description="Read extracted content objects from a file, optionally filtered by page, section, or type. Use browseContainer first to see the structure.",
    parameters={
        "type": "object",
        "properties": {
            "fileId": {"type": "string", "description": "The file ID"},
            "pageIndex": {"type": "integer", "description": "Filter by page index"},
            "sectionId": {"type": "string", "description": "Filter by section ID"},
            "contentType": {"type": "string", "description": "Filter by content type (text, image, etc.)"},
        },
        "required": ["fileId"],
    },
    readOnly=True,
)
registry.register(
    "extractContainerItem", _extractContainerItem,
    description="Extract a specific item from a container file (ZIP, nested file). Use browseContainer to see available items.",
    parameters={
        "type": "object",
        "properties": {
            "fileId": {"type": "string", "description": "The container file ID"},
            "containerPath": {"type": "string", "description": "Path within the container"},
        },
        "required": ["fileId", "containerPath"],
    },
    readOnly=True,
)
registry.register(
    "summarizeContent", _summarizeContent,
    description="Generate an AI-powered summary of a file's content. Optionally filter by section, page, or content type.",
    parameters={
        "type": "object",
        "properties": {
            "fileId": {"type": "string", "description": "The file ID"},
            "sectionId": {"type": "string", "description": "Optional: summarize only this section"},
            "pageIndex": {"type": "integer", "description": "Optional: summarize only this page"},
            "contentType": {"type": "string", "description": "Optional: filter by content type"},
        },
        "required": ["fileId"],
    },
    readOnly=True,
)
# ---- Vision tool ----
async def _describeImage(args: Dict[str, Any], context: Dict[str, Any]):
    """Analyse an image using AI vision. Uses Knowledge Store chunks produced by Extractors.

    Resolution order for the image bytes:
      1. Existing Knowledge Store image chunks for the file (optionally
         filtered by pageIndex).
      2. On-demand extraction + indexing when the file is not yet indexed,
         then a retry of step 1.
      3. The raw file itself, when its mimeType is image/*.
    Returns a ToolResult whose data is the vision model's textual answer.
    """
    fileId = args.get("fileId", "")
    prompt = args.get("prompt", "Describe this image in detail. Extract all visible text, tables, and data.")
    pageIndex = args.get("pageIndex")
    if not fileId:
        return ToolResult(toolCallId="", toolName="describeImage", success=False, error="fileId is required")
    try:
        import base64 as _b64
        imageData = None  # base64-encoded image payload, once resolved
        mimeType = "image/png"  # default; refined from chunk metadata or magic bytes below
        knowledgeService = services.getService("knowledge") if hasattr(services, "getService") else None
        # 1) Knowledge Store: image chunks already produced by PdfExtractor / ImageExtractor
        if knowledgeService:
            chunks = knowledgeService._knowledgeDb.getContentChunks(fileId)
            imageChunks = [c for c in (chunks or []) if c.get("contentType") == "image"]
            if pageIndex is not None:
                imageChunks = [c for c in imageChunks if c.get("contextRef", {}).get("pageIndex") == pageIndex]
            if imageChunks:
                # Only the first matching chunk is analysed.
                imageData = imageChunks[0].get("data", "")
                chunkMime = imageChunks[0].get("contextRef", {}).get("mimeType")
                if chunkMime:
                    mimeType = chunkMime
        # 2) File not yet indexed -> trigger extraction via ExtractionService, then retry
        if not imageData and knowledgeService and not knowledgeService.isFileIndexed(fileId):
            try:
                chatService = services.chat
                fileInfo = chatService.getFileInfo(fileId)
                fileContent = chatService.getFileContent(fileId)
                if fileContent and fileInfo:
                    rawData = fileContent.get("data", "")
                    # len > 100 filters out trivially small / placeholder strings
                    # before attempting a base64 decode.
                    if isinstance(rawData, str) and len(rawData) > 100:
                        rawBytes = _b64.b64decode(rawData)
                    elif isinstance(rawData, bytes):
                        rawBytes = rawData
                    else:
                        rawBytes = None
                    if rawBytes:
                        from modules.serviceCenter.services.serviceExtraction.subRegistry import ExtractorRegistry
                        from modules.serviceCenter.services.serviceExtraction.subPipeline import runExtraction
                        from modules.datamodels.datamodelExtraction import ExtractionOptions
                        fileMime = fileInfo.get("mimeType", "application/octet-stream")
                        fileName = fileInfo.get("fileName", fileId)
                        extracted = runExtraction(
                            ExtractorRegistry(), None,
                            rawBytes, fileName, fileMime, ExtractionOptions(),
                        )
                        # Convert extraction parts into indexable content objects;
                        # parts with empty data are skipped.
                        contentObjects = []
                        for part in extracted.parts:
                            tg = (part.typeGroup or "").lower()
                            ct = "image" if tg == "image" else "text"
                            if not part.data or not part.data.strip():
                                continue
                            contentObjects.append({
                                "contentObjectId": part.id,
                                "contentType": ct,
                                "data": part.data,
                                "contextRef": {"containerPath": fileName, "location": part.label, **(part.metadata or {})},
                            })
                        if contentObjects:
                            _diFiId, _diMId = _resolveFileScope(fileId, context)
                            await knowledgeService.indexFile(
                                fileId=fileId, fileName=fileName, mimeType=fileMime,
                                userId=context.get("userId", ""), contentObjects=contentObjects,
                                featureInstanceId=_diFiId,
                                mandateId=_diMId,
                            )
                            # Retry the chunk lookup now that the file is indexed.
                            chunks = knowledgeService._knowledgeDb.getContentChunks(fileId)
                            imageChunks = [c for c in (chunks or []) if c.get("contentType") == "image"]
                            if pageIndex is not None:
                                imageChunks = [c for c in imageChunks if c.get("contextRef", {}).get("pageIndex") == pageIndex]
                            if imageChunks:
                                imageData = imageChunks[0].get("data", "")
            except Exception as extractErr:
                # Extraction is best-effort; fall through to the direct-file path.
                logger.warning(f"describeImage: on-demand extraction failed: {extractErr}")
        # 3) Direct image file (not a container) - use raw file data
        if not imageData:
            chatService = services.chat
            fileContent = chatService.getFileContent(fileId)
            if fileContent:
                fileMimeType = fileContent.get("mimeType", "")
                if fileMimeType.startswith("image/"):
                    imageData = fileContent.get("data", "")
                    mimeType = fileMimeType
        if not imageData:
            # Nothing image-like found anywhere: steer the model to readFile instead.
            chatService = services.chat
            fileInfo = chatService.getFileInfo(fileId) if hasattr(chatService, "getFileInfo") else None
            fileName = fileInfo.get("fileName", fileId) if fileInfo else fileId
            fileMime = fileInfo.get("mimeType", "unknown") if fileInfo else "unknown"
            return ToolResult(toolCallId="", toolName="describeImage", success=False,
                              error=f"No image data found in '{fileName}' (type: {fileMime}). "
                                    f"This file likely contains text, not images. Use readFile(fileId=\"{fileId}\") to access its text content.")
        # Sniff the true mime type from the decoded magic bytes; the first 32
        # base64 chars decode to 24 bytes, enough for every signature checked.
        try:
            rawHead = _b64.b64decode(imageData[:32])
            if rawHead[:3] == b"\xff\xd8\xff":
                mimeType = "image/jpeg"
            elif rawHead[:8] == b"\x89PNG\r\n\x1a\n":
                mimeType = "image/png"
            elif rawHead[:4] == b"GIF8":
                mimeType = "image/gif"
            elif rawHead[:4] == b"RIFF" and rawHead[8:12] == b"WEBP":
                mimeType = "image/webp"
        except Exception:
            pass
        dataUrl = f"data:{mimeType};base64,{imageData}"
        from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum as OTE
        _opType = OTE.IMAGE_ANALYSE
        # Files flagged `neutralize` are routed to NEUTRALIZATION_IMAGE
        # (internal models only); lookup failures fall back to the default.
        try:
            from modules.datamodels.datamodelFiles import FileItem as _FileItemModel
            from modules.interfaces.interfaceDbManagement import ComponentObjects as _CO
            _fRow = _CO().db._loadRecord(_FileItemModel, fileId)
            if _fRow:
                # The record may come back as a dict or a model instance.
                _fGet = (lambda k, d=None: _fRow.get(k, d)) if isinstance(_fRow, dict) else (lambda k, d=None: getattr(_fRow, k, d))
                if bool(_fGet("neutralize", False)):
                    _opType = OTE.NEUTRALIZATION_IMAGE
                    logger.info(f"describeImage: file {fileId} has neutralize=True, using NEUTRALIZATION_IMAGE (internal models only)")
        except Exception:
            pass
        visionRequest = AiCallRequest(
            prompt=prompt,
            options=AiCallOptions(operationType=_opType),
            messages=[{"role": "user", "content": [
                {"type": "text", "text": prompt},
                {"type": "image_url", "image_url": {"url": dataUrl}},
            ]}],
        )
        visionResponse = await services.ai.callAi(visionRequest)
        if visionResponse.errorCount > 0:
            return ToolResult(toolCallId="", toolName="describeImage", success=False, error=visionResponse.content)
        return ToolResult(toolCallId="", toolName="describeImage", success=True, data=visionResponse.content)
    except Exception as e:
        return ToolResult(toolCallId="", toolName="describeImage", success=False, error=str(e))
# Register the vision tool (read-only: it analyses but never writes files).
registry.register(
    "describeImage", _describeImage,
    description="Analyze an image using AI vision. Works with image files and images extracted from PDFs/DOCX/PPTX. Use for OCR, data extraction, and visual analysis.",
    parameters={
        "type": "object",
        "properties": {
            "fileId": {"type": "string", "description": "The file ID containing the image or document with images"},
            "prompt": {"type": "string", "description": "What to look for in the image (default: describe everything)"},
            "pageIndex": {"type": "integer", "description": "Filter images by page index (0-based, for multi-page documents)"},
        },
        "required": ["fileId"],
    },
    readOnly=True,
)

View file

@ -0,0 +1,160 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Feature Data Sub-Agent tool (queryFeatureInstance)."""
import logging
from typing import Any, Dict, List, Optional
from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
from modules.serviceCenter.services.serviceAgent.coreTools._helpers import (
_getOrCreateTempFolder,
_looksLikeBinary,
_resolveFileScope,
_MAX_TOOL_RESULT_CHARS,
)
logger = logging.getLogger(__name__)
def _registerFeatureSubAgentTools(registry: ToolRegistry, services):
    """Auto-extracted from registerCoreTools.

    Registers `queryFeatureInstance`, which answers questions about a
    feature instance's data by delegating to the Feature Data Sub-Agent.
    """
    # ---- Feature Data Sub-Agent tool ----
    async def _queryFeatureInstance(args: Dict[str, Any], context: Dict[str, Any]):
        """Delegate a question to the Feature Data Sub-Agent."""
        featureInstanceId = args.get("featureInstanceId", "")
        question = args.get("question", "")
        if not featureInstanceId or not question:
            return ToolResult(
                toolCallId="", toolName="queryFeatureInstance",
                success=False, error="featureInstanceId and question are required",
            )
        try:
            from modules.serviceCenter.services.serviceAgent.featureDataAgent import runFeatureDataAgent
            from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource
            from modules.interfaces.interfaceDbApp import getRootInterface
            rootIf = getRootInterface()
            instance = rootIf.getFeatureInstance(featureInstanceId)
            if not instance:
                return ToolResult(
                    toolCallId="", toolName="queryFeatureInstance",
                    success=False, error=f"Feature instance {featureInstanceId} not found",
                )
            featureCode = instance.featureCode
            mandateId = instance.mandateId or ""
            instanceLabel = instance.label or ""
            userId = context.get("userId", "")
            # The calling agent's own instance id acts as the workspace scope.
            workspaceInstanceId = context.get("featureInstanceId", "")
            rootDbConn = rootIf.db if hasattr(rootIf, "db") else None
            if rootDbConn is None:
                return ToolResult(
                    toolCallId="", toolName="queryFeatureInstance",
                    success=False, error="No database connector available",
                )
            # Data sources the user attached for this instance in this workspace.
            featureDataSources = rootDbConn.getRecordset(
                FeatureDataSource,
                recordFilter={"featureInstanceId": featureInstanceId, "workspaceInstanceId": workspaceInstanceId},
            )
            # If any attached source is flagged `neutralize`, every sub-agent
            # AI call must request neutralization (see _subAgentAiCall below).
            _anySourceNeutralize = any(
                bool(ds.get("neutralize", False) if isinstance(ds, dict) else getattr(ds, "neutralize", False))
                for ds in (featureDataSources or [])
            )
            from modules.security.rbacCatalog import getCatalogService
            catalog = getCatalogService()
            tableFilters = {}
            if not featureDataSources:
                # No explicit sources: expose the feature's full data catalog.
                selectedTables = catalog.getDataObjects(featureCode)
            else:
                # Restrict to the attached tables and collect per-table record filters.
                allObjs = {o["meta"]["table"]: o for o in catalog.getDataObjects(featureCode) if "meta" in o and "table" in o.get("meta", {})}
                selectedTables = [allObjs[ds["tableName"]] for ds in featureDataSources if ds.get("tableName") in allObjs]
                # NOTE(review): this branch assumes each data source is a dict
                # (uses .get), while _anySourceNeutralize above also handles
                # model objects -- confirm getRecordset's return shape.
                for ds in featureDataSources:
                    rf = ds.get("recordFilter")
                    if rf and isinstance(rf, dict) and ds.get("tableName"):
                        tableFilters[ds["tableName"]] = rf
            if not selectedTables:
                return ToolResult(
                    toolCallId="", toolName="queryFeatureInstance",
                    success=False, error=f"No data tables available for feature '{featureCode}'",
                )
            from modules.connectors.connectorDbPostgre import DatabaseConnector
            from modules.shared.configuration import APP_CONFIG
            # Each feature stores its data in its own database: poweron_<featurecode>.
            featureDbName = f"poweron_{featureCode.lower()}"
            featureDbConn = DatabaseConnector(
                dbHost=APP_CONFIG.get("DB_HOST", "localhost"),
                dbDatabase=featureDbName,
                dbUser=APP_CONFIG.get("DB_USER"),
                dbPassword=APP_CONFIG.get("DB_PASSWORD_SECRET"),
                dbPort=int(APP_CONFIG.get("DB_PORT", 5432)),
                userId=userId or "agent",
            )
            aiService = services.ai if hasattr(services, "ai") else None
            if aiService is None:
                return ToolResult(
                    toolCallId="", toolName="queryFeatureInstance",
                    success=False, error="AI service not available for sub-agent",
                )
            async def _subAgentAiCall(req):
                # Wrapper that enforces neutralization when any source requires it.
                if _anySourceNeutralize:
                    req.requireNeutralization = True
                return await aiService.callAi(req)
            try:
                answer = await runFeatureDataAgent(
                    question=question,
                    featureInstanceId=featureInstanceId,
                    featureCode=featureCode,
                    selectedTables=selectedTables,
                    mandateId=mandateId,
                    userId=userId,
                    aiCallFn=_subAgentAiCall,
                    dbConnector=featureDbConn,
                    instanceLabel=instanceLabel,
                    tableFilters=tableFilters,
                )
            finally:
                # Always release the per-call DB connection, even on failure.
                try:
                    featureDbConn.close()
                except Exception:
                    pass
            return ToolResult(
                toolCallId="", toolName="queryFeatureInstance",
                success=True, data=answer,
            )
        except Exception as e:
            logger.error(f"queryFeatureInstance failed: {e}", exc_info=True)
            return ToolResult(
                toolCallId="", toolName="queryFeatureInstance",
                success=False, error=str(e),
            )
    registry.register(
        "queryFeatureInstance", _queryFeatureInstance,
        description=(
            "Query data from a feature instance (e.g. Trustee, CommCoach). "
            "Delegates to a specialized sub-agent that knows the feature's data schema "
            "and can browse/query its tables. Use this when the user has attached "
            "feature data sources or asks about feature-specific data."
        ),
        parameters={
            "type": "object",
            "properties": {
                "featureInstanceId": {"type": "string", "description": "ID of the feature instance to query"},
                "question": {"type": "string", "description": "What data to find or analyze from this feature instance"},
            },
            "required": ["featureInstanceId", "question"]
        },
        readOnly=True
    )

View file

@ -0,0 +1,62 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Shared helpers for core agent tools (file scope, binary detection, temp folder)."""
import logging
from typing import Optional
# Hard cap on characters a tool may return, keeping results within LLM context budgets.
_MAX_TOOL_RESULT_CHARS = 50_000
# Magic-byte prefixes of common binary formats: PDF, PNG, JPEG, GIF, ZIP/OOXML, RAR, gzip.
_BINARY_SIGNATURES = (b"%PDF", b"\x89PNG", b"\xff\xd8\xff", b"GIF8", b"PK\x03\x04", b"Rar!", b"\x1f\x8b")
def _resolveFileScope(fileId: str, context: dict) -> tuple:
"""Resolve featureInstanceId and mandateId for a file from context or management DB.
Returns (featureInstanceId, mandateId) never None, always strings.
"""
fiId = context.get("featureInstanceId", "") or ""
mId = context.get("mandateId", "") or ""
if fiId and mId:
return fiId, mId
try:
from modules.datamodels.datamodelFiles import FileItem
from modules.interfaces.interfaceDbManagement import ComponentObjects
fm = ComponentObjects().db._loadRecord(FileItem, fileId)
if fm:
_get = (lambda k: fm.get(k, "")) if isinstance(fm, dict) else (lambda k: getattr(fm, k, ""))
fiId = fiId or str(_get("featureInstanceId") or "")
mId = mId or str(_get("mandateId") or "")
except Exception:
pass
return fiId, mId
def _looksLikeBinary(data: bytes, sampleSize: int = 1024) -> bool:
    """Detect binary content by checking for magic bytes and non-printable char ratio."""
    # Magic-byte prefixes are the cheapest, most reliable signal;
    # bytes.startswith accepts a tuple of candidate prefixes.
    if data[:8].startswith(_BINARY_SIGNATURES):
        return True
    sample = data[:sampleSize]
    if not sample:
        return False
    # Count control bytes, excluding the whitespace range 0x09-0x0D and ESC (0x1B).
    controlCount = 0
    for byte in sample:
        if byte < 0x09 or (0x0E <= byte < 0x20 and byte != 0x1B):
            controlCount += 1
    return controlCount / len(sample) > 0.10
def _getOrCreateTempFolder(chatService) -> Optional[str]:
"""Return the ID of the root-level 'Temp' folder, creating it if it doesn't exist."""
try:
allFolders = chatService.interfaceDbComponent.listFolders()
tempFolder = next(
(f for f in allFolders
if f.get("name") == "Temp" and not f.get("parentId")),
None,
)
if tempFolder:
return tempFolder.get("id")
newFolder = chatService.interfaceDbComponent.createFolder("Temp", parentId=None)
return newFolder.get("id") if newFolder else None
except Exception as e:
logger.warning(f"Could not get/create Temp folder: {e}")
return None

View file

@ -0,0 +1,958 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Media and utility tools (render, TTS, STT, image gen, charts, neutralize, code exec)."""
import logging
from typing import Any, Dict, List, Optional
from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
from modules.serviceCenter.services.serviceAgent.coreTools._helpers import (
_getOrCreateTempFolder,
_looksLikeBinary,
_resolveFileScope,
_MAX_TOOL_RESULT_CHARS,
)
logger = logging.getLogger(__name__)
def _registerMediaTools(registry: ToolRegistry, services):
"""Auto-extracted from registerCoreTools."""
# ---- Document rendering tool ----
def _markdownToDocumentJson(markdown: str, title: str, language: str = "de") -> Dict[str, Any]:
"""Convert markdown content to the standard document JSON format expected by renderers."""
import re as _re
sections = []
order = 0
lines = markdown.split("\n")
i = 0
def _nextId():
nonlocal order
order += 1
return f"s_{order}"
while i < len(lines):
line = lines[i]
# --- Headings ---
headingMatch = _re.match(r'^(#{1,6})\s+(.+)', line)
if headingMatch:
level = len(headingMatch.group(1))
text = headingMatch.group(2).strip()
sections.append({
"id": _nextId(), "content_type": "heading", "order": order,
"elements": [{"content": {"text": text, "level": level}}],
})
i += 1
continue
# --- Fenced code blocks ---
codeMatch = _re.match(r'^```(\w*)', line)
if codeMatch:
lang = codeMatch.group(1) or "text"
codeLines = []
i += 1
while i < len(lines) and not lines[i].startswith("```"):
codeLines.append(lines[i])
i += 1
i += 1
sections.append({
"id": _nextId(), "content_type": "code_block", "order": order,
"elements": [{"content": {"code": "\n".join(codeLines), "language": lang}}],
})
continue
# --- Tables ---
tableMatch = _re.match(r'^\|(.+)\|$', line)
if tableMatch and (i + 1) < len(lines) and _re.match(r'^\|[\s\-:|]+\|$', lines[i + 1]):
headerCells = [c.strip() for c in tableMatch.group(1).split("|")]
i += 2
rows = []
while i < len(lines) and _re.match(r'^\|(.+)\|$', lines[i]):
rowCells = [c.strip() for c in lines[i][1:-1].split("|")]
rows.append(rowCells)
i += 1
sections.append({
"id": _nextId(), "content_type": "table", "order": order,
"elements": [{"content": {"headers": headerCells, "rows": rows}}],
})
continue
# --- Bullet / numbered lists ---
listMatch = _re.match(r'^(\s*)([-*+]|\d+[.)]) (.+)', line)
if listMatch:
isNumbered = bool(_re.match(r'\d+[.)]', listMatch.group(2)))
items = []
while i < len(lines) and _re.match(r'^(\s*)([-*+]|\d+[.)]) (.+)', lines[i]):
m = _re.match(r'^(\s*)([-*+]|\d+[.)]) (.+)', lines[i])
items.append({"text": m.group(3).strip()})
i += 1
sections.append({
"id": _nextId(), "content_type": "bullet_list", "order": order,
"elements": [{"content": {"items": items, "list_type": "numbered" if isNumbered else "bullet"}}],
})
continue
# --- Empty lines (skip) ---
if not line.strip():
i += 1
continue
# --- Images: ![alt](file:fileId) or ![alt](url) ---
imgMatch = _re.match(r'^!\[([^\]]*)\]\(([^)]+)\)', line)
if imgMatch:
altText = imgMatch.group(1).strip() or "Image"
src = imgMatch.group(2).strip()
fileId = ""
if src.startswith("file:"):
fileId = src[5:]
sections.append({
"id": _nextId(), "content_type": "image", "order": order,
"elements": [{
"content": {
"altText": altText,
"base64Data": "",
"_fileRef": fileId,
"_srcUrl": src if not fileId else "",
}
}],
})
i += 1
continue
# --- Paragraph (collect consecutive non-empty lines) ---
paraLines = []
while i < len(lines) and lines[i].strip() and not _re.match(r'^(#{1,6}\s|```|\|.+\||!\[|(\s*)([-*+]|\d+[.)]) )', lines[i]):
paraLines.append(lines[i])
i += 1
if paraLines:
sections.append({
"id": _nextId(), "content_type": "paragraph", "order": order,
"elements": [{"content": {"text": " ".join(paraLines)}}],
})
continue
i += 1
if not sections:
sections.append({
"id": _nextId(), "content_type": "paragraph", "order": order,
"elements": [{"content": {"text": markdown.strip() or "(empty)"}}],
})
return {
"metadata": {
"split_strategy": "single_document",
"source_documents": [],
"extraction_method": "agent_rendering",
"title": title,
"language": language,
},
"documents": [{
"id": "doc_1",
"title": title,
"sections": sections,
}],
}
async def _renderDocument(args: Dict[str, Any], context: Dict[str, Any]):
    """Render agent-produced markdown content into any document format via the RendererRegistry.

    Markdown comes either inline (`content`) or from a previously written
    chat file (`sourceFileId`); it is parsed into the structured document
    JSON, image references are resolved to base64 data, then handed to the
    generation service. Rendered files are saved into the Temp folder and
    reported via `fileCreated` side events.
    """
    import re as _re
    sourceFileId = (args.get("sourceFileId") or "").strip()
    content = args.get("content", "")
    if not isinstance(content, str):
        content = str(content) if content is not None else ""
    outputFormat = args.get("outputFormat", "pdf")
    title = args.get("title", "Document")
    language = args.get("language", "de")
    # --- Load markdown from a chat file when sourceFileId is given ---
    if sourceFileId:
        try:
            dbMgmt = services.chat.interfaceDbComponent
            fileRow = dbMgmt.getFile(sourceFileId)
            if not fileRow:
                return ToolResult(
                    toolCallId="",
                    toolName="renderDocument",
                    success=False,
                    error=f"sourceFileId not found: {sourceFileId}",
                )
            rawBytes = dbMgmt.getFileData(sourceFileId)
            if not rawBytes:
                return ToolResult(
                    toolCallId="",
                    toolName="renderDocument",
                    success=False,
                    error=f"sourceFileId has no data: {sourceFileId}",
                )
            try:
                content = rawBytes.decode("utf-8")
            except UnicodeDecodeError:
                # Lossy fallback so rendering still proceeds on odd encodings.
                content = rawBytes.decode("latin-1", errors="replace")
        except Exception as e:
            return ToolResult(
                toolCallId="",
                toolName="renderDocument",
                success=False,
                error=f"Could not read sourceFileId: {e}",
            )
    if not (content or "").strip():
        return ToolResult(
            toolCallId="",
            toolName="renderDocument",
            success=False,
            error=(
                "Provide non-empty `content` (markdown) or `sourceFileId` (id of a .md/.txt from writeFile). "
                "For long documents use writeFile create+append, then renderDocument(sourceFileId=...)."
            ),
        )
    # Inline content is capped relative to the model's output budget
    # (~3 chars per token, half the budget; floor 3000) to avoid markdown
    # that was truncated by the model mid-generation.
    modelMaxTokens = context.get("modelMaxOutputTokens", 0)
    _inlineCharLimit = int(modelMaxTokens * 3 * 0.5) if modelMaxTokens > 0 else 6000
    _inlineCharLimit = max(_inlineCharLimit, 3000)
    if not sourceFileId and len(content) > _inlineCharLimit:
        return ToolResult(
            toolCallId="",
            toolName="renderDocument",
            success=False,
            error=(
                f"Inline `content` is {len(content)} chars — over the {_inlineCharLimit} char limit "
                f"(derived from model output budget of {modelMaxTokens} tokens). "
                "Large documents must use the file path:\n"
                "1. writeFile(mode='create', name='draft.md', content=<first ~5000 chars>)\n"
                "2. writeFile(mode='append', fileId=<id>, content=<next chunk>) — repeat as needed\n"
                "3. renderDocument(sourceFileId=<id>, outputFormat='pdf', title='...')\n"
                "This avoids output truncation entirely."
            ),
        )
    try:
        structuredContent = _markdownToDocumentJson(content, title, language)
        # Resolve image file references (file:fileId) to base64 data from Knowledge Store
        knowledgeService = None
        try:
            knowledgeService = services.getService("knowledge")
        except Exception:
            pass
        resolvedImages = 0
        for doc in structuredContent.get("documents", []):
            for section in doc.get("sections", []):
                if section.get("content_type") != "image":
                    continue
                for element in section.get("elements", []):
                    contentObj = element.get("content", {})
                    fileRef = contentObj.get("_fileRef", "")
                    if not fileRef or contentObj.get("base64Data"):
                        continue
                    # Preferred source: already-indexed image chunks.
                    if knowledgeService:
                        chunks = knowledgeService._knowledgeDb.getContentChunks(fileRef)
                        imageChunks = [c for c in (chunks or []) if c.get("contentType") == "image"]
                        if imageChunks:
                            contentObj["base64Data"] = imageChunks[0].get("data", "")
                            chunkMime = imageChunks[0].get("contextRef", {}).get("mimeType", "image/png")
                            contentObj["mimeType"] = chunkMime
                            resolvedImages += 1
                    # Fallback: raw file bytes (mime assumed PNG -- TODO confirm).
                    if not contentObj.get("base64Data"):
                        try:
                            rawBytes = services.chat.getFileData(fileRef)
                            if rawBytes:
                                import base64 as _b64
                                contentObj["base64Data"] = _b64.b64encode(rawBytes).decode("ascii")
                                contentObj["mimeType"] = "image/png"
                                resolvedImages += 1
                        except Exception:
                            pass
                    # Internal markers must not leak into renderer input.
                    contentObj.pop("_fileRef", None)
                    contentObj.pop("_srcUrl", None)
        sectionCount = len(structuredContent.get("documents", [{}])[0].get("sections", []))
        logger.info(f"renderDocument: parsed {sectionCount} sections from markdown ({len(content)} chars), resolved {resolvedImages} image(s), format={outputFormat}")
        generationService = services.getService("generation")
        documents = await generationService.renderReport(
            extractedContent=structuredContent,
            outputFormat=outputFormat,
            language=language,
            title=title,
            userPrompt=content,
        )
        if not documents:
            return ToolResult(toolCallId="", toolName="renderDocument", success=False, error="Rendering produced no output")
        # --- Persist rendered documents and emit fileCreated side events ---
        savedFiles = []
        sideEvents = []
        chatService = services.chat
        sanitizedTitle = _re.sub(r'[^\w._-]', '_', title, flags=_re.UNICODE).strip('_') or "document"
        for doc in documents:
            docData = doc.documentData if hasattr(doc, "documentData") else b""
            docName = doc.filename if hasattr(doc, "filename") else f"{sanitizedTitle}.{outputFormat}"
            docMime = doc.mimeType if hasattr(doc, "mimeType") else "application/octet-stream"
            if not docName.lower().endswith(f".{outputFormat}"):
                docName = f"{sanitizedTitle}.{outputFormat}"
            fileItem = None
            # Prefer the dedicated generated-file path when available.
            if hasattr(chatService.interfaceDbComponent, "saveGeneratedFile"):
                fileItem = chatService.interfaceDbComponent.saveGeneratedFile(docData, docName, docMime)
            else:
                fileItem, _ = chatService.interfaceDbComponent.saveUploadedFile(docData, docName)
            if fileItem:
                fid = fileItem.id if hasattr(fileItem, "id") else fileItem.get("id", "?")
                fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
                if fiId:
                    chatService.interfaceDbComponent.updateFile(fid, {"featureInstanceId": fiId})
                # File the output under the root-level Temp folder when possible.
                tempFolderId = _getOrCreateTempFolder(chatService)
                if tempFolderId:
                    chatService.interfaceDbComponent.updateFile(fid, {"folderId": tempFolderId})
                savedFiles.append(f"- {docName} (id: {fid})")
                sideEvents.append({
                    "type": "fileCreated",
                    "data": {
                        "fileId": fid,
                        "fileName": docName,
                        "mimeType": docMime,
                        "fileSize": len(docData),
                    },
                })
        result = f"Rendered {len(documents)} document(s):\n" + "\n".join(savedFiles)
        return ToolResult(toolCallId="", toolName="renderDocument", success=True, data=result, sideEvents=sideEvents)
    except Exception as e:
        logger.error(f"renderDocument failed: {e}")
        return ToolResult(toolCallId="", toolName="renderDocument", success=False, error=str(e))
# Register the renderer tool. Not read-only: it creates files in the chat store.
registry.register(
    "renderDocument", _renderDocument,
    description=(
        "Render markdown into a document file (PDF, DOCX, XLSX, PPTX, CSV, HTML, MD, JSON, TXT). "
        "For long documents: write markdown with writeFile (mode=create then append chunks), then call this tool with "
        "`sourceFileId` only (tiny JSON — avoids model output truncation). For short docs you may pass `content` inline. "
        "Images: ![alt text](file:fileId) in the markdown."
    ),
    parameters={
        "type": "object",
        "properties": {
            "content": {
                "type": "string",
                "description": "Full markdown inline. Prefer `sourceFileId` when the document is large (many KB).",
            },
            "sourceFileId": {
                "type": "string",
                "description": "Chat file id of markdown saved via writeFile (create+append). Use this instead of `content` for long PDFs.",
            },
            "outputFormat": {"type": "string", "description": "Target format: pdf, docx, xlsx, pptx, csv, html, md, json, txt", "default": "pdf"},
            "title": {"type": "string", "description": "Document title", "default": "Document"},
            "language": {"type": "string", "description": "Document language (ISO 639-1)", "default": "de"},
        },
    },
    readOnly=False,
)
# ── textToSpeech tool ──────────────────────────────────────────────
def _stripMarkdownForTts(text: str) -> str:
"""Strip markdown formatting so TTS reads clean speech text."""
import re as _re
t = text
t = _re.sub(r'\*\*(.+?)\*\*', r'\1', t)
t = _re.sub(r'\*(.+?)\*', r'\1', t)
t = _re.sub(r'__(.+?)__', r'\1', t)
t = _re.sub(r'_(.+?)_', r'\1', t)
t = _re.sub(r'`[^`]+`', lambda m: m.group(0)[1:-1], t)
t = _re.sub(r'^#{1,6}\s*', '', t, flags=_re.MULTILINE)
t = _re.sub(r'^\s*[-*+]\s+', '', t, flags=_re.MULTILINE)
t = _re.sub(r'^\s*\d+\.\s+', '', t, flags=_re.MULTILINE)
t = _re.sub(r'\[(.+?)\]\(.+?\)', r'\1', t)
t = _re.sub(r'!\[.*?\]\(.*?\)', '', t)
t = _re.sub(r'\n{3,}', '\n\n', t)
return t.strip()
async def _textToSpeech(args: Dict[str, Any], context: Dict[str, Any]):
    """Convert text to speech using Google Cloud TTS, deliver audio via SSE.

    Reads from ``args``: ``text`` (required), ``language`` (BCP-47 code or
    "auto", default "auto") and optional ``voiceName``. The synthesized audio
    is returned base64-encoded inside a ``voiceResponse`` side event so the
    chat frontend can play it; ``data`` carries a short human-readable summary.
    """
    import base64 as _b64
    text = args.get("text", "")
    language = args.get("language", "auto")
    voiceName = args.get("voiceName")
    if not text:
        return ToolResult(toolCallId="", toolName="textToSpeech", success=False, error="text is required")
    # Markdown markers would be read aloud verbatim, so strip them first.
    cleanText = _stripMarkdownForTts(text)
    if not cleanText:
        return ToolResult(toolCallId="", toolName="textToSpeech", success=False, error="text is empty after stripping markdown")
    try:
        from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
        mandateId = context.get("mandateId", "")
        voiceInterface = getVoiceInterface(currentUser=None, mandateId=mandateId)
        # Detection yields ISO 639-1 codes; the TTS backend expects BCP-47 codes.
        _ISO_TO_BCP47 = {
            "de": "de-DE", "en": "en-US", "fr": "fr-FR", "it": "it-IT",
            "es": "es-ES", "pt": "pt-BR", "nl": "nl-NL", "pl": "pl-PL",
            "ru": "ru-RU", "ja": "ja-JP", "zh": "zh-CN", "ko": "ko-KR",
            "ar": "ar-XA", "hi": "hi-IN", "tr": "tr-TR", "sv": "sv-SE",
        }
        if language == "auto":
            try:
                # A 500-char sample is enough for detection and keeps the call cheap.
                snippet = cleanText[:500]
                detectResult = await voiceInterface.detectLanguage(snippet)
                if detectResult and detectResult.get("success"):
                    detected = detectResult.get("language", "de")
                    language = _ISO_TO_BCP47.get(detected, detected)
                    if "-" not in language:
                        # Unknown ISO code: best-effort region variant (e.g. "fi" -> "fi-FI").
                        language = _ISO_TO_BCP47.get(language, f"{language}-{language.upper()}")
                    logger.info(f"textToSpeech: auto-detected language '{detected}' -> '{language}'")
                else:
                    language = "de-DE"
            except Exception as detectErr:
                logger.warning(f"textToSpeech: language detection failed: {detectErr}, defaulting to de-DE")
                language = "de-DE"
        if not voiceName:
            # No explicit voice requested: resolve from stored user voice
            # preferences (mandate-scoped record first, then global, then any).
            try:
                from modules.datamodels.datamodelUam import UserVoicePreferences
                from modules.interfaces.interfaceDbApp import getRootInterface
                userId = context.get("userId", "")
                if userId:
                    rootIf = getRootInterface()
                    prefRecords = rootIf.db.getRecordset(
                        UserVoicePreferences,
                        recordFilter={"userId": userId}
                    )
                    if prefRecords:
                        # Records may come back as dicts or pydantic models; normalize to dicts.
                        allPrefs = [
                            r if isinstance(r, dict) else r.model_dump() if hasattr(r, "model_dump") else r
                            for r in prefRecords
                        ]
                        _mid = str(mandateId or "").strip()
                        scopedPref = next((p for p in allPrefs if str(p.get("mandateId") or "").strip() == _mid), None)
                        globalPref = next((p for p in allPrefs if not str(p.get("mandateId") or "").strip()), None)
                        def _resolveVoiceFromMap(prefDict, lang):
                            # Look up a voice in the record's ttsVoiceMap by exact
                            # language code, then base language, then prefix match.
                            vm = (prefDict or {}).get("ttsVoiceMap", {}) or {}
                            if not isinstance(vm, dict) or not vm:
                                return None
                            baseLang = lang.split("-")[0].lower() if isinstance(lang, str) and lang else ""
                            langNorm = str(lang or "").strip()
                            if langNorm in vm:
                                entry = vm[langNorm]
                                return entry.get("voiceName") if isinstance(entry, dict) else entry
                            if baseLang and baseLang in vm:
                                entry = vm[baseLang]
                                return entry.get("voiceName") if isinstance(entry, dict) else entry
                            if baseLang:
                                for mk, mv in vm.items():
                                    mkn = str(mk).lower()
                                    if mkn == baseLang or mkn.startswith(f"{baseLang}-"):
                                        return mv.get("voiceName") if isinstance(mv, dict) else mv
                            return None
                        voiceName = (
                            _resolveVoiceFromMap(scopedPref, language)
                            or _resolveVoiceFromMap(globalPref, language)
                            or _resolveVoiceFromMap(allPrefs[0], language)
                        )
                        if not voiceName:
                            # Legacy fallback: single ttsVoice field, used only when its
                            # ttsLanguage matches the resolved language exactly.
                            for candidate in [globalPref, scopedPref, allPrefs[0]]:
                                if candidate and candidate.get("ttsVoice") and candidate.get("ttsLanguage") == language:
                                    voiceName = candidate["ttsVoice"]
                                    break
                        if voiceName:
                            logger.info(f"textToSpeech: using configured voice '{voiceName}' for language '{language}'")
            except Exception as prefErr:
                # Preference lookup is best-effort; the TTS backend falls back to a default voice.
                logger.debug(f"textToSpeech: could not load voice preferences: {prefErr}")
        ttsResult = await voiceInterface.textToSpeech(
            text=cleanText,
            languageCode=language,
            voiceName=voiceName,
        )
        if not ttsResult or not ttsResult.get("success"):
            errMsg = ttsResult.get("error", "TTS call failed") if ttsResult else "TTS returned None"
            return ToolResult(toolCallId="", toolName="textToSpeech", success=False, error=errMsg)
        audioContent = ttsResult.get("audioContent", "")
        if not audioContent:
            return ToolResult(toolCallId="", toolName="textToSpeech", success=False, error="TTS returned no audio")
        # Normalize the audio payload to base64 text for the SSE event
        # (backend may return raw bytes or an already-encoded string).
        if isinstance(audioContent, bytes):
            audioB64 = _b64.b64encode(audioContent).decode("ascii")
        elif isinstance(audioContent, str):
            audioB64 = audioContent
        else:
            audioB64 = str(audioContent)
        audioFormat = ttsResult.get("audioFormat", "mp3")
        charCount = len(cleanText)
        usedVoice = voiceName or "default"
        logger.info(f"textToSpeech: generated {audioFormat} audio for {charCount} chars, language={language}, voice={usedVoice}")
        return ToolResult(
            toolCallId="", toolName="textToSpeech", success=True,
            data=f"Audio generated ({charCount} characters, language={language}, voice={usedVoice}). Playing in chat.",
            sideEvents=[{
                "type": "voiceResponse",
                "data": {
                    "audio": audioB64,
                    "format": audioFormat,
                    "language": language,
                    "charCount": charCount,
                },
            }],
        )
    except ImportError:
        return ToolResult(toolCallId="", toolName="textToSpeech", success=False,
            error="Voice interface not available (missing dependency)")
    except Exception as e:
        logger.error(f"textToSpeech failed: {e}")
        return ToolResult(toolCallId="", toolName="textToSpeech", success=False, error=str(e))
# Register the textToSpeech tool (write-capable: it emits a side event into the chat).
registry.register(
    "textToSpeech", _textToSpeech,
    description=(
        "Convert text to speech audio. The audio is played directly in the chat. "
        "Use this when the user asks you to read something aloud, narrate, or speak. "
        "Language is auto-detected from the text content. You do NOT need to specify a language."
    ),
    parameters={
        "type": "object",
        "properties": {
            "text": {"type": "string", "description": "The text to convert to speech. Can include markdown (will be stripped automatically)."},
            "language": {"type": "string", "description": "BCP-47 language code (e.g. de-DE, en-US) or 'auto' for automatic detection", "default": "auto"},
            "voiceName": {"type": "string", "description": "Optional specific voice name. If omitted, uses the configured voice for the detected language."},
        },
        "required": ["text"],
    },
    readOnly=False,
)
# ── generateImage tool ─────────────────────────────────────────────
async def _generateImage(args: Dict[str, Any], context: Dict[str, Any]):
    """Generate an image from a text prompt using AI (DALL-E).

    Reads ``prompt`` (required) plus optional ``style`` and ``title`` from
    ``args``. Each generated image is stored as a workspace file, attached to
    the current feature instance / temp folder when available, and announced
    via a ``fileCreated`` side event.
    """
    import re as _re
    prompt = (args.get("prompt") or "").strip()
    style = (args.get("style") or "").strip() or None
    title = (args.get("title") or "").strip() or "Generated Image"
    if not prompt:
        return ToolResult(toolCallId="", toolName="generateImage", success=False, error="prompt is required")
    try:
        from modules.serviceCenter.services.serviceGeneration.paths.imagePath import ImageGenerationPath
        imagePath = ImageGenerationPath(services)
        aiResponse = await imagePath.generateImages(
            userPrompt=prompt,
            count=1,
            style=style,
            format="png",
            title=title,
        )
        if not aiResponse.documents:
            return ToolResult(toolCallId="", toolName="generateImage", success=False, error="Image generation returned no image data")
        sideEvents = []
        savedFiles = []
        chatService = services.chat
        # Filesystem-safe filename stem derived from the user-supplied title.
        sanitizedTitle = _re.sub(r'[^\w._-]', '_', title, flags=_re.UNICODE).strip('_') or "generated_image"
        for doc in aiResponse.documents:
            docData = doc.documentData if hasattr(doc, "documentData") else b""
            docName = doc.documentName if hasattr(doc, "documentName") else f"{sanitizedTitle}.png"
            docMime = doc.mimeType if hasattr(doc, "mimeType") else "image/png"
            if not docName.lower().endswith(".png"):
                docName = f"{sanitizedTitle}.png"
            fileItem = None
            # Prefer the dedicated generated-file save path when the DB component offers it.
            if hasattr(chatService.interfaceDbComponent, "saveGeneratedFile"):
                fileItem = chatService.interfaceDbComponent.saveGeneratedFile(docData, docName, docMime)
            else:
                fileItem, _ = chatService.interfaceDbComponent.saveUploadedFile(docData, docName)
            if fileItem:
                # fileItem may be an object or a dict depending on the save path.
                fid = fileItem.id if hasattr(fileItem, "id") else fileItem.get("id", "?")
                fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
                if fiId:
                    chatService.interfaceDbComponent.updateFile(fid, {"featureInstanceId": fiId})
                tempFolderId = _getOrCreateTempFolder(chatService)
                if tempFolderId:
                    chatService.interfaceDbComponent.updateFile(fid, {"folderId": tempFolderId})
                savedFiles.append(f"- {docName} (id: {fid})")
                sideEvents.append({
                    "type": "fileCreated",
                    "data": {
                        "fileId": fid,
                        "fileName": docName,
                        "mimeType": docMime,
                        "fileSize": len(docData),
                    },
                })
        result = f"Generated {len(aiResponse.documents)} image(s):\n" + "\n".join(savedFiles)
        return ToolResult(toolCallId="", toolName="generateImage", success=True, data=result, sideEvents=sideEvents)
    except Exception as e:
        logger.error(f"generateImage failed: {e}")
        return ToolResult(toolCallId="", toolName="generateImage", success=False, error=str(e))
# Register the generateImage tool (write-capable: it creates workspace files).
registry.register(
    "generateImage", _generateImage,
    description=(
        "Generate an image from a text description using AI (DALL-E). "
        "The generated image is saved as a file in the workspace. "
        "Use this when the user asks to create, generate, draw, or design an image, illustration, icon, logo, diagram, or any visual content. "
        "Provide a detailed, descriptive prompt for best results."
    ),
    parameters={
        "type": "object",
        "properties": {
            "prompt": {"type": "string", "description": "Detailed description of the image to generate. Be specific about subject, composition, colors, style, and mood."},
            "style": {"type": "string", "description": "Optional style modifier (e.g. 'photorealistic', 'watercolor', 'digital art', 'minimalist', 'sketch')"},
            "title": {"type": "string", "description": "Title/filename for the generated image", "default": "Generated Image"},
        },
        "required": ["prompt"],
    },
    readOnly=False,
)
# ── createChart tool ─────────────────────────────────────────────────
async def _createChart(args: Dict[str, Any], context: Dict[str, Any]):
    """Create a data chart as PNG image using matplotlib.

    Supports bar/horizontalBar/line/area/scatter charts plus pie/donut.
    The rendered PNG is saved as a workspace file and announced via a
    ``fileCreated`` side event; ``data`` tells the model how to embed it.
    """
    import re as _re
    chartType = (args.get("chartType") or "bar").strip().lower()
    title = (args.get("title") or "Chart").strip()
    labels = args.get("labels") or []
    datasets = args.get("datasets") or []
    xLabel = (args.get("xLabel") or "").strip()
    yLabel = (args.get("yLabel") or "").strip()
    # Clamp figure size to sane bounds (inches).
    width = min(max(args.get("width") or 10, 4), 20)
    height = min(max(args.get("height") or 6, 3), 14)
    colors = args.get("colors") or None
    if not datasets:
        return ToolResult(toolCallId="", toolName="createChart", success=False, error="datasets is required (list of {label, values})")
    try:
        import matplotlib
        # Headless backend: no display available on the server.
        matplotlib.use("Agg")
        import logging as _mpllog
        _mpllog.getLogger("matplotlib").setLevel(_mpllog.WARNING)
        import matplotlib.pyplot as plt
        import io
        _DEFAULT_COLORS = [
            "#4285F4", "#EA4335", "#FBBC04", "#34A853", "#FF6D01",
            "#46BDC6", "#7B61FF", "#F538A0", "#00ACC1", "#AB47BC",
        ]
        # Custom colors are only honored when there is one per dataset.
        usedColors = colors if colors and len(colors) >= len(datasets) else _DEFAULT_COLORS
        fig, ax = plt.subplots(figsize=(width, height))
        fig.patch.set_facecolor("#FFFFFF")
        ax.set_facecolor("#FAFAFA")
        if chartType in ("pie", "donut"):
            # Pie/donut uses only the first dataset.
            values = datasets[0].get("values", []) if datasets else []
            explode = [0.02] * len(values)
            wedges, texts, autotexts = ax.pie(
                values, labels=labels, autopct="%1.1f%%",
                colors=usedColors[:len(values)], explode=explode,
                textprops={"fontsize": 9},
            )
            if chartType == "donut":
                # A white circle over the center turns the pie into a donut.
                ax.add_artist(plt.Circle((0, 0), 0.55, fc="white"))
            ax.set_title(title, fontsize=14, fontweight="bold", pad=16)
        else:
            import numpy as _np
            # X positions: one slot per label, or per longest dataset if no labels given.
            x = _np.arange(len(labels)) if labels else _np.arange(max(len(d.get("values", [])) for d in datasets))
            barWidth = 0.8 / max(len(datasets), 1)
            for i, ds in enumerate(datasets):
                dsLabel = ds.get("label", f"Series {i+1}")
                values = ds.get("values", [])
                color = usedColors[i % len(usedColors)]
                if chartType == "bar":
                    # Offset groups the bars of each series side by side.
                    offset = (i - len(datasets) / 2 + 0.5) * barWidth
                    ax.bar(x + offset, values, barWidth, label=dsLabel, color=color, edgecolor="white", linewidth=0.5)
                elif chartType == "horizontalbar":
                    offset = (i - len(datasets) / 2 + 0.5) * barWidth
                    ax.barh(x + offset, values, barWidth, label=dsLabel, color=color, edgecolor="white", linewidth=0.5)
                elif chartType == "line":
                    ax.plot(x[:len(values)], values, marker="o", markersize=5, label=dsLabel, color=color, linewidth=2)
                elif chartType == "area":
                    ax.fill_between(x[:len(values)], values, alpha=0.3, color=color)
                    ax.plot(x[:len(values)], values, label=dsLabel, color=color, linewidth=2)
                elif chartType == "scatter":
                    ax.scatter(x[:len(values)], values, label=dsLabel, color=color, s=50, edgecolors="white", linewidth=0.5)
                else:
                    # Unknown type: fall back to simple bars.
                    ax.bar(x, values, label=dsLabel, color=color)
            if labels:
                if chartType == "horizontalbar":
                    ax.set_yticks(x)
                    ax.set_yticklabels(labels, fontsize=9)
                else:
                    ax.set_xticks(x)
                    ax.set_xticklabels(labels, fontsize=9, rotation=45 if len(labels) > 6 else 0, ha="right" if len(labels) > 6 else "center")
            ax.set_title(title, fontsize=14, fontweight="bold", pad=12)
            if xLabel:
                ax.set_xlabel(xLabel, fontsize=10)
            if yLabel:
                ax.set_ylabel(yLabel, fontsize=10)
            if len(datasets) > 1:
                ax.legend(fontsize=9, framealpha=0.9)
        ax.grid(axis="y", alpha=0.3, linestyle="--")
        ax.spines["top"].set_visible(False)
        ax.spines["right"].set_visible(False)
        plt.tight_layout()
        # Render to an in-memory PNG buffer and release the figure.
        buf = io.BytesIO()
        fig.savefig(buf, format="png", dpi=150, bbox_inches="tight")
        plt.close(fig)
        pngData = buf.getvalue()
        chatService = services.chat
        sanitizedTitle = _re.sub(r'[^\w._-]', '_', title, flags=_re.UNICODE).strip('_') or "chart"
        fileName = f"{sanitizedTitle}.png"
        # Same save/attach flow as generateImage: generated-file path preferred.
        if hasattr(chatService.interfaceDbComponent, "saveGeneratedFile"):
            fileItem = chatService.interfaceDbComponent.saveGeneratedFile(pngData, fileName, "image/png")
        else:
            fileItem, _ = chatService.interfaceDbComponent.saveUploadedFile(pngData, fileName)
        fid = fileItem.id if hasattr(fileItem, "id") else fileItem.get("id", "?") if isinstance(fileItem, dict) else "?"
        fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
        if fiId and fid != "?":
            chatService.interfaceDbComponent.updateFile(fid, {"featureInstanceId": fiId})
        tempFolderId = _getOrCreateTempFolder(chatService)
        if tempFolderId and fid != "?":
            chatService.interfaceDbComponent.updateFile(fid, {"folderId": tempFolderId})
        sideEvents = [{"type": "fileCreated", "data": {
            "fileId": fid, "fileName": fileName,
            "mimeType": "image/png", "fileSize": len(pngData),
        }}]
        return ToolResult(
            toolCallId="", toolName="createChart", success=True,
            data=f"Chart saved as '{fileName}' (id: {fid}, {len(pngData)} bytes). "
            f"Embed in documents with: ![{title}](file:{fid})",
            sideEvents=sideEvents,
        )
    except Exception as e:
        logger.error(f"createChart failed: {e}", exc_info=True)
        return ToolResult(toolCallId="", toolName="createChart", success=False, error=str(e))
# Register the createChart tool (write-capable: it creates a PNG workspace file).
registry.register(
    "createChart", _createChart,
    description=(
        "Create a data chart/graph as a PNG image using matplotlib. "
        "Supported types: bar, horizontalBar, line, area, scatter, pie, donut. "
        "The chart is saved as a file in the workspace. "
        "Use the returned fileId to embed in documents via renderDocument: ![title](file:fileId). "
        "Provide structured data with labels and datasets."
    ),
    parameters={
        "type": "object",
        "properties": {
            "chartType": {
                "type": "string",
                "enum": ["bar", "horizontalBar", "line", "area", "scatter", "pie", "donut"],
                "description": "Chart type (default: bar)",
            },
            "title": {"type": "string", "description": "Chart title"},
            "labels": {
                "type": "array", "items": {"type": "string"},
                "description": "X-axis labels / category names",
            },
            "datasets": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "label": {"type": "string", "description": "Series name (legend)"},
                        "values": {"type": "array", "items": {"type": "number"}, "description": "Data values"},
                    },
                    "required": ["values"],
                },
                "description": "Data series to plot",
            },
            "xLabel": {"type": "string", "description": "X-axis label"},
            "yLabel": {"type": "string", "description": "Y-axis label"},
            "colors": {
                "type": "array", "items": {"type": "string"},
                "description": "Custom hex colors for series (e.g. ['#4285F4', '#EA4335'])",
            },
            "width": {"type": "number", "description": "Figure width in inches (4-20, default 10)"},
            "height": {"type": "number", "description": "Figure height in inches (3-14, default 6)"},
        },
        "required": ["datasets"],
    },
    readOnly=False,
)
# ── Phase 3: speechToText, detectLanguage, neutralizeData, executeCode ──
async def _speechToText(args: Dict[str, Any], context: Dict[str, Any]):
    """Transcribe a workspace audio file to text via the voice interface.

    Args (in ``args``): ``fileId`` (required) and optional BCP-47 ``language``
    (default "de-DE"). Returns the transcript with its confidence score.
    """
    fileId = args.get("fileId", "")
    if not fileId:
        return ToolResult(toolCallId="", toolName="speechToText", success=False, error="fileId is required")
    try:
        chatService = services.chat
        audioData = chatService.interfaceDbComponent.getFileData(fileId)
        if not audioData:
            return ToolResult(toolCallId="", toolName="speechToText", success=False, error=f"No data found for file {fileId}")
        from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
        mandateId = context.get("mandateId", "")
        voiceInterface = getVoiceInterface(currentUser=None, mandateId=mandateId)
        language = args.get("language", "de-DE")
        result = await voiceInterface.speechToText(audioData, language=language)
        if result and result.get("success"):
            transcript = result.get("text", "")
            confidence = result.get("confidence", 0)
            return ToolResult(
                toolCallId="", toolName="speechToText", success=True,
                data=f"Transcript (confidence: {confidence:.0%}):\n{transcript}"
            )
        # BUG FIX: result may be None here; the old code called result.get()
        # unconditionally and raised AttributeError instead of a clean error.
        errMsg = result.get("error", "Transcription failed") if result else "Transcription failed"
        return ToolResult(toolCallId="", toolName="speechToText", success=False, error=errMsg)
    except Exception as e:
        return ToolResult(toolCallId="", toolName="speechToText", success=False, error=str(e))
async def _detectLanguage(args: Dict[str, Any], context: Dict[str, Any]):
    """Detect the language of a text snippet via the voice interface.

    Requires ``text`` in ``args``; returns the detected language code on success.
    """
    text = args.get("text", "")
    if not text:
        return ToolResult(toolCallId="", toolName="detectLanguage", success=False, error="text is required")
    try:
        from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
        mandateId = context.get("mandateId", "")
        voiceInterface = getVoiceInterface(currentUser=None, mandateId=mandateId)
        result = await voiceInterface.detectLanguage(text)
        if result and result.get("success"):
            lang = result.get("language", "unknown")
            return ToolResult(toolCallId="", toolName="detectLanguage", success=True, data=f"Detected language: {lang}")
        # BUG FIX: guard against result being None before reading the error,
        # mirroring the success check above (was an unconditional result.get()).
        errMsg = result.get("error", "Detection failed") if result else "Detection failed"
        return ToolResult(toolCallId="", toolName="detectLanguage", success=False, error=errMsg)
    except Exception as e:
        return ToolResult(toolCallId="", toolName="detectLanguage", success=False, error=str(e))
async def _neutralizeData(args: Dict[str, Any], context: Dict[str, Any]):
    """Anonymize text or a file via the neutralization service (non-destructive)."""
    inputText = args.get("text", "")
    inputFileId = args.get("fileId", "")
    if not (inputText or inputFileId):
        return ToolResult(toolCallId="", toolName="neutralizeData", success=False, error="text or fileId is required")
    try:
        svc = services.getService("neutralization")
        if not svc:
            return ToolResult(toolCallId="", toolName="neutralizeData", success=False, error="Neutralization service not available")
        if not svc.interfaceDbComponent:
            # Borrow the chat service's DB component when none is wired yet.
            svc.interfaceDbComponent = services.chat.interfaceDbComponent
        # Text input takes precedence; otherwise process the file by id.
        if inputText:
            outcome = await svc.processTextAsync(inputText, inputFileId or None)
        else:
            outcome = svc.processFile(inputFileId)
        if not outcome:
            return ToolResult(toolCallId="", toolName="neutralizeData", success=False, error="Neutralization returned no result")
        cleaned = outcome.get("neutralized_text", "") or outcome.get("result", str(outcome))
        return ToolResult(toolCallId="", toolName="neutralizeData", success=True, data=cleaned)
    except Exception as e:
        return ToolResult(toolCallId="", toolName="neutralizeData", success=False, error=str(e))
async def _executeCode(args: Dict[str, Any], context: Dict[str, Any]):
    """Run Python code in the sandbox executor and return its printed output."""
    code = args.get("code", "")
    language = args.get("language", "python")
    if not code:
        return ToolResult(toolCallId="", toolName="executeCode", success=False, error="code is required")
    if language != "python":
        return ToolResult(toolCallId="", toolName="executeCode", success=False, error=f"Language '{language}' not supported. Only 'python' is available.")
    try:
        from modules.serviceCenter.services.serviceAgent.sandboxExecutor import executePython
        outcome = await executePython(code)
        if not outcome.get("success"):
            # Attach the traceback below the error message when one is available.
            errText = outcome.get("error", "Execution failed")
            trace = outcome.get("traceback", "")
            combined = f"{errText}\n{trace}" if trace else errText
            return ToolResult(toolCallId="", toolName="executeCode", success=False, error=combined)
        return ToolResult(toolCallId="", toolName="executeCode", success=True, data=outcome.get("output", "(no output)"))
    except Exception as e:
        return ToolResult(toolCallId="", toolName="executeCode", success=False, error=str(e))
# Register the Phase-3 tools. All four are read-only: they produce output
# without creating or mutating workspace files.
registry.register(
    "speechToText", _speechToText,
    description="Transcribe an audio file to text using speech recognition. Returns the transcript with confidence score.",
    parameters={
        "type": "object",
        "properties": {
            "fileId": {"type": "string", "description": "Audio file ID from the workspace"},
            "language": {"type": "string", "description": "BCP-47 language code (e.g. 'de-DE', 'en-US'). Default: 'de-DE'"},
        },
        "required": ["fileId"]
    },
    readOnly=True
)
registry.register(
    "detectLanguage", _detectLanguage,
    description="Detect the language of a text snippet. Returns ISO 639-1 code (e.g. 'de', 'en').",
    parameters={
        "type": "object",
        "properties": {
            "text": {"type": "string", "description": "Text to analyze"},
        },
        "required": ["text"]
    },
    readOnly=True
)
registry.register(
    "neutralizeData", _neutralizeData,
    description="Anonymize text or file content by replacing personal data (names, addresses, etc.) with placeholders. Non-destructive -- returns the anonymized copy.",
    parameters={
        "type": "object",
        "properties": {
            "text": {"type": "string", "description": "Text to anonymize"},
            "fileId": {"type": "string", "description": "File ID to anonymize (alternative to text)"},
        },
    },
    readOnly=True
)
registry.register(
    "executeCode", _executeCode,
    description=(
        "Execute Python code in a sandboxed environment for calculations and data analysis. "
        "Available modules: math, statistics, json, csv, re, datetime, collections, itertools, functools, decimal, fractions, random. "
        "No file system, network, or OS access. Max 30s execution time. "
        "Use print() to produce output."
    ),
    parameters={
        "type": "object",
        "properties": {
            "code": {"type": "string", "description": "Python code to execute"},
            "language": {"type": "string", "description": "Programming language (only 'python' supported)", "default": "python"},
        },
        "required": ["code"]
    },
    readOnly=True
)

View file

@ -0,0 +1,950 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Workspace and file management tools (read, write, search, folders, web, translate)."""
import logging
from typing import Any, Dict, List, Optional
from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
from modules.serviceCenter.services.serviceAgent.coreTools._helpers import (
_getOrCreateTempFolder,
_looksLikeBinary,
_resolveFileScope,
_MAX_TOOL_RESULT_CHARS,
)
logger = logging.getLogger(__name__)
import uuid as _uuid
def _registerWorkspaceTools(registry: ToolRegistry, services):
"""Auto-extracted from registerCoreTools."""
import uuid as _uuid
# ---- Read-only tools ----
def _applyOffsetLimit(text: str, offset: int = None, limit: int = None) -> str:
"""Apply line-based offset/limit to text content, returning numbered lines."""
if offset is None and limit is None:
return None
lines = text.split("\n")
totalLines = len(lines)
startLine = max(0, (offset or 1) - 1)
endLine = min(totalLines, startLine + (limit or 200))
selected = lines[startLine:endLine]
numbered = "\n".join(f"{i + startLine + 1}|{line}" for i, line in enumerate(selected))
header = f"[Lines {startLine + 1}-{endLine} of {totalLines} total]\n"
return header + numbered
async def _readFile(args: Dict[str, Any], context: Dict[str, Any]):
    """Read a workspace file's text content using a three-stage strategy.

    1. If the knowledge store has the file indexed, return its extracted chunks.
    2. Otherwise, for binary files, run on-demand extraction (and index it).
    3. Otherwise decode raw bytes as text (utf-8 / utf-8-sig / latin-1),
       honoring the per-file ``neutralize`` flag fail-safe (content is blocked
       rather than exposed when neutralization is required but fails).

    Optional ``offset``/``limit`` in ``args`` select a numbered line window.
    Note: most failure modes return success=True with an explanatory message,
    so the agent can react to the text instead of aborting the tool call.
    """
    fileId = args.get("fileId", "")
    offset = args.get("offset")
    limit = args.get("limit")
    if not fileId:
        return ToolResult(toolCallId="", toolName="readFile", success=False, error="fileId is required")
    try:
        knowledgeService = services.getService("knowledge") if hasattr(services, "getService") else None
        # 1) Knowledge Store: return already-extracted text chunks
        if knowledgeService:
            fileStatus = knowledgeService.getFileStatus(fileId)
            if fileStatus == "indexed":
                chunks = knowledgeService._knowledgeDb.getContentChunks(fileId)
                # Images have no inline text; keep only text-bearing chunks.
                textChunks = [
                    c for c in (chunks or [])
                    if c.get("contentType") != "image" and c.get("data")
                ]
                if textChunks:
                    assembled = "\n\n".join(c["data"] for c in textChunks)
                    chunked = _applyOffsetLimit(assembled, offset, limit)
                    if chunked is not None:
                        return ToolResult(toolCallId="", toolName="readFile", success=True, data=chunked)
                    if len(assembled) > _MAX_TOOL_RESULT_CHARS:
                        assembled = assembled[:_MAX_TOOL_RESULT_CHARS] + f"\n\n[Truncated showing first {_MAX_TOOL_RESULT_CHARS} chars of {len(assembled)}. Use offset/limit to read specific sections.]"
                    return ToolResult(
                        toolCallId="", toolName="readFile", success=True,
                        data=assembled,
                    )
            elif fileStatus in ("processing", "embedding", "extracted"):
                # Indexing in flight: tell the agent to retry rather than re-extract.
                return ToolResult(
                    toolCallId="", toolName="readFile", success=True,
                    data=f"[File {fileId} is currently being processed (status: {fileStatus}). Try again shortly.]",
                )
        # 2) Not indexed yet: try on-demand extraction
        chatService = services.chat
        fileInfo = chatService.getFileInfo(fileId)
        if not fileInfo:
            return ToolResult(toolCallId="", toolName="readFile", success=True, data="File not found.")
        fileName = fileInfo.get("fileName", fileId)
        mimeType = fileInfo.get("mimeType", "")
        # MIME prefixes that always need the extraction pipeline rather than decoding.
        _BINARY_TYPES = ("application/pdf", "image/", "application/vnd.", "application/zip",
            "application/x-zip", "application/x-tar", "application/x-7z",
            "application/msword", "application/octet-stream",
            "message/rfc822")
        isBinary = any(mimeType.startswith(t) for t in _BINARY_TYPES)
        rawBytes = chatService.getFileData(fileId)
        if not rawBytes:
            return ToolResult(toolCallId="", toolName="readFile", success=True, data="File data not accessible.")
        if not isBinary:
            # MIME type may be missing/wrong; sniff the bytes as a second opinion.
            isBinary = _looksLikeBinary(rawBytes)
        if isBinary:
            try:
                from modules.serviceCenter.services.serviceExtraction.subRegistry import ExtractorRegistry, ChunkerRegistry
                from modules.serviceCenter.services.serviceExtraction.subPipeline import runExtraction
                from modules.datamodels.datamodelExtraction import ExtractionOptions
                extracted = runExtraction(
                    ExtractorRegistry(), ChunkerRegistry(),
                    rawBytes, fileName, mimeType, ExtractionOptions(),
                )
                # Convert extraction parts into knowledge-store content objects.
                contentObjects = []
                for part in extracted.parts:
                    tg = (part.typeGroup or "").lower()
                    ct = "image" if tg == "image" else "text"
                    if not part.data or not part.data.strip():
                        continue
                    contentObjects.append({
                        "contentObjectId": part.id,
                        "contentType": ct,
                        "data": part.data,
                        "contextRef": {
                            "containerPath": fileName,
                            "location": part.label or "file",
                            **(part.metadata or {}),
                        },
                    })
                if contentObjects:
                    # Opportunistically index so subsequent reads hit stage 1.
                    if knowledgeService:
                        try:
                            userId = context.get("userId", "")
                            _fiId, _mId = _resolveFileScope(fileId, context)
                            await knowledgeService.indexFile(
                                fileId=fileId, fileName=fileName, mimeType=mimeType,
                                userId=userId, contentObjects=contentObjects,
                                featureInstanceId=_fiId,
                                mandateId=_mId,
                            )
                        except Exception:
                            # Indexing is best-effort; extraction output is still usable below.
                            pass
                    # Prefer the (possibly post-processed) indexed chunks over raw parts.
                    joined = ""
                    if knowledgeService:
                        _chunks = knowledgeService._knowledgeDb.getContentChunks(fileId)
                        _textChunks = [
                            c for c in (_chunks or [])
                            if c.get("contentType") != "image" and c.get("data")
                        ]
                        if _textChunks:
                            joined = "\n\n".join(c["data"] for c in _textChunks)
                    if not joined:
                        textParts = [o["data"] for o in contentObjects if o["contentType"] != "image"]
                        joined = "\n\n".join(textParts) if textParts else ""
                    if joined:
                        chunked = _applyOffsetLimit(joined, offset, limit)
                        if chunked is not None:
                            return ToolResult(toolCallId="", toolName="readFile", success=True, data=chunked)
                        if len(joined) > _MAX_TOOL_RESULT_CHARS:
                            joined = joined[:_MAX_TOOL_RESULT_CHARS] + f"\n\n[Truncated showing first {_MAX_TOOL_RESULT_CHARS} chars of {len(joined)}. Use offset/limit to read specific sections.]"
                        return ToolResult(
                            toolCallId="", toolName="readFile", success=True,
                            data=joined,
                        )
                    # Only image parts extracted: point the agent at describeImage.
                    imgCount = sum(1 for o in contentObjects if o["contentType"] == "image")
                    return ToolResult(
                        toolCallId="", toolName="readFile", success=True,
                        data=f"[Extracted {len(contentObjects)} content objects from '{fileName}' "
                        f"({imgCount} images, no readable text). "
                        f"Use describeImage(fileId='{fileId}') to analyze visual content.]",
                    )
            except Exception as extractErr:
                logger.warning(f"readFile extraction failed for {fileId} ({fileName}): {extractErr}")
            return ToolResult(
                toolCallId="", toolName="readFile", success=True,
                data=f"[Binary file: '{fileName}', type={mimeType}, size={len(rawBytes)} bytes. "
                f"Text extraction not available. Use describeImage for images.]",
            )
        # 3) Text file: decode raw bytes
        for encoding in ("utf-8", "utf-8-sig", "latin-1"):
            try:
                text = rawBytes.decode(encoding)
                if text.strip():
                    # Check the file record's "neutralize" flag (data-protection marker).
                    _fileNeedNeutralize = False
                    try:
                        from modules.datamodels.datamodelFiles import FileItem as _FI
                        from modules.interfaces.interfaceDbManagement import ComponentObjects as _CO
                        _fRec = _CO().db._loadRecord(_FI, fileId)
                        if _fRec:
                            # Record may be a dict or an object; unify attribute access.
                            _fG = (lambda k, d=None: _fRec.get(k, d)) if isinstance(_fRec, dict) else (lambda k, d=None: getattr(_fRec, k, d))
                            _fileNeedNeutralize = bool(_fG("neutralize", False))
                    except Exception:
                        pass
                    if _fileNeedNeutralize:
                        # Fail-safe: if neutralization cannot be performed, block the
                        # content entirely rather than leak personal data.
                        try:
                            _nSvc = services.getService("neutralization") if hasattr(services, "getService") else None
                            if _nSvc and hasattr(_nSvc, 'processTextAsync'):
                                _nResult = await _nSvc.processTextAsync(text, fileId)
                                if _nResult and _nResult.get("neutralized_text"):
                                    text = _nResult["neutralized_text"]
                                    logger.debug(f"readFile: neutralized text for file {fileId}")
                                else:
                                    logger.warning(f"readFile: neutralization failed for file {fileId}, blocking text (fail-safe)")
                                    return ToolResult(toolCallId="", toolName="readFile", success=True,
                                        data="[File requires neutralization but neutralization failed. Content blocked for data protection.]")
                            else:
                                logger.warning(f"readFile: neutralization required but service unavailable for file {fileId}")
                                return ToolResult(toolCallId="", toolName="readFile", success=True,
                                    data="[File requires neutralization but service unavailable. Content blocked for data protection.]")
                        except Exception as _nErr:
                            logger.error(f"readFile: neutralization error for file {fileId}: {_nErr}")
                            return ToolResult(toolCallId="", toolName="readFile", success=True,
                                data="[File requires neutralization but an error occurred. Content blocked for data protection.]")
                    chunked = _applyOffsetLimit(text, offset, limit)
                    if chunked is not None:
                        return ToolResult(toolCallId="", toolName="readFile", success=True, data=chunked)
                    if len(text) > _MAX_TOOL_RESULT_CHARS:
                        text = text[:_MAX_TOOL_RESULT_CHARS] + f"\n\n[Truncated showing first {_MAX_TOOL_RESULT_CHARS} chars of {len(text)}. Use offset/limit to read specific sections.]"
                    return ToolResult(
                        toolCallId="", toolName="readFile", success=True,
                        data=text,
                    )
            except (UnicodeDecodeError, ValueError):
                continue
        return ToolResult(
            toolCallId="", toolName="readFile", success=True,
            data="File is empty or could not be decoded.",
        )
    except Exception as e:
        return ToolResult(toolCallId="", toolName="readFile", success=False, error=str(e))
async def _listFiles(args: Dict[str, Any], context: Dict[str, Any]):
    """List workspace files, optionally filtered by folder, tags, or search term."""
    try:
        chatService = services.chat
        found = chatService.listFiles(
            folderId=args.get("folderId"),
            tags=args.get("tags"),
            search=args.get("search"),
        )
        if not found:
            return ToolResult(toolCallId="", toolName="listFiles", success=True, data="No files found.")
        rows = []
        for entry in found:
            rows.append(
                f"- {entry.get('fileName', 'unknown')} (id: {entry.get('id', '?')}, "
                f"type: {entry.get('mimeType', '?')}, size: {entry.get('fileSize', '?')}, "
                f"tags: {entry.get('tags', [])}, status: {entry.get('status', 'n/a')})"
            )
        return ToolResult(toolCallId="", toolName="listFiles", success=True, data="\n".join(rows))
    except Exception as e:
        return ToolResult(toolCallId="", toolName="listFiles", success=False, error=str(e))
async def _searchInFileContent(args: Dict[str, Any], context: Dict[str, Any]):
    """Case-insensitive literal search inside a file, returning numbered context snippets."""
    import re as _re
    fileId = args.get("fileId", "")
    query = args.get("query", "")
    contextLines = args.get("contextLines", 2)
    if not fileId or not query:
        return ToolResult(toolCallId="", toolName="searchInFileContent", success=False, error="fileId and query are required")
    try:
        chatService = services.chat
        rawBytes = chatService.getFileData(fileId)
        if not rawBytes:
            return ToolResult(toolCallId="", toolName="searchInFileContent", success=False, error="File data not accessible")
        try:
            content = rawBytes.decode("utf-8")
        except UnicodeDecodeError:
            content = rawBytes.decode("latin-1", errors="replace")
        allLines = content.split("\n")
        # Literal (escaped) pattern, case-insensitive.
        needle = _re.compile(_re.escape(query), _re.IGNORECASE)
        snippets = []
        for idx, line in enumerate(allLines):
            if not needle.search(line):
                continue
            lo = max(0, idx - contextLines)
            hi = min(len(allLines), idx + contextLines + 1)
            snippets.append("\n".join(f"{n + 1}|{allLines[n]}" for n in range(lo, hi)))
        if not snippets:
            return ToolResult(toolCallId="", toolName="searchInFileContent", success=True,
                data=f"No matches for '{query}' in file.")
        # Cap the output at 20 snippets to keep the tool result bounded.
        resultText = f"Found {len(snippets)} match(es) for '{query}':\n\n" + "\n---\n".join(snippets[:20])
        if len(snippets) > 20:
            resultText += f"\n\n... and {len(snippets) - 20} more matches"
        return ToolResult(toolCallId="", toolName="searchInFileContent", success=True, data=resultText)
    except Exception as e:
        return ToolResult(toolCallId="", toolName="searchInFileContent", success=False, error=str(e))
async def _listFolders(args: Dict[str, Any], context: Dict[str, Any]):
    """List workspace folders, optionally restricted to a parent folder."""
    try:
        chatService = services.chat
        found = chatService.listFolders(parentId=args.get("parentId"))
        if not found:
            return ToolResult(toolCallId="", toolName="listFolders", success=True, data="No folders found.")
        lines = [f"- {f.get('name', 'unnamed')} (id: {f.get('id', '?')})" for f in found]
        return ToolResult(toolCallId="", toolName="listFolders", success=True, data="\n".join(lines))
    except Exception as e:
        return ToolResult(toolCallId="", toolName="listFolders", success=False, error=str(e))
async def _webSearch(args: Dict[str, Any], context: Dict[str, Any]):
    """Run a web research query via the web service and return its summary."""
    query = args.get("query", "")
    if not query:
        return ToolResult(toolCallId="", toolName="webSearch", success=False, error="query is required")
    try:
        webService = services.getService("web")
        result = await webService.performWebResearch(
            prompt=query,
            urls=[],
            country=None,
            language=args.get("language"),
        )
        if isinstance(result, dict):
            summary = result.get("summary", "")
        else:
            summary = str(result)
        # Fall back to the raw result when no summary was produced.
        return ToolResult(
            toolCallId="", toolName="webSearch", success=True,
            data=summary or str(result)
        )
    except Exception as e:
        return ToolResult(toolCallId="", toolName="webSearch", success=False, error=str(e))
# ---- Write tools ----
async def _tagFile(args: Dict[str, Any], context: Dict[str, Any]):
    """Replace the tag list stored on a workspace file."""
    fileId = args.get("fileId", "")
    tags = args.get("tags", [])
    if not fileId:
        return ToolResult(toolCallId="", toolName="tagFile", success=False, error="fileId is required")
    try:
        services.chat.interfaceDbComponent.updateFile(fileId, {"tags": tags})
        return ToolResult(
            toolCallId="", toolName="tagFile", success=True,
            data=f"Tags updated to {tags} for file {fileId}"
        )
    except Exception as e:
        return ToolResult(toolCallId="", toolName="tagFile", success=False, error=str(e))
async def _moveFile(args: Dict[str, Any], context: Dict[str, Any]):
    """Move a file into another folder; a missing/None target means root."""
    fileId = args.get("fileId", "")
    targetFolderId = args.get("targetFolderId")
    if not fileId:
        return ToolResult(toolCallId="", toolName="moveFile", success=False, error="fileId is required")
    try:
        services.chat.interfaceDbComponent.updateFile(fileId, {"folderId": targetFolderId})
        return ToolResult(
            toolCallId="", toolName="moveFile", success=True,
            data=f"File {fileId} moved to folder {targetFolderId or 'root'}"
        )
    except Exception as e:
        return ToolResult(toolCallId="", toolName="moveFile", success=False, error=str(e))
async def _createFolder(args: Dict[str, Any], context: Dict[str, Any]):
    """Create a new workspace folder, optionally beneath a parent folder."""
    name = args.get("name", "")
    if not name:
        return ToolResult(toolCallId="", toolName="createFolder", success=False, error="name is required")
    try:
        created = services.chat.createFolder(name=name, parentId=args.get("parentId"))
        return ToolResult(
            toolCallId="", toolName="createFolder", success=True,
            data=f"Folder '{name}' created (id: {created.get('id', '?')})"
        )
    except Exception as e:
        return ToolResult(toolCallId="", toolName="createFolder", success=False, error=str(e))
async def _writeFile(args: Dict[str, Any], context: Dict[str, Any]):
    """Create, append to, or overwrite a workspace file.

    Modes:
        create (default): save a new file; requires 'name'. Optional
            folderId/tags metadata is applied after creation.
        append: add content to an existing (text) file; requires 'fileId'.
        overwrite: replace the content of an existing file; requires 'fileId'.

    Returns a ToolResult whose sideEvents notify the UI of file changes.
    NOTE(review): an unknown mode falls through to the create path; kept for
    backward compatibility with earlier callers.
    """
    content = args.get("content", "")
    mode = args.get("mode", "create")
    fileId = args.get("fileId", "")
    name = args.get("name", "")
    if not content:
        return ToolResult(toolCallId="", toolName="writeFile", success=False, error="content is required")
    try:
        chatService = services.chat
        dbMgmt = chatService.interfaceDbComponent
        if mode == "append":
            if not fileId:
                return ToolResult(toolCallId="", toolName="writeFile", success=False, error="fileId is required for mode=append")
            file = dbMgmt.getFile(fileId)
            if not file:
                return ToolResult(toolCallId="", toolName="writeFile", success=False, error=f"File {fileId} not found")
            existingData = dbMgmt.getFileData(fileId) or b""
            try:
                existingText = existingData.decode("utf-8")
            except UnicodeDecodeError:
                # Fall back to latin-1 so appending to legacy encodings still works.
                existingText = existingData.decode("latin-1", errors="replace")
            newContent = existingText + content
            # Encode once and reuse for both the data write and the size update
            # (the original encoded the full content twice).
            encoded = newContent.encode("utf-8")
            dbMgmt.updateFileData(fileId, encoded)
            dbMgmt.updateFile(fileId, {"fileSize": len(encoded)})
            return ToolResult(
                toolCallId="", toolName="writeFile", success=True,
                data=f"Appended {len(content)} chars to '{file.fileName}' (id: {fileId}, total: {len(newContent)} chars)",
                sideEvents=[{"type": "fileUpdated", "data": {"fileId": fileId, "fileName": file.fileName}}],
            )
        if mode == "overwrite":
            if not fileId:
                return ToolResult(toolCallId="", toolName="writeFile", success=False, error="fileId is required for mode=overwrite")
            file = dbMgmt.getFile(fileId)
            if not file:
                return ToolResult(toolCallId="", toolName="writeFile", success=False, error=f"File {fileId} not found")
            # Encode once; reused for data write and size update.
            encoded = content.encode("utf-8")
            dbMgmt.updateFileData(fileId, encoded)
            dbMgmt.updateFile(fileId, {"fileSize": len(encoded)})
            return ToolResult(
                toolCallId="", toolName="writeFile", success=True,
                data=f"Overwritten '{file.fileName}' (id: {fileId}, {len(content)} chars)",
                sideEvents=[{"type": "fileUpdated", "data": {"fileId": fileId, "fileName": file.fileName}}],
            )
        # mode == "create" (default)
        if not name:
            return ToolResult(toolCallId="", toolName="writeFile", success=False, error="name is required for mode=create")
        fileItem, _ = dbMgmt.saveUploadedFile(content.encode("utf-8"), name)
        fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
        # Collect metadata updates and apply them in one call instead of up to three.
        updates: Dict[str, Any] = {}
        if fiId:
            updates["featureInstanceId"] = fiId
        if args.get("folderId"):
            updates["folderId"] = args["folderId"]
        if args.get("tags"):
            updates["tags"] = args["tags"]
        if updates:
            dbMgmt.updateFile(fileItem.id, updates)
        return ToolResult(
            toolCallId="", toolName="writeFile", success=True,
            data=f"File '{name}' created (id: {fileItem.id})",
            sideEvents=[{
                "type": "fileCreated",
                "data": {
                    "fileId": fileItem.id,
                    "fileName": name,
                    "mimeType": fileItem.mimeType,
                    "fileSize": fileItem.fileSize,
                },
            }],
        )
    except Exception as e:
        return ToolResult(toolCallId="", toolName="writeFile", success=False, error=str(e))
# ---- Register all tools ----
# Read-only tool: paginated access to a file's content.
registry.register(
    "readFile", _readFile,
    description=(
        "Read the content of a file. Returns full content by default. "
        "For large files, use offset and limit to read specific line ranges. "
        "When truncated, the response tells the total line count so you can paginate."
    ),
    parameters={
        "type": "object",
        "properties": {
            "fileId": {"type": "string", "description": "The file ID to read"},
            "offset": {"type": "integer", "description": "Start reading from this line number (1-based). Omit for full file."},
            "limit": {"type": "integer", "description": "Max number of lines to return (default: all). Use with offset for chunked reading."},
        },
        "required": ["fileId"]
    },
    readOnly=True
)
# Read-only tool: enumerate workspace files with optional filters.
registry.register(
    "listFiles", _listFiles,
    description=(
        "List files in the local workspace. Filter by folder, tags, or search term. "
        "For external data sources, use browseDataSource instead."
    ),
    parameters={
        "type": "object",
        "properties": {
            "folderId": {"type": "string", "description": "Filter by folder ID"},
            "tags": {"type": "array", "items": {"type": "string"}, "description": "Filter by tags (any match)"},
            "search": {"type": "string", "description": "Search in file names and descriptions"},
        }
    },
    readOnly=True
)
# Read-only tool: in-file text search (pairs with replaceInFile / readFile).
registry.register(
    "searchInFileContent", _searchInFileContent,
    description=(
        "Search for text within a file's content. Returns matching lines with context. "
        "Case-insensitive. Use to locate specific text before using replaceInFile, "
        "or to find relevant sections in a large file before reading with offset/limit."
    ),
    parameters={
        "type": "object",
        "properties": {
            "fileId": {"type": "string", "description": "The file ID to search in"},
            "query": {"type": "string", "description": "Text to search for (case-insensitive)"},
            "contextLines": {"type": "integer", "description": "Number of context lines around each match (default: 2)"},
        },
        "required": ["fileId", "query"]
    },
    readOnly=True
)
# Read-only tool: enumerate workspace folders.
registry.register(
    "listFolders", _listFolders,
    description="List folders in the local workspace. For external data sources, use browseDataSource instead.",
    parameters={
        "type": "object",
        "properties": {
            "parentId": {"type": "string", "description": "Parent folder ID (omit for root)"},
        }
    },
    readOnly=True
)
# Read-only tool: general web research.
registry.register(
    "webSearch", _webSearch,
    description="Search the web for general information. Use readUrl to fetch content from a known URL instead.",
    parameters={
        "type": "object",
        "properties": {"query": {"type": "string", "description": "Search query"}},
        "required": ["query"]
    },
    readOnly=True
)
# Write tool: set file tags.
registry.register(
    "tagFile", _tagFile,
    description="Set or update tags on a file for categorization and filtering via listFiles.",
    parameters={
        "type": "object",
        "properties": {
            "fileId": {"type": "string", "description": "The file ID"},
            "tags": {"type": "array", "items": {"type": "string"}, "description": "Tags to set"},
        },
        "required": ["fileId", "tags"]
    },
    readOnly=False
)
# Write tool: move a file between folders.
registry.register(
    "moveFile", _moveFile,
    description="Move a file to a different folder in the local workspace.",
    parameters={
        "type": "object",
        "properties": {
            "fileId": {"type": "string", "description": "The file ID to move"},
            "targetFolderId": {"type": "string", "description": "Target folder ID (null for root)"},
        },
        "required": ["fileId"]
    },
    readOnly=False
)
# Write tool: create a folder.
registry.register(
    "createFolder", _createFolder,
    description="Create a new folder in the local workspace.",
    parameters={
        "type": "object",
        "properties": {
            "name": {"type": "string", "description": "Folder name"},
            "parentId": {"type": "string", "description": "Parent folder ID (omit for root)"},
        },
        "required": ["name"]
    },
    readOnly=False
)
# Write tool: create/append/overwrite file content (see _writeFile modes).
registry.register(
    "writeFile", _writeFile,
    description=(
        "Create, append, or overwrite a file. Modes:\n"
        "- create (default): create a new file (name required).\n"
        "- append: append content to an existing file (fileId required). "
        "Use for large content that exceeds a single tool call (~8000 chars per call).\n"
        "- overwrite: replace entire file content (fileId required)."
    ),
    parameters={
        "type": "object",
        "properties": {
            "name": {"type": "string", "description": "File name (required for mode=create)"},
            "content": {"type": "string", "description": "Content to write/append"},
            "mode": {"type": "string", "enum": ["create", "append", "overwrite"], "description": "Write mode (default: create)"},
            "fileId": {"type": "string", "description": "File ID (required for mode=append/overwrite)"},
            "folderId": {"type": "string", "description": "Target folder ID (mode=create only)"},
            "tags": {"type": "array", "items": {"type": "string"}, "description": "Tags (mode=create only)"},
        },
        "required": ["content"]
    },
    readOnly=False
)
# ---- Phase 1: deleteFile, renameFile, readUrl, translateText ----
async def _deleteFile(args: Dict[str, Any], context: Dict[str, Any]):
    """Permanently delete a file; best-effort removal from the knowledge index first."""
    fileId = args.get("fileId", "")
    if not fileId:
        return ToolResult(toolCallId="", toolName="deleteFile", success=False, error="fileId is required")
    try:
        dbMgmt = services.chat.interfaceDbComponent
        file = dbMgmt.getFile(fileId)
        if not file:
            return ToolResult(toolCallId="", toolName="deleteFile", success=False, error=f"File {fileId} not found")
        fileName = file.fileName
        try:
            # Best effort: de-index the file if the knowledge service supports it.
            knowledgeService = services.getService("knowledge")
            if knowledgeService and hasattr(knowledgeService, "removeFile"):
                knowledgeService.removeFile(fileId)
        except Exception:
            pass
        dbMgmt.deleteFile(fileId)
        return ToolResult(
            toolCallId="", toolName="deleteFile", success=True,
            data=f"File '{fileName}' (id: {fileId}) deleted",
            sideEvents=[{"type": "fileDeleted", "data": {"fileId": fileId, "fileName": fileName}}],
        )
    except Exception as e:
        return ToolResult(toolCallId="", toolName="deleteFile", success=False, error=str(e))
async def _renameFile(args: Dict[str, Any], context: Dict[str, Any]):
    """Rename a workspace file (callers should include the extension in newName)."""
    fileId = args.get("fileId", "")
    newName = args.get("newName", "")
    if not fileId or not newName:
        return ToolResult(toolCallId="", toolName="renameFile", success=False, error="fileId and newName are required")
    try:
        services.chat.interfaceDbComponent.updateFile(fileId, {"fileName": newName})
        return ToolResult(
            toolCallId="", toolName="renameFile", success=True,
            data=f"File {fileId} renamed to '{newName}'",
            sideEvents=[{"type": "fileUpdated", "data": {"fileId": fileId, "fileName": newName}}],
        )
    except Exception as e:
        return ToolResult(toolCallId="", toolName="renameFile", success=False, error=str(e))
async def _readUrl(args: Dict[str, Any], context: Dict[str, Any]):
    """Fetch one URL via the web service's crawl and return extracted text (capped)."""
    url = args.get("url", "")
    if not url:
        return ToolResult(toolCallId="", toolName="readUrl", success=False, error="url is required")
    try:
        webService = services.getService("web")
        result = await webService._performWebCrawl(
            instruction="Extract all content from this page",
            urls=[url],
            maxDepth=1,
            maxWidth=1,
        )
        if isinstance(result, list) and result:
            parts = [
                item.get("content", "") or item.get("text", "") or str(item)
                for item in result if item
            ]
            content = "\n\n".join(parts)
        elif isinstance(result, dict):
            content = result.get("content", "") or result.get("summary", "") or str(result)
        else:
            content = str(result) if result else "No content retrieved"
        # Cap the payload so a huge page does not blow up the tool response.
        _MAX = 30000
        if len(content) > _MAX:
            content = content[:_MAX] + f"\n\n... (truncated at {_MAX} chars)"
        return ToolResult(toolCallId="", toolName="readUrl", success=True, data=content)
    except Exception as e:
        return ToolResult(toolCallId="", toolName="readUrl", success=False, error=str(e))
async def _translateText(args: Dict[str, Any], context: Dict[str, Any]):
    """Translate text via the voice interface (Google Cloud Translation backend).

    Tool args:
        text: text to translate (required).
        targetLanguage: ISO target language code (required).
        sourceLanguage: ISO source code; defaults to 'auto' (auto-detect).
    """
    text = args.get("text", "")
    targetLanguage = args.get("targetLanguage", "")
    if not text or not targetLanguage:
        return ToolResult(toolCallId="", toolName="translateText", success=False, error="text and targetLanguage are required")
    try:
        from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
        mandateId = context.get("mandateId", "")
        voiceInterface = getVoiceInterface(currentUser=None, mandateId=mandateId)
        sourceLanguage = args.get("sourceLanguage", "auto")
        result = await voiceInterface.translateText(text, sourceLanguage=sourceLanguage, targetLanguage=targetLanguage)
        if result and result.get("success"):
            translated = result.get("translated_text", "")
            return ToolResult(toolCallId="", toolName="translateText", success=True, data=translated)
        # Fix: result may be None/falsy here; the previous result.get(...) raised
        # AttributeError and surfaced a NoneType traceback instead of a clean error.
        errorMessage = (result or {}).get("error", "Translation failed")
        return ToolResult(toolCallId="", toolName="translateText", success=False, error=errorMessage)
    except Exception as e:
        return ToolResult(toolCallId="", toolName="translateText", success=False, error=str(e))
# Write tool: permanent file deletion.
registry.register(
    "deleteFile", _deleteFile,
    description="Permanently delete a file from the local workspace.",
    parameters={
        "type": "object",
        "properties": {
            "fileId": {"type": "string", "description": "The file ID to delete"},
        },
        "required": ["fileId"]
    },
    readOnly=False
)
# Write tool: rename a file.
registry.register(
    "renameFile", _renameFile,
    description="Rename a file in the local workspace. Include the file extension in the new name.",
    parameters={
        "type": "object",
        "properties": {
            "fileId": {"type": "string", "description": "The file ID to rename"},
            "newName": {"type": "string", "description": "New file name including extension"},
        },
        "required": ["fileId", "newName"]
    },
    readOnly=False
)
# Read-only tool: fetch a specific URL's content.
registry.register(
    "readUrl", _readUrl,
    description=(
        "Read and extract content from a specific URL. "
        "Use when the user provides a specific URL to read, or when you need to fetch content from a known web page. "
        "For general information searches, use webSearch instead."
    ),
    parameters={
        "type": "object",
        "properties": {
            "url": {"type": "string", "description": "The URL to read"},
        },
        "required": ["url"]
    },
    readOnly=True
)
# Read-only tool: machine translation via Google Cloud Translation.
registry.register(
    "translateText", _translateText,
    description=(
        "Translate text to a target language using Google Cloud Translation. "
        "More efficient than AI translation for large text volumes. "
        "Use ISO language codes (e.g. 'en', 'de', 'fr', 'es', 'it', 'pt', 'zh', 'ja', 'ko', 'ar')."
    ),
    parameters={
        "type": "object",
        "properties": {
            "text": {"type": "string", "description": "Text to translate"},
            "targetLanguage": {"type": "string", "description": "Target language ISO code (e.g. 'en', 'de', 'fr')"},
            "sourceLanguage": {"type": "string", "description": "Source language ISO code (default: auto-detect)"},
        },
        "required": ["text", "targetLanguage"]
    },
    readOnly=True
)
# ---- Phase 2: deleteFolder, renameFolder, moveFolder, copyFile, editFile ----
async def _deleteFolder(args: Dict[str, Any], context: Dict[str, Any]):
    """Delete a folder; with recursive=True its files and subfolders too."""
    folderId = args.get("folderId", "")
    recursive = args.get("recursive", False)
    if not folderId:
        return ToolResult(toolCallId="", toolName="deleteFolder", success=False, error="folderId is required")
    try:
        result = services.chat.interfaceDbComponent.deleteFolder(folderId, recursive=recursive)
        deletedFolders = result.get('deletedFolders', 1)
        deletedFiles = result.get('deletedFiles', 0)
        summary = f"Deleted {deletedFolders} folder(s) and {deletedFiles} file(s)"
        return ToolResult(
            toolCallId="", toolName="deleteFolder", success=True, data=summary,
            sideEvents=[{"type": "folderDeleted", "data": {"folderId": folderId, **result}}],
        )
    except Exception as e:
        return ToolResult(toolCallId="", toolName="deleteFolder", success=False, error=str(e))
async def _renameFolder(args: Dict[str, Any], context: Dict[str, Any]):
    """Rename a workspace folder."""
    folderId = args.get("folderId", "")
    newName = args.get("newName", "")
    if not folderId or not newName:
        return ToolResult(toolCallId="", toolName="renameFolder", success=False, error="folderId and newName are required")
    try:
        services.chat.interfaceDbComponent.renameFolder(folderId, newName)
        return ToolResult(
            toolCallId="", toolName="renameFolder", success=True,
            data=f"Folder {folderId} renamed to '{newName}'",
            sideEvents=[{"type": "folderUpdated", "data": {"folderId": folderId, "name": newName}}],
        )
    except Exception as e:
        return ToolResult(toolCallId="", toolName="renameFolder", success=False, error=str(e))
async def _moveFolder(args: Dict[str, Any], context: Dict[str, Any]):
    """Re-parent a folder; a missing/None target means root."""
    folderId = args.get("folderId", "")
    targetParentId = args.get("targetParentId")
    if not folderId:
        return ToolResult(toolCallId="", toolName="moveFolder", success=False, error="folderId is required")
    try:
        services.chat.interfaceDbComponent.moveFolder(folderId, targetParentId)
        return ToolResult(
            toolCallId="", toolName="moveFolder", success=True,
            data=f"Folder {folderId} moved to {targetParentId or 'root'}",
            sideEvents=[{"type": "folderUpdated", "data": {"folderId": folderId, "parentId": targetParentId}}],
        )
    except Exception as e:
        return ToolResult(toolCallId="", toolName="moveFolder", success=False, error=str(e))
async def _copyFile(args: Dict[str, Any], context: Dict[str, Any]):
    """Create an independent copy of a file, optionally renamed or re-foldered."""
    fileId = args.get("fileId", "")
    if not fileId:
        return ToolResult(toolCallId="", toolName="copyFile", success=False, error="fileId is required")
    try:
        duplicate = services.chat.interfaceDbComponent.copyFile(
            fileId,
            targetFolderId=args.get("targetFolderId"),
            newFileName=args.get("newFileName"),
        )
        createdEvent = {
            "type": "fileCreated",
            "data": {"fileId": duplicate.id, "fileName": duplicate.fileName,
                     "mimeType": duplicate.mimeType, "fileSize": duplicate.fileSize},
        }
        return ToolResult(
            toolCallId="", toolName="copyFile", success=True,
            data=f"File copied as '{duplicate.fileName}' (id: {duplicate.id})",
            sideEvents=[createdEvent],
        )
    except Exception as e:
        return ToolResult(toolCallId="", toolName="copyFile", success=False, error=str(e))
async def _replaceInFile(args: Dict[str, Any], context: Dict[str, Any]):
    """Propose a text replacement in a file; the edit awaits user review via sideEvents."""
    fileId = args.get("fileId", "")
    oldText = args.get("oldText", "")
    newText = args.get("newText", "")
    replaceAll = args.get("replaceAll", False)
    if not fileId or not oldText:
        return ToolResult(toolCallId="", toolName="replaceInFile", success=False, error="fileId and oldText are required")
    try:
        dbMgmt = services.chat.interfaceDbComponent
        file = dbMgmt.getFile(fileId)
        if not file:
            return ToolResult(toolCallId="", toolName="replaceInFile", success=False, error=f"File {fileId} not found")
        if not dbMgmt.isTextMimeType(file.mimeType):
            return ToolResult(
                toolCallId="", toolName="replaceInFile", success=False,
                error=f"Cannot edit binary file ({file.mimeType}). Only text-based files are supported."
            )
        rawData = dbMgmt.getFileData(fileId)
        if not rawData:
            return ToolResult(toolCallId="", toolName="replaceInFile", success=False, error="File has no content")
        try:
            oldContent = rawData.decode("utf-8")
        except UnicodeDecodeError:
            return ToolResult(toolCallId="", toolName="replaceInFile", success=False, error="File content is not valid UTF-8 text")
        count = oldContent.count(oldText)
        if count == 0:
            return ToolResult(
                toolCallId="", toolName="replaceInFile", success=False,
                error="oldText not found in file. Use readFile or searchInFileContent to verify the exact text."
            )
        if count > 1 and not replaceAll:
            return ToolResult(
                toolCallId="", toolName="replaceInFile", success=False,
                error=f"oldText found {count} times. Set replaceAll=true or provide more surrounding context to make it unique."
            )
        # str.replace with count=-1 replaces every occurrence.
        maxReplacements = -1 if replaceAll else 1
        newContent = oldContent.replace(oldText, newText, maxReplacements)
        editId = str(_uuid.uuid4())
        label = f"all {count} occurrences" if replaceAll else "1 occurrence"
        return ToolResult(
            toolCallId="", toolName="replaceInFile", success=True,
            data=f"Edit proposed for '{file.fileName}': replaced {label}. Waiting for user review.",
            sideEvents=[{
                "type": "fileEditProposal",
                "data": {
                    "id": editId,
                    "fileId": fileId,
                    "fileName": file.fileName,
                    "mimeType": file.mimeType,
                    "oldContent": oldContent,
                    "newContent": newContent,
                },
            }],
        )
    except Exception as e:
        return ToolResult(toolCallId="", toolName="replaceInFile", success=False, error=str(e))
# Write tool: folder deletion (optionally recursive).
registry.register(
    "deleteFolder", _deleteFolder,
    description="Delete a folder from the local workspace. Set recursive=true to delete all contents.",
    parameters={
        "type": "object",
        "properties": {
            "folderId": {"type": "string", "description": "The folder ID to delete"},
            "recursive": {"type": "boolean", "description": "If true, delete folder and all contents (files and subfolders). Default: false"},
        },
        "required": ["folderId"]
    },
    readOnly=False
)
# Write tool: rename a folder.
registry.register(
    "renameFolder", _renameFolder,
    description="Rename a folder in the local workspace.",
    parameters={
        "type": "object",
        "properties": {
            "folderId": {"type": "string", "description": "The folder ID to rename"},
            "newName": {"type": "string", "description": "New folder name"},
        },
        "required": ["folderId", "newName"]
    },
    readOnly=False
)
# Write tool: re-parent a folder.
registry.register(
    "moveFolder", _moveFolder,
    description="Move a folder to a different parent in the local workspace.",
    parameters={
        "type": "object",
        "properties": {
            "folderId": {"type": "string", "description": "The folder ID to move"},
            "targetParentId": {"type": "string", "description": "Target parent folder ID (null/omit for root)"},
        },
        "required": ["folderId"]
    },
    readOnly=False
)
# Write tool: duplicate a file.
registry.register(
    "copyFile", _copyFile,
    description="Create an independent copy of a file in the local workspace.",
    parameters={
        "type": "object",
        "properties": {
            "fileId": {"type": "string", "description": "The file ID to copy"},
            "targetFolderId": {"type": "string", "description": "Target folder for the copy (default: same folder)"},
            "newFileName": {"type": "string", "description": "New file name (default: same name, auto-numbered if duplicate)"},
        },
        "required": ["fileId"]
    },
    readOnly=False
)
# Write tool: propose an in-file text replacement (user reviews before apply).
registry.register(
    "replaceInFile", _replaceInFile,
    description=(
        "Replace specific text in an existing file. The edit is shown to the user for "
        "review (accept/reject) before being applied. Provide enough surrounding context "
        "in oldText to make the match unique (at least 2-3 lines). "
        "Use readFile or searchInFileContent first to identify the exact text to replace."
    ),
    parameters={
        "type": "object",
        "properties": {
            "fileId": {"type": "string", "description": "The file ID to edit"},
            "oldText": {"type": "string", "description": "Exact text to find and replace (must be unique unless replaceAll=true)"},
            "newText": {"type": "string", "description": "The replacement text"},
            "replaceAll": {"type": "boolean", "description": "Replace all occurrences (default: false)"},
        },
        "required": ["fileId", "oldText", "newText"]
    },
    readOnly=False
)

View file

@ -0,0 +1,27 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""Orchestrator: registers all core agent tools by delegating to domain modules."""
from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
from modules.serviceCenter.services.serviceAgent.coreTools._workspaceTools import _registerWorkspaceTools
from modules.serviceCenter.services.serviceAgent.coreTools._connectionTools import _registerConnectionTools
from modules.serviceCenter.services.serviceAgent.coreTools._dataSourceTools import _registerDataSourceTools
from modules.serviceCenter.services.serviceAgent.coreTools._documentTools import _registerDocumentTools
from modules.serviceCenter.services.serviceAgent.coreTools._mediaTools import _registerMediaTools
from modules.serviceCenter.services.serviceAgent.coreTools._featureSubAgentTools import _registerFeatureSubAgentTools
from modules.serviceCenter.services.serviceAgent.coreTools._crossWorkflowTools import _registerCrossWorkflowTools
def registerCoreTools(registry: ToolRegistry, services):
    """Register all built-in core tools on the agent ToolRegistry.

    Delegates to the domain-specific registrars under coreTools/, invoked in
    the same order as before.
    """
    registrars = (
        _registerWorkspaceTools,
        _registerConnectionTools,
        _registerDataSourceTools,
        _registerDocumentTools,
        _registerMediaTools,
        _registerFeatureSubAgentTools,
        _registerCrossWorkflowTools,
    )
    for registerDomainTools in registrars:
        registerDomainTools(registry, services)

View file

@ -88,6 +88,8 @@ class AgentConfig(BaseModel):
maxRounds: int = Field(default=25, ge=1, le=100)
maxCostCHF: Optional[float] = Field(default=None, ge=0.0)
toolSet: str = Field(default="core")
initialToolboxes: List[str] = Field(default_factory=lambda: ["core"])
availableToolboxes: List[str] = Field(default_factory=list)
temperature: Optional[float] = Field(default=None, ge=0.0, le=2.0)

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,183 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Toolbox Registry for the Agent service.
Manages thematic tool groupings (toolboxes) and the `requestToolbox` meta-tool.
"""
import logging
from typing import Dict, List, Any, Optional, Set
from pydantic import BaseModel, Field
logger = logging.getLogger(__name__)
class ToolboxDefinition(BaseModel):
    """Definition of a thematic toolbox.

    A toolbox is a named group of tool names that can be activated together.
    Availability may be gated on a user connection (see requiresConnection).
    """
    id: str = Field(description="Unique toolbox identifier (e.g. 'core', 'email', 'workflow')")
    label: str = Field(description="Human-readable label")
    description: str = Field(default="", description="What this toolbox provides")
    featureCode: Optional[str] = Field(default=None, description="Feature code if toolbox is feature-specific")
    tools: List[str] = Field(default_factory=list, description="Tool names belonging to this toolbox")
    isDefault: bool = Field(default=False, description="If true, toolbox is active by default")
    requiresConnection: Optional[str] = Field(
        default=None,
        description="Connection authority required (e.g. 'microsoft', 'google'). None = always available."
    )
class ToolboxRegistry:
    """Registry for toolbox definitions. Manages activation and tool lookup."""

    def __init__(self):
        # Keyed by toolbox id; a later registration replaces an earlier one.
        self._toolboxes: Dict[str, ToolboxDefinition] = {}

    def registerToolbox(self, toolbox: ToolboxDefinition) -> None:
        """Register a toolbox definition."""
        if toolbox.id in self._toolboxes:
            logger.debug("Toolbox '%s' already registered, updating", toolbox.id)
        self._toolboxes[toolbox.id] = toolbox
        logger.debug("Registered toolbox: %s (%d tools, default=%s)", toolbox.id, len(toolbox.tools), toolbox.isDefault)

    def getToolbox(self, toolboxId: str) -> Optional[ToolboxDefinition]:
        """Get a toolbox by ID."""
        return self._toolboxes.get(toolboxId)

    def getAllToolboxes(self) -> List[ToolboxDefinition]:
        """Get all registered toolboxes."""
        return list(self._toolboxes.values())

    def getDefaultToolboxes(self) -> List[ToolboxDefinition]:
        """Get all default toolboxes (active at agent start)."""
        return [entry for entry in self._toolboxes.values() if entry.isDefault]

    def getActiveToolboxes(self, userConnections: List[str] = None) -> List[ToolboxDefinition]:
        """
        Get toolboxes available to the user based on their connections.
        Toolboxes without requiresConnection are always available.
        Toolboxes with requiresConnection are available only if the user has that connection.
        """
        granted: Set[str] = set(userConnections or [])
        return [
            entry for entry in self._toolboxes.values()
            if entry.requiresConnection is None or entry.requiresConnection in granted
        ]

    def getToolsForToolboxes(self, toolboxIds: List[str]) -> List[str]:
        """Get the union of all tool names for the given toolbox IDs."""
        collected: Set[str] = set()
        for toolboxId in toolboxIds:
            definition = self._toolboxes.get(toolboxId)
            if definition is not None:
                collected.update(definition.tools)
        return sorted(collected)

    def getToolboxForTool(self, toolName: str) -> Optional[str]:
        """Find which toolbox a tool belongs to."""
        return next(
            (entry.id for entry in self._toolboxes.values() if toolName in entry.tools),
            None,
        )

    def toApiResponse(self, userConnections: List[str] = None) -> List[Dict[str, Any]]:
        """Serialize available toolboxes for API response."""
        return [
            {
                "id": entry.id,
                "label": entry.label,
                "description": entry.description,
                "toolCount": len(entry.tools),
                "isDefault": entry.isDefault,
                "requiresConnection": entry.requiresConnection,
            }
            for entry in self.getActiveToolboxes(userConnections)
        ]
# Module-level singleton
# Shared process-wide instance; populated by _registerDefaultToolboxes() at import time.
_toolboxRegistry = ToolboxRegistry()
def getToolboxRegistry() -> ToolboxRegistry:
    """Get the global toolbox registry singleton."""
    return _toolboxRegistry
def _registerDefaultToolboxes() -> None:
    """Register the default set of toolboxes.

    Called once at module import (see the call at the bottom of this module).
    Most entries start with an empty tool list; presumably the concrete tool
    names are attached later by the registering modules — TODO confirm.
    """
    defaults = [
        ToolboxDefinition(
            id="core",
            label="Core Tools",
            description="Basic agent tools: search, read, write, web",
            isDefault=True,
            tools=[],
        ),
        ToolboxDefinition(
            id="ai",
            label="AI Tools",
            description="AI-powered analysis and generation",
            isDefault=True,
            tools=[],
        ),
        ToolboxDefinition(
            id="datasources",
            label="Data Sources",
            description="Access external data sources and databases",
            isDefault=False,
            tools=[],
        ),
        # Connection-gated toolboxes: only offered when the user has the authority.
        ToolboxDefinition(
            id="email",
            label="Email",
            description="Read and send emails via Outlook/Gmail",
            requiresConnection="microsoft",
            isDefault=False,
            tools=[],
        ),
        ToolboxDefinition(
            id="sharepoint",
            label="SharePoint",
            description="Access SharePoint sites, lists, and files",
            requiresConnection="microsoft",
            isDefault=False,
            tools=[],
        ),
        ToolboxDefinition(
            id="clickup",
            label="ClickUp",
            description="Manage ClickUp tasks and projects",
            requiresConnection="clickup",
            isDefault=False,
            tools=[],
        ),
        ToolboxDefinition(
            id="jira",
            label="Jira",
            description="Manage Jira issues and projects",
            requiresConnection="jira",
            isDefault=False,
            tools=[],
        ),
        # Feature-specific toolbox: graph tools for the visual workflow editor.
        ToolboxDefinition(
            id="workflow",
            label="Workflow",
            description="Graph manipulation tools for the visual editor",
            featureCode="graphicalEditor",
            isDefault=False,
            tools=[
                "readWorkflowGraph", "addNode", "removeNode", "connectNodes",
                "setNodeParameter", "listAvailableNodeTypes", "validateGraph",
                "listWorkflowHistory", "readWorkflowMessages",
            ],
        ),
    ]
    for tb in defaults:
        _toolboxRegistry.registerToolbox(tb)
# Populate the registry with the built-in toolboxes at import time.
_registerDefaultToolboxes()

View file

@ -0,0 +1,479 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Workflow Toolbox - AI-assisted graph manipulation tools for the GraphicalEditor.
Tools: readWorkflowGraph, addNode, removeNode, connectNodes, setNodeParameter,
listAvailableNodeTypes, validateGraph, listWorkflowHistory, readWorkflowMessages.
"""
import logging
import uuid
from typing import Dict, Any, List, Optional
from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
logger = logging.getLogger(__name__)
TOOLBOX_ID = "workflow"
async def _readWorkflowGraph(params: Dict[str, Any], context: Any) -> ToolResult:
    """Read the current workflow graph (nodes and connections)."""
    try:
        workflowId = params.get("workflowId")
        instanceId = params.get("instanceId")
        if not workflowId or not instanceId:
            return ToolResult(success=False, error="workflowId and instanceId required")
        from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
        user = getattr(context, "user", None)
        mandateId = getattr(context, "mandateId", "") or ""
        iface = getGraphicalEditorInterface(user, mandateId, instanceId)
        wf = iface.getWorkflow(workflowId)
        if not wf:
            return ToolResult(success=False, error=f"Workflow {workflowId} not found")
        graph = wf.get("graph", {})
        nodes = graph.get("nodes", [])
        connections = graph.get("connections", [])
        # Summarize nodes down to the fields the agent needs for planning.
        nodeSummaries = [
            {"id": n.get("id"), "type": n.get("type"), "title": n.get("title", "")}
            for n in nodes
        ]
        payload = {
            "workflowId": workflowId,
            "label": wf.get("label", ""),
            "nodeCount": len(nodes),
            "connectionCount": len(connections),
            "nodes": nodeSummaries,
            "connections": connections,
        }
        return ToolResult(success=True, data=payload)
    except Exception as e:
        logger.exception("readWorkflowGraph failed: %s", e)
        return ToolResult(success=False, error=str(e))
async def _addNode(params: Dict[str, Any], context: Any) -> ToolResult:
    """Append a new node to the workflow graph and persist the updated graph."""
    try:
        workflowId = params.get("workflowId")
        instanceId = params.get("instanceId")
        nodeType = params.get("nodeType")
        if not (workflowId and instanceId and nodeType):
            return ToolResult(success=False, error="workflowId, instanceId, and nodeType required")
        from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
        editor = getGraphicalEditorInterface(
            getattr(context, "user", None),
            getattr(context, "mandateId", "") or "",
            instanceId,
        )
        workflow = editor.getWorkflow(workflowId)
        if not workflow:
            return ToolResult(success=False, error=f"Workflow {workflowId} not found")
        graph = dict(workflow.get("graph", {}))
        nodes = list(graph.get("nodes", []))
        # Caller may supply a node ID; otherwise use a short random one.
        nodeId = params.get("nodeId") or str(uuid.uuid4())[:8]
        title = params.get("title", "")
        newNode = {
            "id": nodeId,
            "type": nodeType,
            "title": title,
            "parameters": params.get("parameters", {}),
            # Default position staggers new nodes horizontally on the canvas.
            "position": params.get("position", {"x": len(nodes) * 200, "y": 100}),
        }
        graph["nodes"] = nodes + [newNode]
        editor.updateWorkflow(workflowId, {"graph": graph})
        return ToolResult(
            success=True,
            data={"nodeId": nodeId, "nodeType": nodeType, "message": f"Node '{title or nodeType}' added"},
        )
    except Exception as e:
        logger.exception("addNode failed: %s", e)
        return ToolResult(success=False, error=str(e))
async def _removeNode(params: Dict[str, Any], context: Any) -> ToolResult:
    """Delete a node plus every connection touching it, then persist the graph."""
    try:
        workflowId = params.get("workflowId")
        instanceId = params.get("instanceId")
        nodeId = params.get("nodeId")
        if not (workflowId and instanceId and nodeId):
            return ToolResult(success=False, error="workflowId, instanceId, and nodeId required")
        from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
        editor = getGraphicalEditorInterface(
            getattr(context, "user", None),
            getattr(context, "mandateId", "") or "",
            instanceId,
        )
        workflow = editor.getWorkflow(workflowId)
        if not workflow:
            return ToolResult(success=False, error=f"Workflow {workflowId} not found")
        graph = dict(workflow.get("graph", {}))
        # Drop the node itself and any edge that references it on either end.
        graph["nodes"] = [node for node in graph.get("nodes", []) if node.get("id") != nodeId]
        graph["connections"] = [
            conn for conn in graph.get("connections", [])
            if nodeId not in (conn.get("source"), conn.get("target"))
        ]
        editor.updateWorkflow(workflowId, {"graph": graph})
        return ToolResult(success=True, data={"nodeId": nodeId, "message": f"Node {nodeId} removed"})
    except Exception as e:
        logger.exception("removeNode failed: %s", e)
        return ToolResult(success=False, error=str(e))
async def _connectNodes(params: Dict[str, Any], context: Any) -> ToolResult:
    """Connect two existing nodes in the workflow graph.

    Both endpoints are validated against the current node set before the
    connection is stored, so the graph cannot accumulate dangling edges
    (previously such edges were only detected later by validateGraph).

    Expects params: workflowId, instanceId, sourceId, targetId, and optional
    sourceOutput/targetInput port indices (default 0).
    """
    try:
        workflowId = params.get("workflowId")
        instanceId = params.get("instanceId")
        sourceId = params.get("sourceId")
        targetId = params.get("targetId")
        if not workflowId or not instanceId or not sourceId or not targetId:
            return ToolResult(success=False, error="workflowId, instanceId, sourceId, and targetId required")
        from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
        user = getattr(context, "user", None)
        mandateId = getattr(context, "mandateId", "") or ""
        iface = getGraphicalEditorInterface(user, mandateId, instanceId)
        wf = iface.getWorkflow(workflowId)
        if not wf:
            return ToolResult(success=False, error=f"Workflow {workflowId} not found")
        graph = dict(wf.get("graph", {}))
        # Reject connections whose endpoints are not present in the graph —
        # the same check _validateGraph performs after the fact.
        nodeIds = {n.get("id") for n in graph.get("nodes", [])}
        missing = [nid for nid in (sourceId, targetId) if nid not in nodeIds]
        if missing:
            return ToolResult(success=False, error=f"Node(s) not found in graph: {', '.join(missing)}")
        connections = list(graph.get("connections", []))
        newConn = {
            "source": sourceId,
            "target": targetId,
            "sourceOutput": params.get("sourceOutput", 0),
            "targetInput": params.get("targetInput", 0),
        }
        connections.append(newConn)
        graph["connections"] = connections
        iface.updateWorkflow(workflowId, {"graph": graph})
        return ToolResult(success=True, data={"connection": newConn, "message": f"Connected {sourceId} -> {targetId}"})
    except Exception as e:
        logger.exception("connectNodes failed: %s", e)
        return ToolResult(success=False, error=str(e))
async def _setNodeParameter(params: Dict[str, Any], context: Any) -> ToolResult:
    """Update a single parameter on one node and persist the modified graph."""
    try:
        workflowId = params.get("workflowId")
        instanceId = params.get("instanceId")
        nodeId = params.get("nodeId")
        paramName = params.get("parameterName")
        paramValue = params.get("parameterValue")
        if not (workflowId and instanceId and nodeId and paramName):
            return ToolResult(success=False, error="workflowId, instanceId, nodeId, and parameterName required")
        from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
        editor = getGraphicalEditorInterface(
            getattr(context, "user", None),
            getattr(context, "mandateId", "") or "",
            instanceId,
        )
        workflow = editor.getWorkflow(workflowId)
        if not workflow:
            return ToolResult(success=False, error=f"Workflow {workflowId} not found")
        graph = dict(workflow.get("graph", {}))
        nodes = list(graph.get("nodes", []))
        targetNode = next((node for node in nodes if node.get("id") == nodeId), None)
        if targetNode is None:
            return ToolResult(success=False, error=f"Node {nodeId} not found in graph")
        # Copy-on-write: replace the parameters dict rather than mutating it.
        updatedParams = dict(targetNode.get("parameters", {}))
        updatedParams[paramName] = paramValue
        targetNode["parameters"] = updatedParams
        graph["nodes"] = nodes
        editor.updateWorkflow(workflowId, {"graph": graph})
        return ToolResult(success=True, data={"nodeId": nodeId, "parameter": paramName, "message": f"Parameter '{paramName}' set"})
    except Exception as e:
        logger.exception("setNodeParameter failed: %s", e)
        return ToolResult(success=False, error=str(e))
async def _listAvailableNodeTypes(params: Dict[str, Any], context: Any) -> ToolResult:
    """List all available node types for the flow builder.

    Returns one entry per static node type with its id, category, and a
    display label (the English translation when the label is a dict).
    """
    try:
        from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
        nodeTypes = []
        for n in STATIC_NODE_TYPES:
            label = n.get("label")
            # Labels are presumably {"en": ..., ...} dicts (as elsewhere in this
            # codebase) — tolerate plain strings or missing labels instead of
            # failing the whole listing on one malformed entry.
            if isinstance(label, dict):
                displayLabel = label.get("en", n.get("id"))
            else:
                displayLabel = label or n.get("id")
            nodeTypes.append({"id": n.get("id"), "category": n.get("category"), "label": displayLabel})
        return ToolResult(success=True, data={"nodeTypes": nodeTypes, "count": len(nodeTypes)})
    except Exception as e:
        logger.exception("listAvailableNodeTypes failed: %s", e)
        return ToolResult(success=False, error=str(e))
async def _validateGraph(params: Dict[str, Any], context: Any) -> ToolResult:
    """Check a workflow graph for structural problems; makes no changes."""
    try:
        workflowId = params.get("workflowId")
        instanceId = params.get("instanceId")
        if not (workflowId and instanceId):
            return ToolResult(success=False, error="workflowId and instanceId required")
        from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
        editor = getGraphicalEditorInterface(
            getattr(context, "user", None),
            getattr(context, "mandateId", "") or "",
            instanceId,
        )
        workflow = editor.getWorkflow(workflowId)
        if not workflow:
            return ToolResult(success=False, error=f"Workflow {workflowId} not found")
        graph = workflow.get("graph", {})
        nodes = graph.get("nodes", [])
        connections = graph.get("connections", [])
        knownIds = {node.get("id") for node in nodes}
        issues: List[str] = []
        if not nodes:
            issues.append("Graph has no nodes")
        # A runnable graph needs at least one trigger.* node.
        if not any(node.get("type", "").startswith("trigger.") for node in nodes):
            issues.append("No trigger node found")
        # Dangling edges: either endpoint missing from the node set.
        for conn in connections:
            if conn.get("source") not in knownIds:
                issues.append(f"Connection source '{conn.get('source')}' not found")
            if conn.get("target") not in knownIds:
                issues.append(f"Connection target '{conn.get('target')}' not found")
        # Orphans: non-trigger nodes with no edge on either side.
        wired = {conn.get("source") for conn in connections} | {conn.get("target") for conn in connections}
        orphans = [
            node.get("id") for node in nodes
            if node.get("id") not in wired and not node.get("type", "").startswith("trigger.")
        ]
        if orphans:
            issues.append(f"Orphan nodes (not connected): {', '.join(orphans)}")
        return ToolResult(
            success=True,
            data={
                "valid": not issues,
                "issues": issues,
                "nodeCount": len(nodes),
                "connectionCount": len(connections),
            },
        )
    except Exception as e:
        logger.exception("validateGraph failed: %s", e)
        return ToolResult(success=False, error=str(e))
async def _listWorkflowHistory(params: Dict[str, Any], context: Any) -> ToolResult:
    """List versions (history) for a workflow.

    Returns the workflow's version entries (id, number, status, publish info).
    """
    try:
        workflowId = params.get("workflowId", "")
        instanceId = params.get("instanceId", "")
        # Both IDs are marked required in the tool schema; fail fast like the
        # other handlers instead of querying with empty values.
        if not workflowId or not instanceId:
            return ToolResult(success=False, error="workflowId and instanceId required")
        from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
        user = getattr(context, "user", None)
        mandateId = getattr(context, "mandateId", "") or ""
        iface = getGraphicalEditorInterface(user, mandateId, instanceId)
        # Guard against a None result so the comprehension below cannot fail.
        versions = iface.getVersions(workflowId) or []
        return ToolResult(
            success=True,
            data={
                "workflowId": workflowId,
                "versions": [
                    {
                        "id": v.get("id"),
                        "versionNumber": v.get("versionNumber"),
                        "status": v.get("status"),
                        "publishedAt": v.get("publishedAt"),
                        "publishedBy": v.get("publishedBy"),
                    }
                    for v in versions
                ],
            },
        )
    except Exception as e:
        logger.exception("listWorkflowHistory failed: %s", e)
        return ToolResult(success=False, error=str(e))
async def _readWorkflowMessages(params: Dict[str, Any], context: Any) -> ToolResult:
    """Read recent run logs/messages for a workflow.

    Returns summaries for up to the 10 most recent runs, newest first.
    """
    try:
        workflowId = params.get("workflowId", "")
        instanceId = params.get("instanceId", "")
        # Both IDs are marked required in the tool schema; fail fast like the
        # other handlers instead of querying with empty values.
        if not workflowId or not instanceId:
            return ToolResult(success=False, error="workflowId and instanceId required")
        from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
        user = getattr(context, "user", None)
        mandateId = getattr(context, "mandateId", "") or ""
        iface = getGraphicalEditorInterface(user, mandateId, instanceId)
        from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoRun
        runs = iface.db.getRecordset(AutoRun, recordFilter={"workflowId": workflowId}) or []
        # NOTE(review): assumes "startedAt" values are mutually comparable
        # (numeric timestamps) — mixed types would make sorted() raise; the
        # outer except would then surface that as an error result.
        recent = sorted(runs, key=lambda x: x.get("startedAt") or 0, reverse=True)[:10]
        runSummaries = [
            {
                "runId": r.get("id"),
                "status": r.get("status"),
                "startedAt": r.get("startedAt"),
                "completedAt": r.get("completedAt"),
                "error": r.get("error"),
            }
            for r in recent
        ]
        return ToolResult(
            success=True,
            data={"workflowId": workflowId, "recentRuns": runSummaries},
        )
    except Exception as e:
        logger.exception("readWorkflowMessages failed: %s", e)
        return ToolResult(success=False, error=str(e))
def getWorkflowToolDefinitions() -> List[Dict[str, Any]]:
    """Return tool definitions for registration in the ToolRegistry.

    Each entry maps a tool name to its async handler plus a JSON-Schema
    description of its parameters. "toolSet" groups every tool under the
    "workflow" toolbox; "readOnly" marks tools with no side effects.
    """
    return [
        # Read-style tool: summarize the graph (nodes + connections).
        {
            "name": "readWorkflowGraph",
            "handler": _readWorkflowGraph,
            "description": "Read the current workflow graph (nodes and connections)",
            "parameters": {
                "type": "object",
                "properties": {
                    "workflowId": {"type": "string", "description": "Workflow ID"},
                    "instanceId": {"type": "string", "description": "Feature instance ID"},
                },
                "required": ["workflowId", "instanceId"],
            },
            "toolSet": TOOLBOX_ID,
        },
        # Mutating tool: append a node (id/position optional, auto-generated).
        {
            "name": "addNode",
            "handler": _addNode,
            "description": "Add a node to the workflow graph",
            "parameters": {
                "type": "object",
                "properties": {
                    "workflowId": {"type": "string"},
                    "instanceId": {"type": "string"},
                    "nodeType": {"type": "string", "description": "Node type (e.g. ai.chat, email.send)"},
                    "title": {"type": "string", "description": "Human-readable title"},
                    "parameters": {"type": "object", "description": "Node parameters"},
                    "position": {"type": "object", "description": "Canvas position {x, y}"},
                },
                "required": ["workflowId", "instanceId", "nodeType"],
            },
            "toolSet": TOOLBOX_ID,
        },
        # Mutating tool: delete a node and every edge touching it.
        {
            "name": "removeNode",
            "handler": _removeNode,
            "description": "Remove a node and its connections from the graph",
            "parameters": {
                "type": "object",
                "properties": {
                    "workflowId": {"type": "string"},
                    "instanceId": {"type": "string"},
                    "nodeId": {"type": "string", "description": "ID of the node to remove"},
                },
                "required": ["workflowId", "instanceId", "nodeId"],
            },
            "toolSet": TOOLBOX_ID,
        },
        # Mutating tool: add an edge between two nodes (port indices default 0).
        {
            "name": "connectNodes",
            "handler": _connectNodes,
            "description": "Connect two nodes in the graph",
            "parameters": {
                "type": "object",
                "properties": {
                    "workflowId": {"type": "string"},
                    "instanceId": {"type": "string"},
                    "sourceId": {"type": "string"},
                    "targetId": {"type": "string"},
                    "sourceOutput": {"type": "integer", "default": 0},
                    "targetInput": {"type": "integer", "default": 0},
                },
                "required": ["workflowId", "instanceId", "sourceId", "targetId"],
            },
            "toolSet": TOOLBOX_ID,
        },
        # Mutating tool: set one parameter on one node.
        {
            "name": "setNodeParameter",
            "handler": _setNodeParameter,
            "description": "Set a parameter on a node",
            "parameters": {
                "type": "object",
                "properties": {
                    "workflowId": {"type": "string"},
                    "instanceId": {"type": "string"},
                    "nodeId": {"type": "string"},
                    "parameterName": {"type": "string"},
                    # No "type" key: the value may be of any JSON type.
                    "parameterValue": {"description": "Value to set (any type)"},
                },
                "required": ["workflowId", "instanceId", "nodeId", "parameterName", "parameterValue"],
            },
            "toolSet": TOOLBOX_ID,
        },
        # Read-only catalogue of node types; takes no parameters.
        {
            "name": "listAvailableNodeTypes",
            "handler": _listAvailableNodeTypes,
            "description": "List all available node types for the flow builder",
            "parameters": {"type": "object", "properties": {}},
            "readOnly": True,
            "toolSet": TOOLBOX_ID,
        },
        # Read-only structural validation (dangling edges, orphans, no trigger).
        {
            "name": "validateGraph",
            "handler": _validateGraph,
            "description": "Validate a workflow graph for common issues",
            "parameters": {
                "type": "object",
                "properties": {
                    "workflowId": {"type": "string"},
                    "instanceId": {"type": "string"},
                },
                "required": ["workflowId", "instanceId"],
            },
            "readOnly": True,
            "toolSet": TOOLBOX_ID,
        },
        # Read-only version history listing.
        {
            "name": "listWorkflowHistory",
            "handler": _listWorkflowHistory,
            "description": "List version history for a workflow (AutoVersion entries)",
            "parameters": {
                "type": "object",
                "properties": {
                    "workflowId": {"type": "string"},
                    "instanceId": {"type": "string"},
                },
                "required": ["workflowId", "instanceId"],
            },
            "readOnly": True,
            "toolSet": TOOLBOX_ID,
        },
        # Read-only recent-run summaries.
        {
            "name": "readWorkflowMessages",
            "handler": _readWorkflowMessages,
            "description": "Read recent run logs and status for a workflow",
            "parameters": {
                "type": "object",
                "properties": {
                    "workflowId": {"type": "string"},
                    "instanceId": {"type": "string"},
                },
                "required": ["workflowId", "instanceId"],
            },
            "readOnly": True,
            "toolSet": TOOLBOX_ID,
        },
    ]

View file

@ -0,0 +1,71 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Subscription handler for GraphicalEditor workflow run failures.
Sends email notifications to subscribed users when a workflow run fails.
"""
from typing import List
from modules.datamodels.datamodelMessaging import (
MessagingEventParameters,
MessagingSubscriptionExecutionResult,
MessagingSubscriptionRegistration,
MessagingChannel,
)
def execute(
    eventParameters: MessagingEventParameters,
    registrations: List[MessagingSubscriptionRegistration],
    messagingService,
) -> MessagingSubscriptionExecutionResult:
    """
    Subscription function for GraphicalEditor run failures.

    Sends an email and/or SMS notification to every subscribed user when a
    workflow run fails.

    Args:
        eventParameters: Event payload; triggerData carries workflow/run info.
        registrations: Subscriptions to notify, each with a delivery channel.
        messagingService: Service used to deliver the messages.

    Returns:
        Execution result with the number of successfully sent messages.
    """
    triggerData = eventParameters.triggerData or {}
    workflowId = triggerData.get("workflowId", "Unknown")
    workflowLabel = triggerData.get("workflowLabel", workflowId)
    runId = triggerData.get("runId", "Unknown")
    # Coerce to str: triggerData is loosely typed and may carry a non-string
    # error payload; the SMS body below slices it, which requires a string.
    error = str(triggerData.get("error", "Unknown error"))
    emailRegistrations = [r for r in registrations if r.channel == MessagingChannel.EMAIL]
    smsRegistrations = [r for r in registrations if r.channel == MessagingChannel.SMS]
    emailSubject = f"Workflow fehlgeschlagen: {workflowLabel}"
    emailMessage = (
        f"Ein Workflow-Run ist fehlgeschlagen.\n\n"
        f"Workflow: {workflowLabel}\n"
        f"Workflow-ID: {workflowId}\n"
        f"Run-ID: {runId}\n"
        f"Fehler: {error}\n\n"
        f"Bitte prüfen Sie den Workflow im Grafischen Editor."
    )
    # Keep SMS short: only the label and a truncated error.
    smsMessage = f"Workflow '{workflowLabel}' fehlgeschlagen: {error[:100]}"
    messagesSent = 0
    for reg in emailRegistrations:
        sendResult = messagingService.sendMessage(
            subject=emailSubject,
            message=emailMessage,
            registration=reg,
        )
        if sendResult.success:
            messagesSent += 1
    for reg in smsRegistrations:
        # SMS has no subject line.
        sendResult = messagingService.sendMessage(
            subject="",
            message=smsMessage,
            registration=reg,
        )
        if sendResult.success:
            messagesSent += 1
    return MessagingSubscriptionExecutionResult(
        success=True,
        messagesSent=messagesSent,
    )

View file

@ -272,26 +272,6 @@ NAVIGATION_SECTIONS = [
"adminOnly": True,
"sysAdminOnly": True,
},
{
"id": "admin-automation-events",
"objectKey": "ui.admin.automationEvents",
"label": {"en": "Automation Events", "de": "Automation Events", "fr": "Événements d'automatisation"},
"icon": "FaClock",
"path": "/admin/automation-events",
"order": 80,
"adminOnly": True,
"sysAdminOnly": True,
},
{
"id": "admin-automation-logs",
"objectKey": "ui.admin.automationLogs",
"label": {"en": "Execution Logs", "de": "Ausführungsprotokolle", "fr": "Journaux d'exécution"},
"icon": "FaClipboardList",
"path": "/admin/automation-logs",
"order": 85,
"adminOnly": True,
"sysAdminOnly": True,
},
{
"id": "admin-logs",
"objectKey": "ui.admin.logs",
@ -369,7 +349,6 @@ UI_OBJECTS = _buildUiObjectsFromNavigation()
# - data.uam.* → User Access Management (mandantenübergreifend)
# - data.chat.* → Chat/AI-Daten (benutzer-eigen, kein Mandantenkontext)
# - data.files.* → Dateien (benutzer-eigen)
# - data.automation.* → Automation (benutzer-eigen)
# - data.feature.* → Mandanten-/Feature-spezifische Daten (dynamisch)
# =============================================================================
@ -437,12 +416,6 @@ DATA_OBJECTS = [
"label": {"en": "File", "de": "Datei", "fr": "Fichier"},
"meta": {"table": "FileItem", "namespace": "files", "groupDisabled": True}
},
# Automation - benutzer-eigen
{
"objectKey": "data.automation.AutomationDefinition",
"label": {"en": "Automation", "de": "Automatisierung", "fr": "Automatisation"},
"meta": {"table": "AutomationDefinition", "namespace": "automation", "groupDisabled": True}
},
]
# =============================================================================
@ -450,16 +423,6 @@ DATA_OBJECTS = [
# =============================================================================
RESOURCE_OBJECTS = [
{
"objectKey": "resource.store.automation",
"label": {"en": "Store: Automation", "de": "Store: Automation", "fr": "Store: Automatisation"},
"meta": {"category": "store", "featureCode": "automation"}
},
{
"objectKey": "resource.store.automation2",
"label": {"en": "Store: Automation 2", "de": "Store: Automation 2", "fr": "Store: Automatisation 2"},
"meta": {"category": "store", "featureCode": "automation2"}
},
{
"objectKey": "resource.store.teamsbot",
"label": {"en": "Store: Teams Bot", "de": "Store: Teams Bot", "fr": "Store: Teams Bot"},

View file

@ -1,10 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Workflow feature - handles workflow execution, scheduling, and start/stop operations.
"""
from .mainWorkflow import chatStart, chatStop, executeAutomation, syncAutomationEvents, createAutomationEventHandler
__all__ = ['chatStart', 'chatStop', 'executeAutomation', 'syncAutomationEvents', 'createAutomationEventHandler']

View file

@ -1,325 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Main workflow service - handles workflow execution, scheduling, and start/stop operations.
"""
import logging
import json
from typing import Dict, Any, Optional
from modules.datamodels.datamodelChat import ChatWorkflow, UserInputRequest, WorkflowModeEnum
from modules.features.automation.datamodelFeatureAutomation import AutomationDefinition
from modules.datamodels.datamodelUam import User
from modules.shared.timeUtils import getUtcTimestamp
from modules.shared.eventManagement import eventManager
from modules.features.automation.mainAutomation import getAutomationServices
from modules.workflows.workflowManager import WorkflowManager
from .subAutomationUtils import parseScheduleToCron, planToPrompt, replacePlaceholders
logger = logging.getLogger(__name__)
async def chatStart(currentUser: User, userInput: UserInputRequest, workflowMode: WorkflowModeEnum, workflowId: Optional[str] = None, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None, featureCode: Optional[str] = None, services=None) -> ChatWorkflow:
    """Start a new chat workflow or continue an existing one.

    Processing itself is launched asynchronously by the WorkflowManager.

    Args:
        currentUser: Current user.
        userInput: User input request.
        workflowMode: Workflow mode (Dynamic, Automation, etc.).
        workflowId: Optional ID of an existing workflow to continue.
        mandateId: Mandate ID (required for billing).
        featureInstanceId: Feature instance ID (required for billing).
        featureCode: Feature code (e.g. 'automation').
        services: Pre-built service hub from the calling feature (required);
            each feature must pass its own services.

    Raises:
        ValueError: If no service hub is supplied.
    """
    if services is None:
        raise ValueError("services is required: each feature must pass its own service hub (e.g. getAutomationServices)")
    try:
        # Propagate an optional AI-provider restriction for model selection.
        allowedProviders = getattr(userInput, 'allowedProviders', None)
        if allowedProviders:
            services.allowedProviders = allowedProviders
            logger.info(f"AI provider filter active: {allowedProviders}")
        # The feature code is consumed downstream for billing attribution.
        if featureCode:
            services.featureCode = featureCode
        manager = WorkflowManager(services)
        return await manager.workflowStart(userInput, workflowMode, workflowId)
    except Exception as e:
        logger.error(f"Error starting chat: {str(e)}")
        raise
async def chatStop(currentUser: User, workflowId: str, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None, featureCode: Optional[str] = None, services=None) -> ChatWorkflow:
    """Stop a running chat workflow.

    The caller must pass the service hub of the owning feature.

    Raises:
        ValueError: If no service hub is supplied.
    """
    if services is None:
        raise ValueError("services is required: each feature must pass its own service hub (e.g. getAutomationServices)")
    try:
        # Feature code is consumed downstream for billing attribution.
        if featureCode:
            services.featureCode = featureCode
        manager = WorkflowManager(services)
        return await manager.workflowStop(workflowId)
    except Exception as e:
        logger.error(f"Error stopping chat: {str(e)}")
        raise
async def executeAutomation(automationId: str, automation, creatorUser: User, services) -> ChatWorkflow:
    """Execute an automation workflow in the creator user's context.

    The automation object and creatorUser are resolved by the caller (handler)
    using the SysAdmin eventUser. This function does NOT re-load them.

    Flow: render the plan from the automation's template + placeholders, embed
    it in the prompt, start a WORKFLOW_AUTOMATION chat as the creator, then
    append an entry to the automation's execution log (also on failure).

    Args:
        automationId: ID of the automation to execute.
        automation: Pre-loaded automation object (with system fields like sysCreatedBy).
        creatorUser: The user who created the automation (workflow runs in this context).
        services: Services instance (used for interfaceDbAutomation log writes).

    Returns:
        ChatWorkflow instance created by the automation execution.

    Raises:
        ValueError: If mandate/instance context is missing or the rendered
            plan is not valid JSON. Any other exception is re-raised after
            the error has been recorded in the execution log.
    """
    executionStartTime = getUtcTimestamp()
    # Mutable log entry shared by both the success and the error path below.
    executionLog = {
        "timestamp": executionStartTime,
        "workflowId": None,
        "status": "running",
        "messages": []
    }
    try:
        executionLog["messages"].append(f"Started execution at {executionStartTime}")
        # Store allowed providers from automation in services context
        if hasattr(automation, 'allowedProviders') and automation.allowedProviders:
            services.allowedProviders = automation.allowedProviders
            logger.debug(f"Automation {automationId} restricted to providers: {automation.allowedProviders}")
        # Context comes EXCLUSIVELY from the automation definition
        automationMandateId = str(automation.mandateId) if automation.mandateId is not None else None
        automationFeatureInstanceId = str(automation.featureInstanceId) if automation.featureInstanceId is not None else None
        if not automationMandateId or not automationFeatureInstanceId:
            raise ValueError(f"Automation {automationId} missing mandateId or featureInstanceId")
        logger.info(f"Executing automation {automationId} as user {creatorUser.id} with mandateId={automationMandateId}, featureInstanceId={automationFeatureInstanceId}")
        # 1. Replace placeholders in template to generate the plan JSON.
        template = automation.template or ""
        placeholders = automation.placeholders or {}
        planJson = replacePlaceholders(template, placeholders)
        try:
            plan = json.loads(planJson)
        except json.JSONDecodeError as e:
            # Extensive diagnostics: placeholder replacement producing invalid
            # JSON is hard to debug without seeing the rendered text.
            logger.error(f"Failed to parse plan JSON after placeholder replacement: {str(e)}")
            logger.error(f"Template: {template[:500]}...")
            logger.error(f"Placeholders: {placeholders}")
            logger.error(f"Generated planJson (first 1000 chars): {planJson[:1000]}")
            logger.error(f"Error position: line {e.lineno}, column {e.colno}, char {e.pos}")
            if e.pos is not None:
                start = max(0, e.pos - 100)
                end = min(len(planJson), e.pos + 100)
                logger.error(f"Context around error: ...{planJson[start:end]}...")
            raise ValueError(f"Invalid JSON after placeholder replacement: {str(e)}")
        executionLog["messages"].append("Template placeholders replaced successfully")
        executionLog["messages"].append(f"Using creator user: {creatorUser.id}")
        # 2. Create UserInputRequest from plan
        # Embed plan JSON in prompt for TemplateMode to extract
        promptText = planToPrompt(plan)
        planJsonStr = json.dumps(plan)
        # Embed plan as JSON comment so TemplateMode can extract it
        promptWithPlan = f"{promptText}\n\n<!--TEMPLATE_PLAN_START-->\n{planJsonStr}\n<!--TEMPLATE_PLAN_END-->"
        userInput = UserInputRequest(
            prompt=promptWithPlan,
            listFileId=[],
            userLanguage=creatorUser.language or "en"
        )
        executionLog["messages"].append("Starting workflow execution")
        # 3. Start workflow using chatStart with creator's context
        # mandateId and featureInstanceId come from the automation definition
        # Each feature must pass its own services - no fallback
        creatorServices = getAutomationServices(
            creatorUser,
            mandateId=automationMandateId,
            featureInstanceId=automationFeatureInstanceId,
        )
        workflow = await chatStart(
            currentUser=creatorUser,
            userInput=userInput,
            workflowMode=WorkflowModeEnum.WORKFLOW_AUTOMATION,
            workflowId=None,
            mandateId=automationMandateId,
            featureInstanceId=automationFeatureInstanceId,
            featureCode='automation',
            services=creatorServices,
        )
        executionLog["workflowId"] = workflow.id
        executionLog["status"] = "completed"
        executionLog["messages"].append(f"Workflow {workflow.id} started successfully")
        logger.info(f"Started workflow {workflow.id} with plan containing {len(plan.get('tasks', []))} tasks (plan embedded in userInput)")
        # Set workflow name with "automated" prefix — use creatorServices from chatStart
        automationLabel = automation.label or "Unknown Automation"
        workflowName = f"automated: {automationLabel}"
        creatorServices.interfaceDbChat.updateWorkflow(workflow.id, {"name": workflowName})
        logger.info(f"Set workflow {workflow.id} name to: {workflowName}")
        # Save execution log (bypasses RBAC — system operation, not a user edit)
        executionLogs = list(automation.executionLogs or [])
        executionLogs.append(executionLog)
        # Keep only last 50 executions
        if len(executionLogs) > 50:
            executionLogs = executionLogs[-50:]
        services.interfaceDbAutomation._saveExecutionLog(automationId, executionLogs)
        return workflow
    except Exception as e:
        # Log error to execution log
        executionLog["status"] = "error"
        executionLog["messages"].append(f"Error: {str(e)}")
        # Save execution log even on error (bypasses RBAC — system operation)
        # Use the automation object already passed in (no re-load needed)
        try:
            executionLogs = list(getattr(automation, 'executionLogs', None) or [])
            executionLogs.append(executionLog)
            if len(executionLogs) > 50:
                executionLogs = executionLogs[-50:]
            services.interfaceDbAutomation._saveExecutionLog(automationId, executionLogs)
        except Exception as logError:
            # Best-effort only: never let log persistence mask the real error.
            logger.error(f"Error saving execution log: {str(logError)}")
        raise
def syncAutomationEvents(services, eventUser) -> Dict[str, Any]:
    """Reconcile the cron scheduler with all active automations.

    Active automations get a (re)registered cron job; inactive ones have any
    existing event removed. All operations (DB reads, scheduler registration)
    are synchronous.

    Args:
        services: Services instance for data access.
        eventUser: System-level event user for accessing automations.

    Returns:
        Dict with "synced" (count) and "events" (automationId -> eventId).
    """
    # RBAC-filtered automation definitions for the current mandate.
    automations = services.interfaceDbAutomation.getAllAutomationDefinitionsWithRBAC(eventUser)
    registeredEvents = {}
    for automation in automations:
        # Records may arrive as plain dicts or as objects.
        if isinstance(automation, dict):
            automationId = automation.get('id')
            isActive = automation.get('active', False)
            currentEventId = automation.get('eventId')
            schedule = automation.get('schedule')
        else:
            automationId = automation.id
            isActive = getattr(automation, 'active', False)
            currentEventId = getattr(automation, 'eventId', None)
            schedule = getattr(automation, 'schedule', None)
        if not schedule:
            logger.warning(f"Automation {automationId} has no schedule, skipping")
            continue
        try:
            cronKwargs = parseScheduleToCron(schedule)
            if isActive:
                newEventId = f"automation.{automationId}"
                # replaceExisting=True atomically swaps out any older job.
                eventManager.registerCron(
                    jobId=newEventId,
                    func=createAutomationEventHandler(automationId, eventUser),
                    cronKwargs=cronKwargs,
                    replaceExisting=True
                )
                if currentEventId != newEventId:
                    services.interfaceDbAutomation.updateAutomationDefinition(
                        automationId,
                        {"eventId": newEventId}
                    )
                registeredEvents[automationId] = newEventId
            elif currentEventId:
                # Inactive automation: drop its scheduled event, if any.
                try:
                    eventManager.remove(currentEventId)
                    services.interfaceDbAutomation.updateAutomationDefinition(
                        automationId,
                        {"eventId": None}
                    )
                except Exception as e:
                    logger.warning(f"Error removing event {currentEventId}: {str(e)}")
        except Exception as e:
            logger.error(f"Error syncing automation {automationId}: {str(e)}")
    return {
        "synced": len(registeredEvents),
        "events": registeredEvents
    }
def createAutomationEventHandler(automationId: str, eventUser):
    """Build the async cron callback for one automation.

    The returned coroutine function re-loads the automation with the SysAdmin
    eventUser on every tick, resolves the creator user, and executes the
    automation in that creator's context. automationId and eventUser are
    captured in the closure.
    """
    async def scheduledRun():
        try:
            if not eventUser:
                logger.error("Event user not available for automation execution")
                return
            # SysAdmin eventUser gives unrestricted access for the reload.
            adminServices = getAutomationServices(eventUser, mandateId=None, featureInstanceId=None)
            definition = adminServices.interfaceDbAutomation.getAutomationDefinition(automationId, includeSystemFields=True)
            if not definition or not getattr(definition, "active", False):
                logger.warning(f"Automation {automationId} not found or not active, skipping execution")
                return
            # The workflow must run as the automation's creator (sysCreatedBy).
            ownerId = getattr(definition, "sysCreatedBy", None)
            if not ownerId:
                logger.error(f"Automation {automationId} has no creator user (sysCreatedBy missing)")
                return
            owner = adminServices.interfaceDbApp.getUser(ownerId)
            if not owner:
                logger.error(f"Creator user {ownerId} not found for automation {automationId}")
                return
            # Pass the already-loaded definition and owner — no re-load inside.
            await executeAutomation(automationId, definition, owner, adminServices)
            logger.info(f"Successfully executed automation {automationId} as user {ownerId}")
        except Exception as e:
            logger.error(f"Error executing automation {automationId}: {str(e)}")
    return scheduledRun

View file

@ -1,65 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Automation Lifecycle Manager.
Handles startup and shutdown of scheduled automations.
Note: This module is NOT for feature container lifecycle - it only manages
the automation scheduler (loading/syncing scheduled automation events).
"""
import logging
from modules.features.automation.mainAutomation import getAutomationServices
logger = logging.getLogger(__name__)
def start(eventUser) -> bool:
    """Start the automation scheduler and perform the initial event sync.

    All operations here are synchronous (DB access, scheduler registration).

    Args:
        eventUser: System-level event user for background operations
            (provided by app.py).

    Returns:
        True on success (or when no event user is available, in which case
        the sync is simply skipped); False when startup sync fails.
    """
    if not eventUser:
        logger.warning("Automation: No event user provided, skipping automation sync")
        return True
    try:
        from modules.workflows.automation import syncAutomationEvents
        from modules.shared.callbackRegistry import callbackRegistry

        # Services for the event user expose the interfaces the sync needs.
        services = getAutomationServices(eventUser, mandateId=None, featureInstanceId=None)

        def onAutomationChanged(chatInterface):
            """Re-sync scheduled events whenever automations are created/updated/deleted."""
            changedServices = getAutomationServices(eventUser, mandateId=None, featureInstanceId=None)
            syncAutomationEvents(changedServices, eventUser)

        callbackRegistry.register('automation.changed', onAutomationChanged)
        logger.info("Automation: Registered change callback")

        # Initial sync on startup.
        syncAutomationEvents(services, eventUser)
        logger.info("Automation: Scheduled events synced on startup")
        return True
    except Exception as e:
        logger.error(f"Automation: Error setting up events on startup: {str(e)}")
        return False
def stop(eventUser) -> bool:
    """Stop the automation scheduler.

    Args:
        eventUser: System-level event user (provided by app.py; unused here).

    Returns:
        True always; shutdown is best-effort.
    """
    # Callbacks intentionally remain registered: the process is shutting down,
    # so there is nothing left for them to act on. The previous log message
    # claimed they were "cleaned up on shutdown", contradicting this comment.
    logger.info("Automation: Scheduler stopped (callbacks remain registered)")
    return True

View file

@ -1,385 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Automation templates for workflow definitions.
Contains predefined workflow templates that can be used to create automation definitions.
"""
from typing import Dict, Any
# Automation templates structure.
# Each entry in "sets" pairs a workflow "template" (tasks with an ordered
# actionList) with default "parameters"; "{{KEY:name}}" placeholders in the
# template are filled from those parameters at instantiation time.
AUTOMATION_TEMPLATES: Dict[str, Any] = {
    "sets": [
        # --- Template: summarize all SharePoint sites into a Word document ---
        {
            "template": {
                "overview": "SharePoint Themen Zusammenfassung",
                "tasks": [
                    {
                        "id": "Task01",
                        "title": "SharePoint Themen Zusammenfassung",
                        "description": "Erstellt eine Zusammenfassung aller SharePoint Sites und deren Inhalte",
                        "objective": "Erstelle eine Zusammenfassung aller SharePoint Themen (Sites) und deren Inhalte als Word-Dokument",
                        "actionList": [
                            {
                                "execMethod": "sharepoint",
                                "execAction": "findDocumentPath",
                                "execParameters": {
                                    "connectionReference": "{{KEY:connectionName}}",
                                    "searchQuery": "*",
                                    "maxResults": 100
                                },
                                "execResultLabel": "sharepoint_sites_found"
                            },
                            {
                                "execMethod": "sharepoint",
                                "execAction": "listDocuments",
                                "execParameters": {
                                    "connectionReference": "{{KEY:connectionName}}",
                                    "pathQuery": "{{KEY:sharepointBasePath}}",
                                    "includeSubfolders": True
                                },
                                "execResultLabel": "sharepoint_structure"
                            },
                            {
                                "execMethod": "ai",
                                "execAction": "process",
                                "execParameters": {
                                    "aiPrompt": "{{KEY:summaryPrompt}}",
                                    "documentList": ["sharepoint_sites_found", "sharepoint_structure"],
                                    "resultType": "docx"
                                },
                                "execResultLabel": "sharepoint_summary"
                            },
                            {
                                "execMethod": "sharepoint",
                                "execAction": "uploadDocument",
                                "execParameters": {
                                    "connectionReference": "{{KEY:connectionName}}",
                                    "documentList": ["sharepoint_summary"],
                                    "pathQuery": "{{KEY:sharepointFolderNameDestination}}"
                                },
                                "execResultLabel": "sharepoint_upload_result"
                            }
                        ]
                    }
                ]
            },
            "parameters": {
                "connectionName": "connection:msft:p.motsch@valueon.ch",
                "sharepointBasePath": "/sites/company-share",
                "sharepointFolderNameDestination": "/sites/company-share/Freigegebene Dokumente/15. Persoenliche Ordner/Patrick Motsch/output",
                "summaryPrompt": "Erstelle eine umfassende Zusammenfassung aller SharePoint Sites und deren Inhalte. Strukturiere das Dokument nach Sites und fasse für jede Site die wichtigsten Themen, Ordnerstrukturen und Dokumente zusammen. Erstelle ein professionelles Word-Dokument mit Überschriften, Abschnitten und einer klaren Gliederung. Berücksichtige alle gefundenen Sites, deren Ordnerstrukturen und dokumentiere die wichtigsten Inhalte pro Site."
            }
        },
        # --- Template: real-estate web research (Kanton Zürich) -> Excel -> upload ---
        {
            "template": {
                "overview": "Immobilienrecherche Zürich",
                "tasks": [
                    {
                        "id": "Task02",
                        "title": "Immobilienrecherche Zürich",
                        "description": "Webrecherche nach Immobilien im Kanton Zürich und Speicherung in Excel",
                        "objective": "Immobilienrecherche im Kanton Zürich zum Verkauf (5-20 Mio. CHF) und speichere Ergebnisse in Excel-Liste auf SharePoint",
                        "actionList": [
                            {
                                "execMethod": "ai",
                                "execAction": "webResearch",
                                "execParameters": {
                                    "prompt": "{{KEY:immobilienResearchPrompt}}",
                                    "urlList": ["{{KEY:immobilienResearchUrl}}"]
                                },
                                "execResultLabel": "immobilien_research_results"
                            },
                            {
                                "execMethod": "ai",
                                "execAction": "process",
                                "execParameters": {
                                    "aiPrompt": "{{KEY:excelFormatPrompt}}",
                                    "documentList": ["immobilien_research_results"],
                                    "resultType": "xlsx"
                                },
                                "execResultLabel": "immobilien_excel_list"
                            },
                            {
                                "execMethod": "sharepoint",
                                "execAction": "uploadDocument",
                                "execParameters": {
                                    "connectionReference": "{{KEY:connectionName}}",
                                    "documentList": ["immobilien_excel_list"],
                                    "pathQuery": "{{KEY:sharepointFolderNameDestination}}"
                                },
                                "execResultLabel": "immobilien_upload_result"
                            }
                        ]
                    }
                ]
            },
            "parameters": {
                "connectionName": "connection:msft:p.motsch@valueon.ch",
                "sharepointFolderNameDestination": "/sites/company-share/Freigegebene Dokumente/15. Persoenliche Ordner/Patrick Motsch/output",
                "immobilienResearchUrl": ["https://www.homegate.ch", "https://www.immoscout24.ch", "https://www.immowelt.ch"],
                "immobilienResearchPrompt": "Suche nach Immobilien zum Verkauf im Kanton Zürich, Schweiz, im Preisbereich von 5-20 Millionen CHF. Sammle Informationen zu: Ort, Preis, Beschreibung, URL zu Bildern, Verkäufer/Kontaktinformationen.",
                "excelFormatPrompt": "Erstelle eine Excel-Datei mit den recherchierten Immobilien. Jede Immobilie soll eine Zeile sein mit den folgenden Spalten: Ort, Preis (in CHF), Beschreibung, URL zu Bild, Verkäufer. Verwende die Daten aus der Webrecherche."
            }
        },
        # --- Template: expense receipts (PDF) -> CSV summary -> upload ---
        {
            "template": {
                "overview": "Spesenbelege Zusammenfassung",
                "tasks": [
                    {
                        "id": "Task03",
                        "title": "Spesenbelege CSV Zusammenfassung",
                        "description": "Liest PDF-Spesenbelege aus SharePoint-Ordner und erstellt CSV-Zusammenfassung",
                        "objective": "Extrahiere alle PDF-Spesenbelege aus einem SharePoint-Ordner und erstelle eine CSV-Datei mit allen Spesendaten im selben Ordner",
                        "actionList": [
                            {
                                "execMethod": "sharepoint",
                                "execAction": "findDocumentPath",
                                "execParameters": {
                                    "connectionReference": "{{KEY:connectionName}}",
                                    "searchQuery": "{{KEY:sharepointFolderNameSource}}:files:.pdf",
                                    "maxResults": 100
                                },
                                "execResultLabel": "sharepoint_pdf_files"
                            },
                            {
                                "execMethod": "sharepoint",
                                "execAction": "readDocuments",
                                "execParameters": {
                                    "connectionReference": "{{KEY:connectionName}}",
                                    "pathObject": "sharepoint_pdf_files"
                                },
                                "execResultLabel": "spesenbelege_documents"
                            },
                            {
                                "execMethod": "ai",
                                "execAction": "process",
                                "execParameters": {
                                    "aiPrompt": "{{KEY:expenseExtractionPrompt}}",
                                    "documentList": ["spesenbelege_documents"],
                                    "resultType": "csv"
                                },
                                "execResultLabel": "spesenbelege_csv"
                            },
                            {
                                "execMethod": "sharepoint",
                                "execAction": "uploadDocument",
                                "execParameters": {
                                    "connectionReference": "{{KEY:connectionName}}",
                                    "documentList": ["spesenbelege_csv"],
                                    "pathQuery": "{{KEY:sharepointFolderNameDestination}}"
                                },
                                "execResultLabel": "spesenbelege_upload_result"
                            }
                        ]
                    }
                ]
            },
            "parameters": {
                "connectionName": "connection:msft:p.motsch@valueon.ch",
                "sharepointFolderNameSource": "/sites/company-share/Freigegebene Dokumente/15. Persoenliche Ordner/Patrick Motsch/expenses",
                "sharepointFolderNameDestination": "/sites/company-share/Freigegebene Dokumente/15. Persoenliche Ordner/Patrick Motsch/output",
                "expenseExtractionPrompt": "Verarbeite alle bereitgestellten Dokumente, aber extrahiere nur Daten aus PDF-Spesenbelegen (ignoriere andere Dateitypen). Für jeden gefundenen PDF-Spesenbeleg extrahiere als separaten Datensatz: Datum, Betrag, MWST %, Währung, Kategorie, Beschreibung, Rechnungsnummer, Händler/Verkäufer, Steuerbetrag. Erstelle eine CSV-Datei mit einer Zeile pro Spesenbeleg. Verwende die folgenden Spaltenüberschriften: Datum, Betrag, Währung, Kategorie, Beschreibung, Rechnungsnummer, Händler, Steuerbetrag. Stelle sicher, dass alle Beträge numerisch sind und Datumswerte im Format YYYY-MM-DD vorliegen. Wenn ein Dokument kein Spesenbeleg ist, ignoriere es."
            }
        },
        # --- Template: trigger a customer-tenant preprocessing server with config JSON ---
        {
            "template": {
                "overview": "Preprocessing Server Data Update",
                "tasks": [
                    {
                        "id": "Task04",
                        "title": "Trigger Preprocessing Server",
                        "description": "Triggers the preprocessing server at customer tenant to update database with configuration",
                        "objective": "Call preprocessing server endpoint to update database with provided configuration JSON",
                        "actionList": [
                            {
                                "execMethod": "context",
                                "execAction": "triggerPreprocessingServer",
                                "execParameters": {
                                    "endpoint": "{{KEY:endpoint}}",
                                    "configJson": "{{KEY:configJson}}",
                                    "authSecretConfigKey": "{{KEY:authSecretConfigKey}}"
                                },
                                "execResultLabel": "preprocessing_server_result"
                            }
                        ]
                    }
                ]
            },
            "parameters": {
                "endpoint": "https://poweron-althaus-preprocess-prod-e3fegaatc7faency.switzerlandnorth-01.azurewebsites.net/api/v1/dataprocessor/update-db-with-config",
                "authSecretConfigKey": "PREPROCESS_ALTHAUS_CHAT_SECRET",
                # Escaped JSON string passed through verbatim to the endpoint.
                "configJson": "{\"tables\":[{\"name\":\"Artikel\",\"powerbi_table_name\":\"Artikel\",\"steps\":[{\"keep\":{\"columns\":[\"I_ID\",\"Artikelbeschrieb\",\"Artikelbezeichnung\",\"Artikelgruppe\",\"Artikelkategorie\",\"Artikelkürzel\",\"Artikelnummer\",\"Einheit\",\"Gesperrt\",\"Keywords\",\"Lieferant\",\"Warengruppe\"]}},{\"fillna\":{\"column\":\"Lieferant\",\"value\":\"Unbekannt\"}}]},{\"name\":\"Einkaufspreis\",\"powerbi_table_name\":\"Einkaufspreis\",\"steps\":[{\"to_numeric\":{\"column\":\"EP_CHF\",\"errors\":\"coerce\"}},{\"dropna\":{\"subset\":[\"EP_CHF\"]}}]}]}"
            }
        },
        # --- Template: bidirectional JIRA <-> SharePoint ticket synchronization ---
        {
            "template": {
                "overview": "JIRA to SharePoint Ticket Synchronization",
                "tasks": [
                    {
                        "id": "Task01",
                        "title": "Sync JIRA Tickets to SharePoint",
                        "description": "Export JIRA tickets, merge with SharePoint file, upload back, and import changes to JIRA",
                        "objective": "Synchronize JIRA tickets with SharePoint file (bidirectional sync)",
                        "actionList": [
                            {
                                "execMethod": "sharepoint",
                                "execAction": "findSiteByUrl",
                                "execParameters": {
                                    "connectionReference": "{{KEY:sharepointConnection}}",
                                    "hostname": "{{KEY:sharepointHostname}}",
                                    "sitePath": "{{KEY:sharepointSitePath}}"
                                },
                                "execResultLabel": "sharepoint_site"
                            },
                            {
                                "execMethod": "jira",
                                "execAction": "connectJira",
                                "execParameters": {
                                    "apiUsername": "{{KEY:jiraUsername}}",
                                    "apiTokenConfigKey": "{{KEY:jiraTokenConfigKey}}",
                                    "apiUrl": "{{KEY:jiraUrl}}",
                                    "projectCode": "{{KEY:jiraProjectCode}}",
                                    "issueType": "{{KEY:jiraIssueType}}",
                                    "taskSyncDefinition": "{{KEY:taskSyncDefinition}}"
                                },
                                "execResultLabel": "jira_connection"
                            },
                            {
                                "execMethod": "jira",
                                "execAction": "exportTicketsAsJson",
                                "execParameters": {
                                    "connectionId": "jira_connection",
                                    "taskSyncDefinition": "{{KEY:taskSyncDefinition}}"
                                },
                                "execResultLabel": "jira_exported_tickets"
                            },
                            {
                                "execMethod": "sharepoint",
                                "execAction": "downloadFileByPath",
                                "execParameters": {
                                    "connectionReference": "{{KEY:sharepointConnection}}",
                                    "siteId": "sharepoint_site",
                                    "filePath": "{{KEY:sharepointMainFolder}}/{{KEY:syncFileName}}"
                                },
                                "execResultLabel": "existing_file_content"
                            },
                            {
                                "execMethod": "jira",
                                "execAction": "parseExcelContent",
                                "execParameters": {
                                    "excelContent": "existing_file_content",
                                    "skipRows": 3,
                                    "hasCustomHeaders": True
                                },
                                "execResultLabel": "existing_parsed_data"
                            },
                            {
                                "execMethod": "jira",
                                "execAction": "mergeTicketData",
                                "execParameters": {
                                    "jiraData": "jira_exported_tickets",
                                    "existingData": "existing_parsed_data",
                                    "taskSyncDefinition": "{{KEY:taskSyncDefinition}}",
                                    "idField": "ID"
                                },
                                "execResultLabel": "merged_ticket_data"
                            },
                            # Back up the current file before overwriting it.
                            {
                                "execMethod": "sharepoint",
                                "execAction": "copyFile",
                                "execParameters": {
                                    "connectionReference": "{{KEY:sharepointConnection}}",
                                    "siteId": "sharepoint_site",
                                    "sourceFolder": "{{KEY:sharepointMainFolder}}",
                                    "sourceFile": "{{KEY:syncFileName}}",
                                    "destFolder": "{{KEY:sharepointBackupFolder}}",
                                    "destFile": "backup_{{TIMESTAMP}}_{{KEY:syncFileName}}"
                                },
                                "execResultLabel": "file_backup"
                            },
                            {
                                "execMethod": "jira",
                                "execAction": "createExcelContent",
                                "execParameters": {
                                    "data": "merged_ticket_data",
                                    "headers": "existing_parsed_data",
                                    "taskSyncDefinition": "{{KEY:taskSyncDefinition}}"
                                },
                                "execResultLabel": "new_file_content"
                            },
                            {
                                "execMethod": "sharepoint",
                                "execAction": "uploadFile",
                                "execParameters": {
                                    "connectionReference": "{{KEY:sharepointConnection}}",
                                    "siteId": "sharepoint_site",
                                    "folderPath": "{{KEY:sharepointMainFolder}}",
                                    "fileName": "{{KEY:syncFileName}}",
                                    "content": "new_file_content"
                                },
                                "execResultLabel": "uploaded_file"
                            },
                            # Re-download the uploaded file and import it back into JIRA.
                            {
                                "execMethod": "sharepoint",
                                "execAction": "downloadFileByPath",
                                "execParameters": {
                                    "connectionReference": "{{KEY:sharepointConnection}}",
                                    "siteId": "sharepoint_site",
                                    "filePath": "{{KEY:sharepointMainFolder}}/{{KEY:syncFileName}}"
                                },
                                "execResultLabel": "uploaded_file_content"
                            },
                            {
                                "execMethod": "jira",
                                "execAction": "parseExcelContent",
                                "execParameters": {
                                    "excelContent": "uploaded_file_content",
                                    "skipRows": 3,
                                    "hasCustomHeaders": True
                                },
                                "execResultLabel": "import_data"
                            },
                            {
                                "execMethod": "jira",
                                "execAction": "importTicketsFromJson",
                                "execParameters": {
                                    "connectionId": "jira_connection",
                                    "ticketData": "import_data",
                                    "taskSyncDefinition": "{{KEY:taskSyncDefinition}}"
                                },
                                "execResultLabel": "import_result"
                            }
                        ]
                    }
                ]
            },
            "parameters": {
                "sharepointConnection": "connection:msft:patrick.motsch@delta.ch",
                "sharepointHostname": "deltasecurityag.sharepoint.com",
                "sharepointSitePath": "SteeringBPM",
                "sharepointMainFolder": "/General/50 Docs hosted by SELISE",
                "sharepointBackupFolder": "/General/50 Docs hosted by SELISE/SyncHistory",
                "syncFileName": "DELTAgroup x SELISE Ticket Exchange List.xlsx",
                "jiraUsername": "p.motsch@valueon.ch",
                "jiraTokenConfigKey": "Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET",
                "jiraUrl": "https://deltasecurity.atlassian.net",
                "jiraProjectCode": "DCS",
                "jiraIssueType": "Task",
                # Column -> JIRA field mapping; "get" reads from JIRA, "put" writes back.
                "taskSyncDefinition": "{\"ID\":[\"get\",[\"key\"]],\"Module Category\":[\"get\",[\"fields\",\"customfield_10058\",\"value\"]],\"Summary\":[\"get\",[\"fields\",\"summary\"]],\"Description\":[\"get\",[\"fields\",\"description\"]],\"References\":[\"get\",[\"fields\",\"customfield_10066\"]],\"Priority\":[\"get\",[\"fields\",\"priority\",\"name\"]],\"Issue Status\":[\"get\",[\"fields\",\"status\",\"name\"]],\"Assignee\":[\"get\",[\"fields\",\"assignee\",\"displayName\"]],\"Issue Created\":[\"get\",[\"fields\",\"created\"]],\"Due Date\":[\"get\",[\"fields\",\"duedate\"]],\"DELTA Comments\":[\"get\",[\"fields\",\"customfield_10167\"]],\"SELISE Ticket References\":[\"put\",[\"fields\",\"customfield_10067\"]],\"SELISE Status Values\":[\"put\",[\"fields\",\"customfield_10065\"]],\"SELISE Comments\":[\"put\",[\"fields\",\"customfield_10168\"]]}"
            }
        }
    ]
}
def getAutomationTemplates() -> Dict[str, Any]:
    """
    Get the predefined automation templates.

    Returns:
        Dict containing the automation templates structure with a 'sets' key;
        each set pairs a workflow 'template' with its default 'parameters'.
    """
    return AUTOMATION_TEMPLATES

View file

@ -1,110 +0,0 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Utility functions for automation feature.
Moved from interfaces/interfaceDbChat.py.
"""
import json
from typing import Dict, Any
from datetime import datetime, UTC
def parseScheduleToCron(schedule: str) -> Dict[str, Any]:
    """Translate a 5-field cron string into APScheduler cron kwargs.

    Args:
        schedule: Cron expression "minute hour day month day_of_week".

    Returns:
        Mapping with keys minute/hour/day/month/day_of_week.

    Raises:
        ValueError: If the expression does not have exactly five fields.
    """
    fields = schedule.split()
    if len(fields) != 5:
        raise ValueError(f"Invalid schedule format: {schedule}")
    names = ("minute", "hour", "day", "month", "day_of_week")
    return dict(zip(names, fields))
def planToPrompt(plan: Dict) -> str:
    """Derive the execution prompt for a plan.

    Prefers the plan's explicit 'userMessage', then its 'overview', and
    finally a generic fallback instruction.
    """
    fallback = plan.get("overview", "Execute automation workflow")
    return plan.get("userMessage", fallback)
def replacePlaceholders(template: str, placeholders: Dict[str, str]) -> str:
"""Replace placeholders in template with actual values. Placeholder format: {{KEY:PLACEHOLDER_NAME}} or {{TIMESTAMP}}"""
result = template
# Replace TIMESTAMP placeholder first (calculated placeholder, not from parameters)
timestampPattern = "{{TIMESTAMP}}"
if timestampPattern in result:
timestamp = datetime.now(UTC).strftime("%Y%m%d_%H%M%S")
result = result.replace(timestampPattern, timestamp)
for placeholderName, value in placeholders.items():
pattern = f"{{{{KEY:{placeholderName}}}}}"
# Check if placeholder is in an array context like ["{{KEY:...}}"]
# If value is a JSON array/dict, we should replace the entire ["{{KEY:...}}"] with the array
arrayPattern = f'["{pattern}"]'
if arrayPattern in result:
# Check if value is a JSON array/dict
isArrayValue = False
arrayValue = None
if isinstance(value, (list, dict)):
isArrayValue = True
arrayValue = json.dumps(value)
elif isinstance(value, str):
try:
parsed = json.loads(value)
if isinstance(parsed, (list, dict)):
isArrayValue = True
arrayValue = value # Already valid JSON string
except (json.JSONDecodeError, ValueError):
pass
if isArrayValue:
# Replace ["{{KEY:...}}"] with the array value
result = result.replace(arrayPattern, arrayValue)
continue # Skip the regular replacement below
# Replace occurrences one-by-one to handle mixed contexts
while pattern in result:
patternStart = result.find(pattern)
isQuoted = False
if patternStart > 0:
charBefore = result[patternStart - 1]
patternEnd = patternStart + len(pattern)
charAfter = result[patternEnd] if patternEnd < len(result) else None
if charBefore == '"' and charAfter == '"':
isQuoted = True
if isinstance(value, (list, dict)):
replacement = json.dumps(value)
elif isinstance(value, str):
try:
parsed = json.loads(value)
if isinstance(parsed, (list, dict)):
if isQuoted:
escaped = json.dumps(value)
replacement = escaped[1:-1]
else:
replacement = value
else:
if isQuoted:
escaped = json.dumps(value)
replacement = escaped[1:-1]
else:
replacement = value
except (json.JSONDecodeError, ValueError):
if isQuoted:
escaped = json.dumps(value)
replacement = escaped[1:-1]
else:
replacement = value
else:
replacement = str(value)
result = result[:patternStart] + replacement + result[patternStart + len(pattern):]
return result

View file

@ -1,7 +1,10 @@
# Copyright (c) 2025 Patrick Motsch
# Main execution engine for automation2 graphs.
import asyncio
import logging
import time
import uuid
from datetime import datetime, timezone
from typing import Dict, Any, List, Set, Optional
@ -22,7 +25,7 @@ from modules.workflows.automation2.executors import (
PauseForHumanTaskError,
PauseForEmailWaitError,
)
from modules.features.automation2.nodeDefinitions import STATIC_NODE_TYPES
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
from modules.workflows.automation2.runEnvelope import normalize_run_envelope
logger = logging.getLogger(__name__)
@ -82,6 +85,82 @@ def _getExecutor(
return None
def _createStepLog(iface, runId: str, nodeId: str, nodeType: str, status: str = "running", inputSnapshot: Dict = None) -> Optional[str]:
    """Persist a new AutoStepLog row; return its id, or None when logging is unavailable."""
    if not (iface and runId):
        return None
    try:
        from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoStepLog
        stepId = str(uuid.uuid4())
        record = {
            "id": stepId,
            "runId": runId,
            "nodeId": nodeId,
            "nodeType": nodeType,
            "status": status,
            "inputSnapshot": inputSnapshot or {},
            "startedAt": time.time(),
        }
        iface.db.recordCreate(AutoStepLog, record)
        return stepId
    except Exception as e:
        # Step logging is best-effort; never let it break graph execution.
        logger.debug("Could not create AutoStepLog: %s", e)
        return None
def _updateStepLog(iface, stepId: str, status: str, output: Dict = None, error: str = None,
                   durationMs: int = None, tokensUsed: int = 0, retryCount: int = 0) -> None:
    """Best-effort update of an AutoStepLog row with the step's results."""
    if not (iface and stepId):
        return
    try:
        from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoStepLog
        updates: Dict[str, Any] = {
            "status": status,
            "completedAt": time.time(),
        }
        # Only persist the optional fields that were actually supplied.
        for key, val in (("output", output), ("error", error), ("durationMs", durationMs)):
            if val is not None:
                updates[key] = val
        if tokensUsed:
            updates["tokensUsed"] = tokensUsed
        if retryCount:
            updates["retryCount"] = retryCount
        iface.db.recordModify(AutoStepLog, stepId, updates)
    except Exception as e:
        # Step logging is best-effort; never let it break graph execution.
        logger.debug("Could not update AutoStepLog %s: %s", stepId, e)
async def _executeWithRetry(executor, node, context, maxRetries: int = 0, retryDelaySeconds: float = 1.0):
    """Run a node executor, retrying failures per the node's retry policy.

    Node parameters may override the defaults via retryMaxAttempts /
    retryDelaySeconds. Pause signals are control flow, not failures, and are
    never retried. The delay doubles after each failed attempt (capped at 60s).

    Returns:
        Tuple of (result, number of retries that were needed).
    """
    params = node.get("parameters") or {}
    retries = params.get("retryMaxAttempts", maxRetries)
    delay = params.get("retryDelaySeconds", retryDelaySeconds)
    lastError = None
    attempt = 0
    while attempt <= retries:
        try:
            return await executor.execute(node, context), attempt
        except (PauseForHumanTaskError, PauseForEmailWaitError):
            # Pauses must propagate immediately so the run can be suspended.
            raise
        except Exception as e:
            lastError = e
            attempt += 1
            if attempt > retries:
                raise lastError
            logger.warning(
                "Node %s failed (attempt %d/%d), retrying in %.1fs: %s",
                node.get("id"), attempt, retries + 1, delay, e,
            )
            await asyncio.sleep(delay)
            delay = min(delay * 2, 60)
    raise lastError
async def executeGraph(
graph: Dict[str, Any],
services: Any,
@ -202,17 +281,31 @@ async def executeGraph(
if not executor:
nodeOutputs[bnid] = None
continue
_rStepStart = time.time()
_rStepId = _createStepLog(automation2_interface, runId, bnid, body_node.get("type", ""), "running")
try:
result = await executor.execute(body_node, context)
result, _rRetry = await _executeWithRetry(executor, body_node, context)
nodeOutputs[bnid] = result
logger.info("executeGraph loop resume body node %s done (iter %d)", bnid, next_index)
_rDur = int((time.time() - _rStepStart) * 1000)
_updateStepLog(automation2_interface, _rStepId, "completed",
output=result if isinstance(result, dict) else {"value": result},
durationMs=_rDur, retryCount=_rRetry)
logger.info("executeGraph loop resume body node %s done (iter %d, retries=%d)", bnid, next_index, _rRetry)
except PauseForHumanTaskError as e:
_updateStepLog(automation2_interface, _rStepId, "completed",
durationMs=int((time.time() - _rStepStart) * 1000))
if automation2_interface:
run_ctx = dict(run.get("context") or {})
run_ctx["_loopState"] = {"loopNodeId": loop_node_id, "currentIndex": next_index, "items": items}
automation2_interface.updateRun(e.runId, status="paused", nodeOutputs=dict(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx)
return {"success": False, "paused": True, "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, "nodeOutputs": dict(nodeOutputs)}
except PauseForEmailWaitError as e:
_updateStepLog(automation2_interface, _rStepId, "completed",
durationMs=int((time.time() - _rStepStart) * 1000))
raise
except Exception as ex:
_updateStepLog(automation2_interface, _rStepId, "failed",
error=str(ex), durationMs=int((time.time() - _rStepStart) * 1000))
logger.exception("executeGraph loop body node %s FAILED: %s", bnid, ex)
nodeOutputs[bnid] = {"error": str(ex), "success": False}
if runId and automation2_interface:
@ -237,6 +330,9 @@ async def executeGraph(
nodeType = node.get("type", "")
if not _is_node_on_active_path(nodeId, connectionMap, nodeOutputs):
logger.info("executeGraph step %d/%d: nodeId=%s SKIP (inactive branch)", i + 1, len(ordered), nodeId)
_skipStepId = _createStepLog(automation2_interface, runId, nodeId, nodeType, status="skipped")
if _skipStepId:
_updateStepLog(automation2_interface, _skipStepId, "skipped")
continue
executor = _getExecutor(nodeType, services, automation2_interface)
logger.info(
@ -251,8 +347,11 @@ async def executeGraph(
nodeOutputs[nodeId] = None
logger.debug("executeGraph node %s: no executor, output=None", nodeId)
continue
_stepStartMs = time.time()
_stepId = None
try:
if nodeType == "flow.loop":
_stepId = _createStepLog(automation2_interface, runId, nodeId, nodeType, "running")
result = await executor.execute(node, context)
items = result.get("items") or []
body_ids = getLoopBodyNodeIds(nodeId, connectionMap)
@ -272,35 +371,66 @@ async def executeGraph(
if not bexec:
nodeOutputs[bnid] = None
continue
_bStepStart = time.time()
_bStepId = _createStepLog(automation2_interface, runId, bnid, body_node.get("type", ""), "running")
try:
bres = await bexec.execute(body_node, context)
bres, _bRetry = await _executeWithRetry(bexec, body_node, context)
nodeOutputs[bnid] = bres
logger.info("executeGraph loop body node %s done (iter %d)", bnid, idx)
_bDur = int((time.time() - _bStepStart) * 1000)
_updateStepLog(automation2_interface, _bStepId, "completed",
output=bres if isinstance(bres, dict) else {"value": bres},
durationMs=_bDur, retryCount=_bRetry)
logger.info("executeGraph loop body node %s done (iter %d, retries=%d)", bnid, idx, _bRetry)
except PauseForHumanTaskError as e:
_updateStepLog(automation2_interface, _bStepId, "completed",
durationMs=int((time.time() - _bStepStart) * 1000))
if runId and automation2_interface:
run = automation2_interface.getRun(runId) or {}
run_ctx = dict(run.get("context") or {})
run_ctx["_loopState"] = {"loopNodeId": nodeId, "currentIndex": idx, "items": items}
automation2_interface.updateRun(e.runId, status="paused", nodeOutputs=dict(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx)
return {"success": False, "paused": True, "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, "nodeOutputs": dict(nodeOutputs)}
except PauseForEmailWaitError as e:
_updateStepLog(automation2_interface, _bStepId, "completed",
durationMs=int((time.time() - _bStepStart) * 1000))
raise
except Exception as ex:
_updateStepLog(automation2_interface, _bStepId, "failed",
error=str(ex), durationMs=int((time.time() - _bStepStart) * 1000))
logger.exception("executeGraph loop body node %s FAILED: %s", bnid, ex)
nodeOutputs[bnid] = {"error": str(ex), "success": False}
if runId and automation2_interface:
automation2_interface.updateRun(runId, status="failed", nodeOutputs=nodeOutputs)
return {"success": False, "error": str(ex), "nodeOutputs": nodeOutputs, "failedNode": bnid}
nodeOutputs[nodeId] = {"items": items, "count": len(items)}
_updateStepLog(automation2_interface, _stepId, "completed",
output={"items": len(items)}, durationMs=int((time.time() - _stepStartMs) * 1000))
logger.info("executeGraph flow.loop done: %d iterations", len(items))
else:
result = await executor.execute(node, context)
_stepStartMs = time.time()
_inputSnap = {}
for src, _, _ in connectionMap.get(nodeId, []):
if src in nodeOutputs:
_inputSnap[src] = nodeOutputs[src]
_stepId = _createStepLog(automation2_interface, runId, nodeId, nodeType, "running", _inputSnap)
result, retryCount = await _executeWithRetry(executor, node, context)
nodeOutputs[nodeId] = result
_durMs = int((time.time() - _stepStartMs) * 1000)
_tokens = result.get("tokensUsed", 0) if isinstance(result, dict) else 0
_updateStepLog(automation2_interface, _stepId, "completed",
output=result if isinstance(result, dict) else {"value": result},
durationMs=_durMs, tokensUsed=_tokens, retryCount=retryCount)
logger.info(
"executeGraph node %s done: result_type=%s result_keys=%s",
"executeGraph node %s done: result_type=%s result_keys=%s retries=%d duration=%dms",
nodeId,
type(result).__name__,
list(result.keys()) if isinstance(result, dict) else "n/a",
retryCount,
_durMs,
)
except PauseForHumanTaskError as e:
_updateStepLog(automation2_interface, _stepId, "completed",
durationMs=int((time.time() - _stepStartMs) * 1000))
logger.info("executeGraph paused for human task %s", e.taskId)
return {
"success": False,
@ -311,11 +441,13 @@ async def executeGraph(
"nodeOutputs": dict(nodeOutputs),
}
except PauseForEmailWaitError as e:
_updateStepLog(automation2_interface, _stepId, "completed",
durationMs=int((time.time() - _stepStartMs) * 1000))
logger.info("executeGraph paused for email wait (run %s, node %s)", e.runId, e.nodeId)
# Start email poller on-demand (only runs while workflows wait for email)
try:
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.features.automation2.emailPoller import ensureRunning
from modules.features.graphicalEditor.emailPoller import ensureRunning
root = getRootInterface()
event_user = root.getUserByUsername("event") if root else None
if event_user:
@ -353,8 +485,25 @@ async def executeGraph(
except Exception as e:
logger.exception("executeGraph node %s (%s) FAILED: %s", nodeId, nodeType, e)
nodeOutputs[nodeId] = {"error": str(e), "success": False}
_durMs = int((time.time() - _stepStartMs) * 1000)
_updateStepLog(automation2_interface, _stepId, "failed", error=str(e), durationMs=_durMs)
if runId and automation2_interface:
automation2_interface.updateRun(runId, status="failed", nodeOutputs=nodeOutputs)
try:
_wfObj = automation2_interface.getWorkflow(workflowId) if automation2_interface and workflowId else None
_wfDict = _wfObj if isinstance(_wfObj, dict) else (
_wfObj.model_dump() if hasattr(_wfObj, "model_dump") else {}
) if _wfObj else {}
_shouldNotify = _wfDict.get("notifyOnFailure", True) if _wfDict else True
if _shouldNotify:
from modules.workflows.scheduler.mainScheduler import _notifyRunFailed
_notifyRunFailed(
workflowId or "", runId or "", str(e),
mandateId=mandateId,
workflowLabel=_wfDict.get("label"),
)
except Exception:
pass
return {
"success": False,
"error": str(e),

View file

@ -29,7 +29,7 @@ def _is_user_connection_id(val: Any) -> bool:
def _getNodeDefinition(nodeType: str) -> Optional[Dict[str, Any]]:
"""Get node definition by type id for _method, _action, _paramMap."""
from modules.features.automation2.nodeDefinitions import STATIC_NODE_TYPES
from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
for node in STATIC_NODE_TYPES:
if node.get("id") == nodeType:
return node
@ -534,7 +534,7 @@ class ActionNodeExecutor:
node: Dict[str, Any],
context: Dict[str, Any],
) -> Any:
from modules.features.automation2.nodeRegistry import getNodeTypeToMethodAction
from modules.features.graphicalEditor.nodeRegistry import getNodeTypeToMethodAction
from modules.workflows.automation2.graphUtils import resolveParameterReferences
from modules.workflows.processing.core.actionExecutor import ActionExecutor

View file

@ -17,12 +17,12 @@ _main_loop = None
def set_main_loop(loop) -> None:
    """Store the main asyncio event loop in the module-level _main_loop."""
    # NOTE(review): appears to be called once at startup (app.py lifespan) so
    # background threads can target the main loop — confirm against callers.
    global _main_loop
    _main_loop = loop
from modules.features.automation2.interfaceFeatureAutomation2 import (
getAutomation2Interface,
from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import (
getGraphicalEditorInterface as getAutomation2Interface,
getAllWorkflowsForScheduling,
)
from modules.features.automation2.mainAutomation2 import getAutomation2Services
from modules.features.automation2.entryPoints import find_invocation
from modules.features.graphicalEditor.mainGraphicalEditor import getGraphicalEditorServices as getAutomation2Services
from modules.features.graphicalEditor.entryPoints import find_invocation
from modules.workflows.automation2.scheduleCron import parse_cron_to_kwargs

View file

@ -0,0 +1,2 @@
# Copyright (c) 2025 Patrick Motsch
# Workflow Scheduler - consolidated scheduler with v1 incremental sync patterns

View file

@ -0,0 +1,459 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Consolidated Workflow Scheduler.
Replaces subAutomation2Schedule with v1-style incremental sync patterns:
- eventId tracking on AutoWorkflow for change detection
- replaceExisting=True for idempotent re-registration
- active check before execution
- Capped execution log
"""
import asyncio
import logging
from typing import Any, Dict
from modules.shared.eventManagement import eventManager
logger = logging.getLogger(__name__)
_main_loop = None
JOB_ID_PREFIX = "graphicalEditor."
_CALLBACK_NAME = "graphicalEditor.workflow.changed"
def _setMainLoop(loop) -> None:
    """Remember the main asyncio loop; scheduler threads use it via call_soon_threadsafe."""
    global _main_loop
    _main_loop = loop
class WorkflowScheduler:
    """Consolidated scheduler with v1 incremental sync patterns.

    Registers one APScheduler job per workflow that has a schedule trigger
    with a cron expression. Sync is idempotent (replaceExisting=True) and
    removes jobs for workflows that disappeared. Execution handlers
    re-check the workflow's active flag before running.
    """
    def __init__(self):
        # Service user under which scheduled runs execute (set in start()).
        self._eventUser = None
        # workflowId -> jobId of jobs registered by the last sync; used to
        # detect and remove stale jobs on the next sync.
        self._registered: Dict[str, str] = {}
    def start(self, eventUser) -> bool:
        """Start scheduler: sync workflows, register callback for changes.

        Returns False when no event user is given or startup fails;
        True otherwise.
        """
        if not eventUser:
            logger.warning("WorkflowScheduler: No event user provided, skipping")
            return False
        self._eventUser = eventUser
        try:
            # Safe to call repeatedly; ensures the underlying scheduler runs.
            eventManager.start()
            self._syncScheduledWorkflows()
            logger.info("WorkflowScheduler: initial sync complete")
            # Second sync after a short delay in case the DB was not ready yet.
            self._delayedSync()
            from modules.shared.callbackRegistry import callbackRegistry
            # Re-sync whenever a workflow is created/updated/deleted.
            callbackRegistry.register(_CALLBACK_NAME, self._onWorkflowChanged)
            logger.info("WorkflowScheduler: callback registered for %s", _CALLBACK_NAME)
        except Exception as e:
            logger.error("WorkflowScheduler: Failed to start: %s", e)
            return False
        return True
    def stop(self) -> bool:
        """Remove all scheduled workflow jobs. Always returns True (best effort)."""
        try:
            self._removeAllJobs()
            logger.info("WorkflowScheduler: all jobs removed")
        except Exception as e:
            logger.warning("WorkflowScheduler: error during stop: %s", e)
        return True
    def _syncScheduledWorkflows(self) -> Dict[str, Any]:
        """
        Re-register jobs for all schedulable workflows and drop stale ones.

        Every eligible workflow is (re-)registered with replaceExisting=True,
        so repeated syncs are idempotent. After registration the workflow's
        eventId is updated to the jobId for change tracking (v1 pattern).
        Returns a summary dict: {"synced": <count>, "workflowsFound": <count>}.
        """
        from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getAllWorkflowsForScheduling
        from modules.workflows.automation2.scheduleCron import parse_cron_to_kwargs
        items = getAllWorkflowsForScheduling()
        logger.info("WorkflowScheduler: found %d workflow(s) with trigger.schedule+cron", len(items))
        newRegistered: Dict[str, str] = {}
        activeWorkflowIds = set()
        for item in items:
            workflowId = item.get("workflowId")
            if not workflowId:
                continue
            activeWorkflowIds.add(workflowId)
            cron = item.get("cron")
            mandateId = item.get("mandateId")
            instanceId = item.get("featureInstanceId")
            # Without an instance or cron expression the job cannot run.
            if not instanceId or not cron:
                continue
            jobId = f"{JOB_ID_PREFIX}{workflowId}"
            entryPointId = item.get("entryPointId")
            workflow = item.get("workflow") or {}
            asyncHandler = self._createHandler(
                workflowId=workflowId,
                mandateId=mandateId,
                instanceId=instanceId,
                entryPointId=entryPointId,
                workflow=workflow,
            )
            # Factory binds `handler` per-iteration (avoids late-binding closure bug).
            def _makeSyncWrapper(handler):
                def syncWrapper():
                    # APScheduler fires in a worker thread; hand the coroutine
                    # over to the main loop when one is running.
                    loop = _main_loop
                    if loop and loop.is_running():
                        loop.call_soon_threadsafe(
                            lambda: asyncio.ensure_future(handler(), loop=loop)
                        )
                    else:
                        try:
                            asyncio.run(handler())
                        except RuntimeError:
                            logger.warning("WorkflowScheduler: could not run handler, no event loop")
                return syncWrapper
            syncWrapper = _makeSyncWrapper(asyncHandler)
            # Simple "every N seconds/minutes/hours" crons become interval jobs.
            intervalSeconds = _cronToIntervalSeconds(cron)
            if intervalSeconds is not None:
                eventManager.registerInterval(
                    jobId=jobId,
                    func=syncWrapper,
                    seconds=intervalSeconds,
                    replaceExisting=True,
                )
            else:
                try:
                    cronKwargs = parse_cron_to_kwargs(cron)
                    eventManager.registerCron(
                        jobId=jobId,
                        func=syncWrapper,
                        cronKwargs=cronKwargs,
                        replaceExisting=True,
                    )
                except ValueError as e:
                    logger.warning("Workflow %s: invalid cron %r: %s", workflowId, cron, e)
                    continue
            newRegistered[workflowId] = jobId
            mode = "interval" if intervalSeconds is not None else "cron"
            logger.info(
                "WorkflowScheduler: registered %s for workflow %s (%s=%s)",
                jobId, workflowId, mode,
                intervalSeconds if intervalSeconds is not None else cron,
            )
            self._updateEventId(workflow, workflowId, jobId)
        # Remove jobs for workflows that no longer appear in the query result.
        staleIds = set(self._registered.keys()) - activeWorkflowIds
        for wfId in staleIds:
            oldJobId = self._registered[wfId]
            try:
                eventManager.remove(oldJobId)
                logger.info("WorkflowScheduler: removed stale job %s", oldJobId)
            except Exception:
                pass
        self._registered = newRegistered
        return {"synced": len(newRegistered), "workflowsFound": len(items)}
    def _updateEventId(self, workflow: Dict, workflowId: str, jobId: str) -> None:
        """Update AutoWorkflow.eventId for incremental sync tracking (v1 pattern).

        Best-effort: failures are logged at debug level only.
        """
        currentEventId = workflow.get("eventId")
        if currentEventId != jobId:
            try:
                from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
                from modules.interfaces.interfaceDbApp import getRootInterface
                root = getRootInterface()
                # Prefer the system "event" user; fall back to the start() user.
                eventUser = root.getUserByUsername("event") if root else self._eventUser
                if not eventUser:
                    return
                mandateId = workflow.get("mandateId", "")
                instanceId = workflow.get("featureInstanceId", "")
                iface = getGraphicalEditorInterface(eventUser, mandateId, instanceId)
                iface.updateWorkflow(workflowId, {"eventId": jobId})
            except Exception as e:
                logger.debug("WorkflowScheduler: could not update eventId for %s: %s", workflowId, e)
    def _createHandler(
        self,
        workflowId: str,
        mandateId: str,
        instanceId: str,
        entryPointId: str,
        workflow: Dict[str, Any],
    ):
        """Create async handler for scheduled workflow execution with active-check.

        The returned coroutine function reloads the workflow, verifies it is
        still active and its schedule entry point enabled, then executes its
        graph. All failures are caught and logged.
        """
        eventUser = self._eventUser
        async def handler():
            logger.info("WorkflowScheduler: CRON FIRED for workflow %s", workflowId)
            try:
                if not eventUser:
                    logger.error("WorkflowScheduler: event user not available")
                    return
                # Local imports keep module import light and avoid cycles.
                from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
                from modules.features.graphicalEditor.mainGraphicalEditor import getGraphicalEditorServices
                from modules.workflows.automation2.executionEngine import executeGraph
                from modules.workflows.processing.shared.methodDiscovery import discoverMethods
                from modules.features.graphicalEditor.entryPoints import find_invocation
                from modules.workflows.automation2.runEnvelope import default_run_envelope, normalize_run_envelope
                iface = getGraphicalEditorInterface(eventUser, mandateId, instanceId)
                # Re-read state at fire time; the workflow may have changed since sync.
                wf = iface.getWorkflow(workflowId)
                if not wf or not wf.get("graph"):
                    logger.warning("WorkflowScheduler: workflow %s not found or no graph", workflowId)
                    return
                if not wf.get("active", True):
                    logger.info("WorkflowScheduler: workflow %s inactive, skipping", workflowId)
                    return
                inv = find_invocation(wf, entryPointId)
                # Skip when the entry point is no longer a schedule or was disabled.
                if inv and (inv.get("kind") != "schedule" or not inv.get("enabled", True)):
                    logger.info("WorkflowScheduler: entry point %s disabled for workflow %s", entryPointId, workflowId)
                    return
                services = getGraphicalEditorServices(
                    eventUser,
                    mandateId=mandateId,
                    featureInstanceId=instanceId,
                )
                discoverMethods(services)
                # Entry-point title may be a localized dict or a plain string.
                title = (inv or {}).get("title") or {}
                label = ""
                if isinstance(title, dict):
                    label = title.get("en") or title.get("de") or ""
                elif isinstance(title, str):
                    label = title
                runEnv = default_run_envelope(
                    "schedule",
                    entry_point_id=entryPointId,
                    entry_point_label=label or None,
                )
                runEnv = normalize_run_envelope(runEnv, user_id=str(eventUser.id) if eventUser else None)
                # userId=None: scheduled runs are not attributed to an interactive user.
                result = await executeGraph(
                    graph=wf["graph"],
                    services=services,
                    workflowId=workflowId,
                    instanceId=instanceId,
                    userId=None,
                    mandateId=mandateId,
                    automation2_interface=iface,
                    run_envelope=runEnv,
                )
                logger.info(
                    "WorkflowScheduler: executed workflow %s success=%s paused=%s",
                    workflowId, result.get("success"), result.get("paused"),
                )
            except Exception as e:
                logger.exception("WorkflowScheduler: failed to execute workflow %s: %s", workflowId, e)
        return handler
    def _delayedSync(self) -> None:
        """Delayed sync (5s) in a daemon thread in case DB was not ready at startup."""
        import threading
        eventUser = self._eventUser
        def _run():
            import time
            time.sleep(5)
            try:
                self._syncScheduledWorkflows()
                logger.info("WorkflowScheduler: delayed sync done")
            except Exception as e:
                logger.warning("WorkflowScheduler: delayed sync failed: %s", e)
        t = threading.Thread(target=_run, daemon=True)
        t.start()
    def _onWorkflowChanged(self, _context=None) -> None:
        """Callback when a workflow is created/updated/deleted: full re-sync."""
        try:
            self._syncScheduledWorkflows()
            logger.debug("WorkflowScheduler: re-synced after workflow change")
        except Exception as e:
            logger.warning("WorkflowScheduler: re-sync failed: %s", e)
    def _removeAllJobs(self) -> None:
        """Remove every job whose id carries this scheduler's prefix."""
        if not eventManager.scheduler:
            return
        for job in list(eventManager.scheduler.get_jobs()):
            jid = job.id if hasattr(job, "id") else str(job)
            if jid.startswith(JOB_ID_PREFIX):
                try:
                    eventManager.remove(jid)
                except Exception as e:
                    logger.debug("Could not remove job %s: %s", jid, e)
def _cronToIntervalSeconds(cron: str):
"""If cron represents a simple interval, return seconds. Otherwise None."""
if not cron or not isinstance(cron, str):
return None
parts = cron.strip().split()
if len(parts) == 5:
minute, hour, day, month, dow = parts
second = "0"
elif len(parts) == 6:
second, minute, hour, day, month, dow = parts
else:
return None
if minute.startswith("*/") and hour == "*" and day == "*" and month == "*" and dow == "*":
n = int(minute[2:]) if minute[2:].isdigit() else 0
if n > 0:
return n * 60
if minute == "*" and hour == "*" and day == "*" and month == "*" and dow == "*" and second == "0":
return 60
if minute == "0" and hour.startswith("*/") and day == "*" and month == "*" and dow == "*":
n = int(hour[2:]) if hour[2:].isdigit() else 0
if n > 0:
return n * 3600
if len(parts) == 6 and second.startswith("*/") and minute == "*" and hour == "*" and day == "*" and month == "*" and dow in ("*", "?"):
n = int(second[2:]) if second[2:].isdigit() else 0
if n > 0:
return n
return None
def _notifyRunFailed(workflowId: str, runId: str, error: str, mandateId: str = None, workflowLabel: str = None) -> None:
    """Fan out a workflow-run failure over three channels.

    Emits the event-bus event, then creates the in-app notification and
    triggers the email subscription. Event emission is best-effort: a
    failure there is logged and the other two channels still run.
    """
    payload = {
        "workflowId": workflowId,
        "runId": runId,
        "error": error,
        "mandateId": mandateId,
    }
    try:
        eventManager.emit("graphicalEditor.run.failed", payload)
        logger.info("Emitted run.failed event for run %s (workflow %s)", runId, workflowId)
    except Exception as e:
        logger.warning("Failed to emit run.failed event: %s", e)
    _createRunFailedNotification(workflowId, runId, error, mandateId, workflowLabel)
    _triggerRunFailedSubscription(workflowId, runId, error, mandateId, workflowLabel)
def _createRunFailedNotification(
    workflowId: str, runId: str, error: str, mandateId: str = None, workflowLabel: str = None
) -> None:
    """Create an in-app notification about a failed run for the workflow creator.

    Best-effort: any failure is caught and logged as a warning. Silently
    returns when the root interface, the system "event" user, the workflow,
    or its creator cannot be resolved.
    """
    try:
        from modules.interfaces.interfaceDbApp import getRootInterface
        from modules.datamodels.datamodelNotification import UserNotification, NotificationType, NotificationStatus
        rootInterface = getRootInterface()
        if not rootInterface:
            return
        from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
        eventUser = rootInterface.getUserByUsername("event")
        if not eventUser:
            return
        iface = getGraphicalEditorInterface(eventUser, mandateId or "", "")
        wf = iface.getWorkflow(workflowId)
        if not wf:
            return
        # getWorkflow may return a dict or a model object; handle both shapes.
        creatorId = wf.get("sysCreatedBy") if isinstance(wf, dict) else getattr(wf, "sysCreatedBy", None)
        if not creatorId:
            return
        label = workflowLabel or (wf.get("label") if isinstance(wf, dict) else getattr(wf, "label", ""))
        # German user-facing text; error is truncated to keep the message short.
        notification = UserNotification(
            userId=creatorId,
            type=NotificationType.SYSTEM,
            status=NotificationStatus.UNREAD,
            title="Workflow fehlgeschlagen",
            message=f"Workflow '{label or workflowId}' ist fehlgeschlagen: {error[:200]}",
            referenceType="AutoRun",
            referenceId=runId,
            icon="alert-triangle",
        )
        rootInterface.db.recordCreate(
            model_class=UserNotification,
            record=notification.model_dump(),
        )
        logger.info("Created in-app notification for user %s (run %s)", creatorId, runId)
    except Exception as e:
        logger.warning("Failed to create in-app run.failed notification: %s", e)
def _triggerRunFailedSubscription(
    workflowId: str, runId: str, error: str, mandateId: str = None, workflowLabel: str = None
) -> None:
    """Trigger the messaging subscription for run failures (email notifications).

    Executes the "GraphicalEditorRunFailed" subscription via the messaging
    service under the system "event" user. A missing/unregistered
    subscription is expected early on and only logged at debug level;
    other failures are logged as warnings.
    """
    try:
        from modules.serviceCenter import getService
        from modules.serviceCenter.context import ServiceCenterContext
        from modules.interfaces.interfaceDbApp import getRootInterface
        from modules.datamodels.datamodelMessaging import MessagingEventParameters
        rootInterface = getRootInterface()
        if not rootInterface:
            return
        eventUser = rootInterface.getUserByUsername("event")
        if not eventUser:
            return
        ctx = ServiceCenterContext(
            user=eventUser,
            mandate_id=mandateId or "",
            feature_instance_id="",
            feature_code="graphicalEditor",
        )
        messagingService = getService("messaging", ctx)
        subscriptionId = "GraphicalEditorRunFailed"
        # triggerData keys are referenced by the subscription's message template.
        eventParams = MessagingEventParameters(triggerData={
            "workflowId": workflowId,
            "workflowLabel": workflowLabel or workflowId,
            "runId": runId,
            "error": error,
            "mandateId": mandateId or "",
        })
        result = messagingService.executeSubscription(subscriptionId, eventParams)
        logger.info(
            "Triggered run.failed subscription: sent=%d success=%s",
            result.messagesSent, result.success,
        )
    except FileNotFoundError:
        # Subscription function file does not exist yet — not an error.
        logger.debug("Subscription function GraphicalEditorRunFailed not found (not yet registered)")
    except ValueError as e:
        logger.debug("Subscription GraphicalEditorRunFailed: %s", e)
    except Exception as e:
        logger.warning("Failed to trigger run.failed subscription: %s", e)
# Module-level singleton shared by the start/stop/setMainLoop wrappers below.
_scheduler = WorkflowScheduler()
def start(eventUser) -> bool:
    """Start the consolidated workflow scheduler (delegates to the singleton)."""
    return _scheduler.start(eventUser)
def stop() -> bool:
    """Stop the consolidated workflow scheduler (delegates to the singleton)."""
    return _scheduler.stop()
def setMainLoop(loop) -> None:
    """Set the main event loop used to bridge scheduler threads into asyncio."""
    _setMainLoop(loop)

View file

@ -39,9 +39,6 @@ AUTH_DIR = GATEWAY_DIR / "modules" / "auth"
# Value: set of function names that must remain async def
_MUST_STAY_ASYNC: Dict[str, Set[str]] = {
# --- routes/ ---
"modules/routes/routeAdminAutomationEvents.py": {
"sync_all_automation_events", # await syncAutomationEvents(...)
},
"modules/routes/routeAdminRbacExport.py": {
"import_global_rbac", # await file.read()
"import_mandate_rbac", # await file.read()
@ -68,9 +65,6 @@ _MUST_STAY_ASYNC: Dict[str, Set[str]] = {
"refresh_token", # await request.json()
},
# --- features/ ---
"modules/features/automation/routeFeatureAutomation.py": {
"execute_automation_route", # await executeAutomation(...)
},
"modules/features/chatbot/routeFeatureChatbot.py": {
"stream_chatbot_start", # await chatProcess(...), contains async event_stream generator
"event_stream", # await request.is_disconnected(), await asyncio.wait_for(...)

View file

@ -102,8 +102,6 @@ except Exception as e:
# Alle PowerOn Datenbanken (für Export / Migration-Skripte)
ALL_DATABASES = [
"poweron_app",
"poweron_automation",
"poweron_automation2",
"poweron_billing",
"poweron_chat",
"poweron_chatbot",
@ -128,8 +126,6 @@ DATABASE_CONFIG = {
"poweron_management": "DB_MANAGEMENT",
"poweron_realestate": "DB_REALESTATE",
"poweron_trustee": "DB_TRUSTEE",
"poweron_automation": "DB",
"poweron_automation2": "DB",
"poweron_billing": "DB",
"poweron_commcoach": "DB",
"poweron_knowledge": "DB",

View file

@ -1,369 +0,0 @@
#!/usr/bin/env python3
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Workflow Test with Documents - Tests chat workflow execution with uploaded documents
Simulates the UI route flow: upload files, start workflow with prompt and documents
"""
import asyncio
import json
import sys
import os
import time
from typing import Dict, Any, List, Optional
# Add the gateway to path (go up 2 levels from tests/functional/)
_gateway_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
if _gateway_path not in sys.path:
sys.path.insert(0, _gateway_path)
# Import the service initialization
from modules.serviceHub import getInterface as getServices
from modules.datamodels.datamodelChat import UserInputRequest, WorkflowModeEnum
from modules.datamodels.datamodelUam import User
from modules.workflows.automation import chatStart
import modules.interfaces.interfaceDbChat as interfaceFeatureAiChat
class WorkflowWithDocumentsTester:
    """End-to-end driver: upload documents, run a chat workflow, analyze results.

    Mirrors the UI route flow: create files in component storage, start a
    workflow with a prompt plus the file IDs, poll until the workflow
    finishes, then collect messages/documents/logs/stats.
    """
    def __init__(self):
        # Use root user for testing (has full access to everything)
        from modules.interfaces.interfaceDbApp import getRootInterface
        from modules.datamodels.datamodelUam import Mandate
        rootInterface = getRootInterface()
        self.testUser = rootInterface.currentUser
        # Get initial mandate ID for testing (User has no mandateId - use initial mandate)
        self.testMandateId = rootInterface.getInitialId(Mandate)
        # Initialize services using the existing system
        self.services = getServices(self.testUser, None)  # Test user, no workflow
        self.workflow = None
        self.testResults = {}
    async def initialize(self):
        """Initialize the test environment."""
        # Set logging level to INFO to see workflow progress
        import logging
        logging.getLogger().setLevel(logging.INFO)
        print(f"Initialized test with user: {self.testUser.id}")
        print(f"Test Mandate ID: {self.testMandateId}")
    def createCsvTemplate(self) -> str:
        """Create a CSV template file for prime numbers."""
        csvContent = """Primzahl,Index
2,1
3,2
5,3
7,4
11,5
13,6
17,7
19,8
23,9
29,10
"""
        return csvContent
    def createSecondDocument(self) -> str:
        """Create a second text document with instructions."""
        docContent = """Anweisungen zur Primzahlgenerierung:
1. Generiere Primzahlen
2. Formatiere sie in einer Tabelle mit 10 Spalten pro Zeile
3. Verwende das bereitgestellte CSV-Vorlagenformat
4. Stelle sicher, dass alle Zahlen korrekt formatiert sind
5. Füge eine Index-Spalte hinzu, die bei 1 beginnt
"""
        return docContent
    async def uploadFiles(self) -> List[str]:
        """Upload test files to the filesystem and return their file IDs."""
        print("\n" + "="*60)
        print("UPLOADING TEST FILES")
        print("="*60)
        fileIds = []
        # Create CSV template file
        csvContent = self.createCsvTemplate()
        csvFileName = "prime_numbers_template.csv"
        print(f"Creating CSV template: {csvFileName}")
        print(f"Content length: {len(csvContent)} bytes")
        # Create file in component storage
        csvFileItem = self.services.interfaceDbComponent.createFile(
            name=csvFileName,
            mimeType="text/csv",
            content=csvContent.encode('utf-8')
        )
        # Persist file data
        self.services.interfaceDbComponent.createFileData(csvFileItem.id, csvContent.encode('utf-8'))
        fileIds.append(csvFileItem.id)
        print(f"✅ Created CSV file with ID: {csvFileItem.id}")
        print(f" File name: {csvFileItem.fileName}")
        print(f" MIME type: {csvFileItem.mimeType}")
        # Create second text document
        docContent = self.createSecondDocument()
        docFileName = "prime_numbers_instructions.txt"
        print(f"\nCreating instruction document: {docFileName}")
        print(f"Content length: {len(docContent)} bytes")
        # Create file in component storage
        docFileItem = self.services.interfaceDbComponent.createFile(
            name=docFileName,
            mimeType="text/plain",
            content=docContent.encode('utf-8')
        )
        # Persist file data
        self.services.interfaceDbComponent.createFileData(docFileItem.id, docContent.encode('utf-8'))
        fileIds.append(docFileItem.id)
        print(f"✅ Created instruction file with ID: {docFileItem.id}")
        print(f" File name: {docFileItem.fileName}")
        print(f" MIME type: {docFileItem.mimeType}")
        return fileIds
    async def startWorkflow(self, prompt: str, fileIds: List[str]) -> None:
        """Start a chat workflow with prompt and documents; stores it on self.workflow."""
        print("\n" + "="*60)
        print("STARTING WORKFLOW")
        print("="*60)
        print(f"Prompt: {prompt}")
        print(f"Number of files: {len(fileIds)}")
        print(f"File IDs: {fileIds}")
        # Create UserInputRequest
        userInput = UserInputRequest(
            prompt=prompt,
            listFileId=fileIds,
            userLanguage="en"
        )
        # Start workflow (this is async and returns immediately)
        print("\nCalling chatStart...")
        self.workflow = await chatStart(
            currentUser=self.testUser,
            userInput=userInput,
            workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC,
            workflowId=None
        )
        print(f"✅ Workflow started with ID: {self.workflow.id}")
        print(f" Status: {self.workflow.status}")
        print(f" Mode: {self.workflow.workflowMode}")
        print(f" Current Round: {self.workflow.currentRound}")
    async def waitForWorkflowCompletion(self, maxWaitTime: Optional[int] = None) -> bool:
        """Wait for workflow to complete, checking status periodically.
        Args:
            maxWaitTime: Maximum wait time in seconds. If None, wait indefinitely.
        Returns:
            True when the workflow reached "completed", otherwise False.
        """
        print("\n" + "="*60)
        print("WAITING FOR WORKFLOW COMPLETION")
        if maxWaitTime:
            print(f"Maximum wait time: {maxWaitTime} seconds")
        else:
            print("Waiting indefinitely (no timeout)")
        print("="*60)
        if not self.workflow:
            print("❌ No workflow to wait for")
            return False
        startTime = time.time()
        checkInterval = 2  # Check every 2 seconds
        lastStatus = None
        while True:
            # Check timeout if maxWaitTime is set
            if maxWaitTime is not None:
                elapsed = time.time() - startTime
                if elapsed >= maxWaitTime:
                    print(f"\n⚠️ Workflow did not complete within {maxWaitTime} seconds")
                    print(f" Final status: {self.workflow.status}")
                    return False
            # Get current workflow status.
            # Bugfix: the chat interface module is imported under the alias
            # interfaceFeatureAiChat; the previous self-referencing assignment
            # (`interfaceDbChat = interfaceDbChat.getInterface(...)`) raised
            # UnboundLocalError on first use.
            interfaceDbChat = interfaceFeatureAiChat.getInterface(self.testUser)
            currentWorkflow = interfaceDbChat.getWorkflow(self.workflow.id)
            if not currentWorkflow:
                print("❌ Workflow not found in database")
                return False
            currentStatus = currentWorkflow.status
            elapsed = int(time.time() - startTime)
            # Print status if it changed
            if currentStatus != lastStatus:
                print(f"Workflow status: {currentStatus} (elapsed: {elapsed}s)")
                lastStatus = currentStatus
            # Check if workflow is complete
            if currentStatus in ["completed", "stopped", "failed"]:
                self.workflow = currentWorkflow
                print(f"\n✅ Workflow finished with status: {currentStatus} (elapsed: {elapsed}s)")
                return currentStatus == "completed"
            # Wait before next check
            await asyncio.sleep(checkInterval)
    def analyzeWorkflowResults(self) -> Dict[str, Any]:
        """Analyze workflow results and extract messages, documents, logs, stats."""
        print("\n" + "="*60)
        print("ANALYZING WORKFLOW RESULTS")
        print("="*60)
        if not self.workflow:
            return {"error": "No workflow to analyze"}
        # Bugfix: use the module alias (see waitForWorkflowCompletion).
        interfaceDbChat = interfaceFeatureAiChat.getInterface(self.testUser)
        workflow = interfaceDbChat.getWorkflow(self.workflow.id)
        if not workflow:
            return {"error": "Workflow not found"}
        # Get unified chat data
        chatData = interfaceDbChat.getUnifiedChatData(workflow.id, None)
        # Count messages
        messages = chatData.get("messages", [])
        userMessages = [m for m in messages if m.get("role") == "user"]
        assistantMessages = [m for m in messages if m.get("role") == "assistant"]
        # Count documents
        documents = chatData.get("documents", [])
        # Get logs
        logs = chatData.get("logs", [])
        # Get stats
        stats = chatData.get("stats", [])
        results = {
            "workflowId": workflow.id,
            "status": workflow.status,
            "workflowMode": str(workflow.workflowMode) if hasattr(workflow, 'workflowMode') else None,
            "currentRound": workflow.currentRound,
            "totalTasks": workflow.totalTasks,
            "totalActions": workflow.totalActions,
            "messageCount": len(messages),
            "userMessageCount": len(userMessages),
            "assistantMessageCount": len(assistantMessages),
            "documentCount": len(documents),
            "logCount": len(logs),
            "statCount": len(stats),
            "messages": messages,
            "documents": documents,
            "logs": logs,
            "stats": stats
        }
        print(f"Workflow ID: {results['workflowId']}")
        print(f"Status: {results['status']}")
        print(f"Mode: {results['workflowMode']}")
        print(f"Round: {results['currentRound']}")
        print(f"Tasks: {results['totalTasks']}")
        print(f"Actions: {results['totalActions']}")
        print(f"Messages: {results['messageCount']} (User: {results['userMessageCount']}, Assistant: {results['assistantMessageCount']})")
        print(f"Documents: {results['documentCount']}")
        print(f"Logs: {results['logCount']}")
        print(f"Stats: {results['statCount']}")
        # Print first user message
        if userMessages:
            print(f"\nFirst user message:")
            print(f" {userMessages[0].get('message', '')[:200]}...")
        # Print last assistant message
        if assistantMessages:
            print(f"\nLast assistant message:")
            lastMsg = assistantMessages[-1]
            print(f" {lastMsg.get('message', '')[:200]}...")
            if lastMsg.get('documents'):
                print(f" Documents attached: {len(lastMsg['documents'])}")
        # Print document names
        if documents:
            print(f"\nGenerated documents:")
            for doc in documents:
                print(f" - {doc.get('fileName', 'unknown')} ({doc.get('fileSize', 0)} bytes)")
        return results
    async def runTest(self):
        """Run the complete test and return a summary dict."""
        print("\n" + "="*80)
        print("WORKFLOW TEST WITH DOCUMENTS")
        print("="*80)
        try:
            # Initialize
            await self.initialize()
            # Upload files
            fileIds = await self.uploadFiles()
            # Start workflow with prompt and files
            prompt = "Generiere die ersten 4000 Primzahlen in einer Tabelle mit 10 Spalten pro Zeile."
            await self.startWorkflow(prompt, fileIds)
            # Wait for completion (no timeout - wait indefinitely)
            completed = await self.waitForWorkflowCompletion()
            # Analyze results
            results = self.analyzeWorkflowResults()
            self.testResults = {
                "completed": completed,
                "results": results
            }
            print("\n" + "="*80)
            print("TEST SUMMARY")
            print("="*80)
            # Bugfix: both branches previously printed an empty string; restore
            # the pass/fail markers used elsewhere in this script.
            print(f"Workflow completed: {'✅' if completed else '❌'}")
            print(f"Status: {results.get('status', 'unknown')}")
            print(f"Messages: {results.get('messageCount', 0)}")
            print(f"Documents: {results.get('documentCount', 0)}")
            return self.testResults
        except Exception as e:
            import traceback
            print(f"\n❌ Test failed with error: {type(e).__name__}: {str(e)}")
            print(f"Traceback:\n{traceback.format_exc()}")
            self.testResults = {
                "completed": False,
                "error": str(e),
                "traceback": traceback.format_exc()
            }
            return self.testResults
async def main():
    """Entry point: run the documents workflow test and dump results as JSON."""
    runner = WorkflowWithDocumentsTester()
    outcome = await runner.runTest()
    # Emit a machine-readable summary so wrapper scripts can parse the run.
    banner = "=" * 80
    print("\n" + banner)
    print("FINAL RESULTS (JSON)")
    print(banner)
    print(json.dumps(outcome, indent=2, default=str))
if __name__ == "__main__":
    asyncio.run(main())

View file

@ -1,471 +0,0 @@
#!/usr/bin/env python3
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Workflow Test with Prompt Variations - Tests different workflow scenarios:
1. Simple prompt for short answer (no documents)
2. Merge 2 documents and output as Word document
3. Structured data output as Excel file
"""
import asyncio
import json
import sys
import os
import time
from typing import Dict, Any, List, Optional
# Add the gateway to path (go up 2 levels from tests/functional/)
_gateway_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
if _gateway_path not in sys.path:
sys.path.insert(0, _gateway_path)
# Import the service initialization
from modules.serviceHub import getInterface as getServices
from modules.datamodels.datamodelChat import UserInputRequest, WorkflowModeEnum
from modules.datamodels.datamodelUam import User
from modules.workflows.automation import chatStart
import modules.interfaces.interfaceDbChat as interfaceFeatureAiChat
class WorkflowPromptVariationsTester:
    def __init__(self):
        """Resolve root test user, initial mandate, and service interfaces."""
        # Use root user for testing (has full access to everything)
        from modules.interfaces.interfaceDbApp import getRootInterface
        from modules.datamodels.datamodelUam import Mandate
        rootInterface = getRootInterface()
        self.testUser = rootInterface.currentUser
        # Get initial mandate ID for testing (User has no mandateId - use initial mandate)
        self.testMandateId = rootInterface.getInitialId(Mandate)
        # Initialize services using the existing system
        self.services = getServices(self.testUser, None) # Test user, no workflow
        # Per-scenario results, filled by the test* methods.
        self.testResults = {}
    async def initialize(self):
        """Initialize the test environment (logging level, banner output)."""
        # Set logging level to INFO to see workflow progress
        import logging
        logging.getLogger().setLevel(logging.INFO)
        print(f"Initialized test with user: {self.testUser.id}")
        print(f"Test Mandate ID: {self.testMandateId}")
def _createFile(self, fileName: str, mimeType: str, content: str) -> str:
"""Helper method to create a file and return its ID."""
fileItem = self.services.interfaceDbComponent.createFile(
name=fileName,
mimeType=mimeType,
content=content.encode('utf-8')
)
self.services.interfaceDbComponent.createFileData(fileItem.id, content.encode('utf-8'))
return fileItem.id
    async def _startWorkflow(self, prompt: str, fileIds: List[str] = None) -> Any:
        """Start a chat workflow with prompt and optional documents.

        Returns the newly created workflow object; chatStart returns
        immediately, so callers must poll for completion themselves.
        """
        if fileIds is None:
            fileIds = []
        print(f"\nPrompt: {prompt}")
        print(f"Number of files: {len(fileIds)}")
        if fileIds:
            print(f"File IDs: {fileIds}")
        # Create UserInputRequest
        userInput = UserInputRequest(
            prompt=prompt,
            listFileId=fileIds,
            userLanguage="en"
        )
        # Start workflow (this is async and returns immediately)
        workflow = await chatStart(
            currentUser=self.testUser,
            userInput=userInput,
            workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC,
            workflowId=None
        )
        print(f"✅ Workflow started with ID: {workflow.id}")
        print(f" Status: {workflow.status}")
        print(f" Mode: {workflow.workflowMode}")
        return workflow
async def _waitForWorkflowCompletion(self, workflow: Any, maxWaitTime: Optional[int] = None) -> bool:
"""Wait for workflow to complete, checking status periodically.
Args:
workflow: The workflow object to wait for
maxWaitTime: Maximum wait time in seconds. If None, wait indefinitely.
"""
if maxWaitTime:
print(f"Maximum wait time: {maxWaitTime} seconds")
else:
print("Waiting indefinitely (no timeout)")
startTime = time.time()
checkInterval = 2 # Check every 2 seconds
lastStatus = None
while True:
# Check timeout if maxWaitTime is set
if maxWaitTime is not None:
elapsed = time.time() - startTime
if elapsed >= maxWaitTime:
print(f"\n⚠️ Workflow did not complete within {maxWaitTime} seconds")
print(f" Final status: {workflow.status}")
return False
# Get current workflow status
interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
currentWorkflow = interfaceDbChat.getWorkflow(workflow.id)
if not currentWorkflow:
print("❌ Workflow not found in database")
return False
currentStatus = currentWorkflow.status
elapsed = int(time.time() - startTime)
# Print status if it changed
if currentStatus != lastStatus:
print(f"Workflow status: {currentStatus} (elapsed: {elapsed}s)")
lastStatus = currentStatus
# Check if workflow is complete
if currentStatus in ["completed", "stopped", "failed"]:
print(f"\n✅ Workflow finished with status: {currentStatus} (elapsed: {elapsed}s)")
return currentStatus == "completed"
# Wait before next check
await asyncio.sleep(checkInterval)
def _analyzeWorkflowResults(self, workflow: Any) -> Dict[str, Any]:
"""Analyze workflow results and extract information."""
interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
workflow = interfaceDbChat.getWorkflow(workflow.id)
if not workflow:
return {"error": "Workflow not found"}
# Get unified chat data
chatData = interfaceDbChat.getUnifiedChatData(workflow.id, None)
# Extract messages and documents from items
items = chatData.get("items", [])
messages = []
allDocuments = []
for item in items:
if item.get("type") == "message":
message = item.get("item")
if message:
# Convert ChatMessage to dict if needed
if hasattr(message, 'dict'):
msgDict = message.dict()
elif hasattr(message, '__dict__'):
msgDict = message.__dict__
else:
msgDict = message if isinstance(message, dict) else {}
messages.append(msgDict)
# Extract documents from message
msgDocuments = msgDict.get("documents", [])
if msgDocuments:
for doc in msgDocuments:
# Convert ChatDocument to dict if needed
if hasattr(doc, 'dict'):
docDict = doc.dict()
elif hasattr(doc, '__dict__'):
docDict = doc.__dict__
else:
docDict = doc if isinstance(doc, dict) else {}
# Only add if not already in list (avoid duplicates)
docId = docDict.get("id") or docDict.get("fileId")
if docId and not any(d.get("id") == docId or d.get("fileId") == docId for d in allDocuments):
allDocuments.append(docDict)
userMessages = [m for m in messages if m.get("role") == "user"]
assistantMessages = [m for m in messages if m.get("role") == "assistant"]
results = {
"workflowId": workflow.id,
"status": workflow.status,
"workflowMode": str(workflow.workflowMode) if hasattr(workflow, 'workflowMode') else None,
"currentRound": workflow.currentRound,
"totalTasks": workflow.totalTasks,
"totalActions": workflow.totalActions,
"messageCount": len(messages),
"userMessageCount": len(userMessages),
"assistantMessageCount": len(assistantMessages),
"documentCount": len(allDocuments),
"documents": allDocuments
}
print(f" Workflow ID: {results['workflowId']}")
print(f" Status: {results['status']}")
print(f" Messages: {results['messageCount']} (User: {results['userMessageCount']}, Assistant: {results['assistantMessageCount']})")
print(f" Documents: {results['documentCount']}")
# Print document names
if allDocuments:
print(f" Generated documents:")
for doc in allDocuments:
fileName = doc.get("fileName") or doc.get("documentName") or "unknown"
fileSize = doc.get("fileSize") or doc.get("size") or 0
print(f" - {fileName} ({fileSize} bytes)")
return results
async def testSimplePrompt(self) -> Dict[str, Any]:
"""Test 1: Simple prompt for a short answer (no documents)."""
print("\n" + "="*80)
print("TEST 1: SIMPLE PROMPT FOR SHORT ANSWER")
print("="*80)
try:
prompt = "What is the capital of France? Answer in one sentence."
workflow = await self._startWorkflow(prompt, [])
completed = await self._waitForWorkflowCompletion(workflow, maxWaitTime=120)
results = self._analyzeWorkflowResults(workflow)
return {
"testName": "Simple Prompt",
"completed": completed,
"results": results
}
except Exception as e:
import traceback
print(f"❌ Test failed: {type(e).__name__}: {str(e)}")
return {
"testName": "Simple Prompt",
"completed": False,
"error": str(e),
"traceback": traceback.format_exc()
}
async def testMergeDocumentsToWord(self) -> Dict[str, Any]:
"""Test 2: Merge 2 documents and output as Word document."""
print("\n" + "="*80)
print("TEST 2: MERGE 2 DOCUMENTS AND OUTPUT AS WORD")
print("="*80)
try:
# Create first document
doc1Content = """Project Overview
This document outlines the key objectives for our new software project.
The project aims to develop a modern web application with the following features:
- User authentication and authorization
- Real-time data synchronization
- Responsive design for mobile and desktop
- Integration with third-party APIs
Timeline: 6 months
Budget: $500,000
"""
# Create second document
doc2Content = """Technical Specifications
Architecture:
- Frontend: React with TypeScript
- Backend: Python with FastAPI
- Database: PostgreSQL
- Deployment: Docker containers on AWS
Key Requirements:
- Support for 10,000 concurrent users
- 99.9% uptime SLA
- End-to-end encryption for sensitive data
- Comprehensive logging and monitoring
Team Size: 8 developers, 2 designers, 1 project manager
"""
print("\nCreating documents to merge...")
doc1Id = self._createFile("project_overview.txt", "text/plain", doc1Content)
print(f"✅ Created document 1 with ID: {doc1Id}")
doc2Id = self._createFile("technical_specs.txt", "text/plain", doc2Content)
print(f"✅ Created document 2 with ID: {doc2Id}")
prompt = "Merge these two documents into a single comprehensive Word document. Include both the project overview and technical specifications in a well-formatted document with proper headings and sections."
workflow = await self._startWorkflow(prompt, [doc1Id, doc2Id])
completed = await self._waitForWorkflowCompletion(workflow, maxWaitTime=300)
results = self._analyzeWorkflowResults(workflow)
# Check if Word document was created
wordDocFound = False
if results.get("documents"):
for doc in results["documents"]:
fileName = doc.get("fileName", "").lower()
if fileName.endswith(".docx") or fileName.endswith(".doc"):
wordDocFound = True
print(f" ✅ Word document found: {doc.get('fileName')}")
if not wordDocFound:
print(" ⚠️ Warning: No Word document (.docx or .doc) found in results")
return {
"testName": "Merge Documents to Word",
"completed": completed,
"wordDocumentFound": wordDocFound,
"results": results
}
except Exception as e:
import traceback
print(f"❌ Test failed: {type(e).__name__}: {str(e)}")
return {
"testName": "Merge Documents to Word",
"completed": False,
"error": str(e),
"traceback": traceback.format_exc()
}
async def testStructuredDataToExcel(self) -> Dict[str, Any]:
"""Test 3: Structured data output as Excel file."""
print("\n" + "="*80)
print("TEST 3: STRUCTURED DATA OUTPUT AS EXCEL")
print("="*80)
try:
# Create structured data as JSON
structuredData = {
"employees": [
{"id": 1, "name": "John Doe", "department": "Engineering", "salary": 95000, "startDate": "2020-01-15"},
{"id": 2, "name": "Jane Smith", "department": "Marketing", "salary": 85000, "startDate": "2019-03-20"},
{"id": 3, "name": "Bob Johnson", "department": "Engineering", "salary": 100000, "startDate": "2018-06-10"},
{"id": 4, "name": "Alice Williams", "department": "HR", "salary": 75000, "startDate": "2021-09-05"},
{"id": 5, "name": "Charlie Brown", "department": "Sales", "salary": 80000, "startDate": "2020-11-12"},
{"id": 6, "name": "Diana Prince", "department": "Engineering", "salary": 110000, "startDate": "2017-04-22"},
{"id": 7, "name": "Edward Norton", "department": "Marketing", "salary": 90000, "startDate": "2019-08-30"},
{"id": 8, "name": "Fiona Green", "department": "HR", "salary": 78000, "startDate": "2022-01-18"}
],
"departments": [
{"name": "Engineering", "budget": 500000, "headCount": 3},
{"name": "Marketing", "budget": 300000, "headCount": 2},
{"name": "HR", "budget": 200000, "headCount": 2},
{"name": "Sales", "budget": 250000, "headCount": 1}
]
}
jsonContent = json.dumps(structuredData, indent=2)
print("\nCreating structured data file...")
dataFileId = self._createFile("employee_data.json", "application/json", jsonContent)
print(f"✅ Created data file with ID: {dataFileId}")
prompt = "Create an Excel file from this structured data. Include two sheets: one for employees with all their details, and one for departments with summary information. Format the data nicely with proper column headers and make it easy to read."
workflow = await self._startWorkflow(prompt, [dataFileId])
completed = await self._waitForWorkflowCompletion(workflow, maxWaitTime=300)
results = self._analyzeWorkflowResults(workflow)
# Check if Excel document was created
excelDocFound = False
if results.get("documents"):
for doc in results["documents"]:
fileName = doc.get("fileName", "").lower()
if fileName.endswith(".xlsx") or fileName.endswith(".xls"):
excelDocFound = True
print(f" ✅ Excel document found: {doc.get('fileName')}")
if not excelDocFound:
print(" ⚠️ Warning: No Excel document (.xlsx or .xls) found in results")
return {
"testName": "Structured Data to Excel",
"completed": completed,
"excelDocumentFound": excelDocFound,
"results": results
}
except Exception as e:
import traceback
print(f"❌ Test failed: {type(e).__name__}: {str(e)}")
return {
"testName": "Structured Data to Excel",
"completed": False,
"error": str(e),
"traceback": traceback.format_exc()
}
async def runAllTests(self):
"""Run all three test cases."""
print("\n" + "="*80)
print("WORKFLOW PROMPT VARIATIONS TEST SUITE")
print("="*80)
try:
# Initialize
await self.initialize()
# Run all tests
test1Results = await self.testSimplePrompt()
test2Results = await self.testMergeDocumentsToWord()
test3Results = await self.testStructuredDataToExcel()
self.testResults = {
"test1": test1Results,
"test2": test2Results,
"test3": test3Results,
"summary": {
"totalTests": 3,
"passedTests": sum([
1 if test1Results.get("completed") else 0,
1 if test2Results.get("completed") else 0,
1 if test3Results.get("completed") else 0
]),
"failedTests": sum([
1 if not test1Results.get("completed") else 0,
1 if not test2Results.get("completed") else 0,
1 if not test3Results.get("completed") else 0
])
}
}
print("\n" + "="*80)
print("TEST SUITE SUMMARY")
print("="*80)
print(f"Test 1 - Simple Prompt: {'✅ PASSED' if test1Results.get('completed') else '❌ FAILED'}")
print(f"Test 2 - Merge to Word: {'✅ PASSED' if test2Results.get('completed') else '❌ FAILED'}")
if test2Results.get('wordDocumentFound'):
print(f" Word document created: ✅")
print(f"Test 3 - Data to Excel: {'✅ PASSED' if test3Results.get('completed') else '❌ FAILED'}")
if test3Results.get('excelDocumentFound'):
print(f" Excel document created: ✅")
print(f"\nTotal: {self.testResults['summary']['passedTests']}/{self.testResults['summary']['totalTests']} tests passed")
return self.testResults
except Exception as e:
import traceback
print(f"\n❌ Test suite failed with error: {type(e).__name__}: {str(e)}")
print(f"Traceback:\n{traceback.format_exc()}")
self.testResults = {
"error": str(e),
"traceback": traceback.format_exc()
}
return self.testResults
async def main():
    """Entry point: run the prompt variations suite and dump the results as JSON."""
    tester = WorkflowPromptVariationsTester()
    suiteResults = await tester.runAllTests()
    print("\n" + "="*80)
    print("FINAL RESULTS (JSON)")
    print("="*80)
    # default=str keeps non-JSON-native values (datetimes, ids) serializable.
    print(json.dumps(suiteResults, indent=2, default=str))
if __name__ == "__main__":
    asyncio.run(main())

View file

@ -1,735 +0,0 @@
#!/usr/bin/env python3
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Document Generation Formats Test - Tests document generation in all supported formats
Tests HTML, PDF, DOCX, XLSX, and PPTX generation with images and various content types.
"""
import asyncio
import json
import sys
import os
import time
import base64
from typing import Dict, Any, List, Optional
# Add the gateway to path (go up 2 levels from tests/functional/)
_gateway_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
if _gateway_path not in sys.path:
sys.path.insert(0, _gateway_path)
# Import the service initialization
from modules.serviceHub import getInterface as getServices
from modules.datamodels.datamodelChat import UserInputRequest, WorkflowModeEnum
from modules.datamodels.datamodelUam import User
from modules.workflows.automation import chatStart
import modules.interfaces.interfaceDbChat as interfaceFeatureAiChat
class DocumentGenerationFormatsTester:
    def __init__(self):
        """Prepare a root-user test context: mandate id, service hub, result holders."""
        from modules.interfaces.interfaceDbApp import getRootInterface
        from modules.datamodels.datamodelUam import Mandate
        # Root user has unrestricted access, keeping the test independent of UAM setup.
        root = getRootInterface()
        self.testUser = root.currentUser
        # User objects carry no mandateId, so fall back to the initial mandate.
        self.testMandateId = root.getInitialId(Mandate)
        self.services = getServices(self.testUser, None)  # test user, no workflow yet
        self.workflow = None
        self.testResults = {}
        self.generatedDocuments = {}
        # Set once by uploadPdfFile() and reused by every format test.
        self.pdfFileId = None
async def initialize(self):
"""Initialize the test environment."""
# Enable debug file logging for tests
from modules.shared.configuration import APP_CONFIG
APP_CONFIG.set("APP_DEBUG_CHAT_WORKFLOW_ENABLED", True)
# Set logging level to INFO to see workflow progress
import logging
logging.getLogger().setLevel(logging.INFO)
print(f"Initialized test with user: {self.testUser.id}")
print(f"Test Mandate ID: {self.testMandateId}")
print(f"Debug logging enabled: {APP_CONFIG.get('APP_DEBUG_CHAT_WORKFLOW_ENABLED', False)}")
# Upload PDF file for testing
await self.uploadPdfFile()
async def uploadPdfFile(self):
"""Upload the PDF file and store its file ID."""
pdfPath = os.path.join(os.path.dirname(__file__), "..", "..", "..", "local", "temp", "B2025-02c.pdf")
pdfPath = os.path.abspath(pdfPath)
if not os.path.exists(pdfPath):
print(f"⚠️ Warning: PDF file not found at {pdfPath}")
print(" Test will continue without PDF attachment")
return
try:
# Read PDF file
with open(pdfPath, "rb") as f:
pdfContent = f.read()
# Create file using services.interfaceDbComponent
if not hasattr(self.services, 'interfaceDbComponent') or not self.services.interfaceDbComponent:
print("⚠️ Warning: interfaceDbComponent not available in services")
print(" Test will continue without PDF attachment")
return
interfaceDbComponent = self.services.interfaceDbComponent
fileItem = interfaceDbComponent.createFile(
name="B2025-02c.pdf",
mimeType="application/pdf",
content=pdfContent
)
# Store file data
interfaceDbComponent.createFileData(fileItem.id, pdfContent)
self.pdfFileId = fileItem.id
print(f"✅ Uploaded PDF file: {fileItem.fileName} (ID: {self.pdfFileId}, Size: {len(pdfContent)} bytes)")
except Exception as e:
import traceback
print(f"⚠️ Warning: Failed to upload PDF file: {str(e)}")
print(f" Traceback: {traceback.format_exc()}")
print(" Test will continue without PDF attachment")
def createTestPrompt(self, format: str) -> str:
"""Create a unified test prompt for document generation in the specified format.
The prompt requests:
- Extraction of images from the attached PDF
- Generation of a new image
- Document creation with both images
"""
basePrompt = (
"Create a professional document about 'Fuel Station Receipt Analysis' with the following content:\n"
"1) A main title\n"
"2) An introduction paragraph explaining the receipt analysis\n"
"3) Extract and include the image from the attached PDF document (B2025-02c.pdf)\n"
"4) A section analyzing the receipt data with bullet points\n"
"5) Generate a new image showing a visual representation of fuel consumption trends\n"
"6) A conclusion paragraph with recommendations\n\n"
"Make sure to include both: the image extracted from the PDF and the newly generated image.\n"
f"Format the output as {format.upper()}."
)
return basePrompt
def createRefactoringTestPrompt(self, testType: str, format: str = "html") -> str:
"""Create test prompts for specific refactoring features.
Args:
testType: Type of refactoring test:
- "intent_analysis": Test DocumentIntent analysis
- "conditional_extraction": Test conditional extraction (extract vs render)
- "image_render": Test image rendering as asset
- "multi_document": Test multi-document rendering
- "metadata_preservation": Test metadata preservation
format: Output format (default: html)
"""
prompts = {
"intent_analysis": (
"Create a document with the following requirements:\n"
"1) Extract text content from the attached PDF\n"
"2) Include images from the PDF as visual elements (render them, don't extract text from them)\n"
"3) Generate a summary document\n\n"
"This tests that the system correctly identifies which documents need extraction vs rendering."
),
"conditional_extraction": (
"Create a document that:\n"
"1) Extracts and uses text from the attached PDF\n"
"2) Renders images from the PDF as visual assets (not as extracted text)\n"
"3) Generates new content based on the extracted text\n\n"
"This tests conditional extraction - only extract what needs extraction, render what needs rendering."
),
"image_render": (
"Create a document that includes images from the attached PDF.\n"
"The images should be rendered as visual elements in the document, not extracted as text.\n"
"Include a title and description for each image.\n\n"
"This tests the image asset pipeline with render intent."
),
"multi_document": (
"Create multiple separate documents:\n"
"1) Document 1: Summary of the PDF content\n"
"2) Document 2: Analysis of the PDF content\n"
"3) Document 3: Recommendations based on the PDF content\n\n"
"Each document should be separate and complete.\n"
"This tests multi-document generation and rendering."
),
"metadata_preservation": (
"Create a document that extracts content from the attached PDF.\n"
"The document should clearly show which content came from which source document.\n"
"Include source references in the generated content.\n\n"
"This tests that metadata (documentId, mimeType) is preserved in the generation prompt."
)
}
prompt = prompts.get(testType, self.createTestPrompt(format))
return f"{prompt}\n\nFormat the output as {format.upper()}."
async def generateDocumentInFormat(self, format: str) -> Dict[str, Any]:
"""Generate a document in the specified format using workflow."""
print("\n" + "="*80)
print(f"GENERATING DOCUMENT IN {format.upper()} FORMAT")
print("="*80)
prompt = self.createTestPrompt(format)
print(f"Prompt: {prompt[:200]}...")
# Create user input request with PDF file attachment
listFileId = []
if self.pdfFileId:
listFileId = [self.pdfFileId]
print(f"Attaching PDF file (ID: {self.pdfFileId})")
else:
print("⚠️ No PDF file attached (file upload may have failed)")
# Create user input request
userInput = UserInputRequest(
prompt=prompt,
listFileId=listFileId,
userLanguage="en"
)
# Start workflow
print(f"\nStarting workflow for {format.upper()} generation...")
workflow = await chatStart(
currentUser=self.testUser,
userInput=userInput,
workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC,
workflowId=None
)
if not workflow:
return {
"success": False,
"error": "Failed to start workflow"
}
self.workflow = workflow
print(f"Workflow started: {workflow.id}")
# Wait for workflow completion (no timeout - wait indefinitely)
print(f"Waiting for workflow completion...")
completed = await self.waitForWorkflowCompletion(timeout=None)
if not completed:
return {
"success": False,
"error": "Workflow did not complete",
"workflowId": workflow.id,
"status": workflow.status if workflow else "unknown"
}
# Analyze results
results = self.analyzeWorkflowResults()
# Extract documents for this format
documents = results.get("documents", [])
formatDocuments = [d for d in documents if d.get("fileName", "").endswith(f".{format.lower()}")]
return {
"success": True,
"format": format,
"workflowId": workflow.id,
"status": results.get("status"),
"documentCount": len(formatDocuments),
"documents": formatDocuments,
"results": results
}
async def waitForWorkflowCompletion(self, timeout: Optional[int] = None, checkInterval: int = 2) -> bool:
"""Wait for workflow to complete."""
if not self.workflow:
return False
startTime = time.time()
lastStatus = None
interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
if timeout is None:
print("Waiting indefinitely (no timeout)")
while True:
# Check timeout only if specified
if timeout is not None and time.time() - startTime > timeout:
print(f"\n⏱️ Timeout after {timeout} seconds")
return False
# Get current workflow status
try:
currentWorkflow = interfaceDbChat.getWorkflow(self.workflow.id)
if not currentWorkflow:
print("\n❌ Workflow not found")
return False
currentStatus = currentWorkflow.status
elapsed = int(time.time() - startTime)
# Print status if it changed
if currentStatus != lastStatus:
print(f"Workflow status: {currentStatus} (elapsed: {elapsed}s)")
lastStatus = currentStatus
# Check if workflow is complete
if currentStatus in ["completed", "stopped", "failed"]:
self.workflow = currentWorkflow
statusIcon = "" if currentStatus == "completed" else ""
print(f"\n{statusIcon} Workflow finished with status: {currentStatus} (elapsed: {elapsed}s)")
return currentStatus == "completed"
# Wait before next check
await asyncio.sleep(checkInterval)
except Exception as e:
print(f"\n⚠️ Error checking workflow status: {str(e)}")
await asyncio.sleep(checkInterval)
def analyzeWorkflowResults(self) -> Dict[str, Any]:
"""Analyze workflow results and extract information."""
if not self.workflow:
return {"error": "No workflow to analyze"}
interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
workflow = interfaceDbChat.getWorkflow(self.workflow.id)
if not workflow:
return {"error": "Workflow not found"}
# Get unified chat data
chatData = interfaceDbChat.getUnifiedChatData(workflow.id, None)
# Count messages
messages = chatData.get("messages", [])
userMessages = [m for m in messages if m.get("role") == "user"]
assistantMessages = [m for m in messages if m.get("role") == "assistant"]
# Count documents
documents = chatData.get("documents", [])
# Get logs
logs = chatData.get("logs", [])
results = {
"workflowId": workflow.id,
"status": workflow.status,
"workflowMode": str(workflow.workflowMode) if hasattr(workflow, 'workflowMode') else None,
"currentRound": workflow.currentRound,
"totalTasks": workflow.totalTasks,
"totalActions": workflow.totalActions,
"messageCount": len(messages),
"userMessageCount": len(userMessages),
"assistantMessageCount": len(assistantMessages),
"documentCount": len(documents),
"logCount": len(logs),
"documents": documents,
"logs": logs
}
print(f"\nWorkflow Results:")
print(f" Status: {results['status']}")
print(f" Tasks: {results['totalTasks']}")
print(f" Actions: {results['totalActions']}")
print(f" Messages: {results['messageCount']}")
print(f" Documents: {results['documentCount']}")
# Print document details
if documents:
print(f"\nGenerated Documents:")
for doc in documents:
fileName = doc.get("fileName", "unknown")
fileSize = doc.get("fileSize", 0)
mimeType = doc.get("mimeType", "unknown")
print(f" - {fileName} ({fileSize} bytes, {mimeType})")
return results
def verifyDocumentFormat(self, document: Dict[str, Any], expectedFormat: str) -> Dict[str, Any]:
"""Verify that a document matches the expected format."""
fileName = document.get("fileName", "")
mimeType = document.get("mimeType", "")
fileSize = document.get("fileSize", 0)
# Expected MIME types
expectedMimeTypes = {
"html": ["text/html", "application/xhtml+xml"],
"pdf": ["application/pdf"],
"docx": ["application/vnd.openxmlformats-officedocument.wordprocessingml.document"],
"xlsx": ["application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"],
"pptx": ["application/vnd.openxmlformats-officedocument.presentationml.presentation"]
}
# Expected file extensions
expectedExtensions = {
"html": [".html", ".htm"],
"pdf": [".pdf"],
"docx": [".docx"],
"xlsx": [".xlsx"],
"pptx": [".pptx"]
}
formatLower = expectedFormat.lower()
expectedMimes = expectedMimeTypes.get(formatLower, [])
expectedExts = expectedExtensions.get(formatLower, [])
# Check file extension
hasCorrectExtension = any(fileName.lower().endswith(ext) for ext in expectedExts)
# Check MIME type
hasCorrectMimeType = any(mimeType.lower() == mime.lower() for mime in expectedMimes)
# Check file size (should be > 0)
hasValidSize = fileSize > 0
verification = {
"format": expectedFormat,
"fileName": fileName,
"mimeType": mimeType,
"fileSize": fileSize,
"hasCorrectExtension": hasCorrectExtension,
"hasCorrectMimeType": hasCorrectMimeType,
"hasValidSize": hasValidSize,
"isValid": hasCorrectExtension and hasValidSize
}
return verification
    async def testRefactoringFeatures(self) -> Dict[str, Any]:
        """Run one workflow per refactoring feature and verify each via logs/documents.

        Returns:
            dict keyed by testType, each value containing success flag,
            workflow id, verification verdict, and the raw workflow results.
        """
        print("\n" + "="*80)
        print("TESTING REFACTORING FEATURES")
        print("="*80)
        # (testType, output format) pairs; HTML keeps generation cheap.
        refactoringTests = [
            ("intent_analysis", "html"),
            ("conditional_extraction", "html"),
            ("image_render", "html"),
            ("multi_document", "html"),
            ("metadata_preservation", "html")
        ]
        results = {}
        for testType, format in refactoringTests:
            try:
                print(f"\n{'='*80}")
                print(f"Testing Refactoring Feature: {testType}")
                print(f"{'='*80}")
                prompt = self.createRefactoringTestPrompt(testType, format)
                print(f"Prompt: {prompt[:200]}...")
                # Create user input request with PDF file attachment
                # (attachment is skipped gracefully when the upload failed).
                listFileId = []
                if self.pdfFileId:
                    listFileId = [self.pdfFileId]
                    print(f"Attaching PDF file (ID: {self.pdfFileId})")
                else:
                    print("⚠️ No PDF file attached (file upload may have failed)")
                userInput = UserInputRequest(
                    prompt=prompt,
                    listFileId=listFileId,
                    userLanguage="en"
                )
                # Start workflow
                print(f"\nStarting workflow for {testType} test...")
                workflow = await chatStart(
                    currentUser=self.testUser,
                    userInput=userInput,
                    workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC,
                    workflowId=None
                )
                if not workflow:
                    results[testType] = {
                        "success": False,
                        "error": "Failed to start workflow"
                    }
                    continue
                self.workflow = workflow
                print(f"Workflow started: {workflow.id}")
                # Wait for workflow completion (no timeout - wait indefinitely)
                completed = await self.waitForWorkflowCompletion(timeout=None)
                if not completed:
                    results[testType] = {
                        "success": False,
                        "error": "Workflow did not complete",
                        "workflowId": workflow.id
                    }
                    continue
                # Analyze results
                workflowResults = self.analyzeWorkflowResults()
                # Check for specific refactoring features
                verification = self.verifyRefactoringFeature(testType, workflowResults)
                results[testType] = {
                    "success": True,
                    "workflowId": workflow.id,
                    "verification": verification,
                    "workflowResults": workflowResults
                }
                print(f"\n{testType} test completed!")
                print(f"  Verification: {'✅ PASS' if verification.get('passed', False) else '❌ FAIL'}")
                if verification.get("details"):
                    for detail in verification["details"]:
                        print(f"    - {detail}")
                # Brief pause between workflows to avoid back-to-back starts.
                await asyncio.sleep(2)
            except Exception as e:
                import traceback
                print(f"\n❌ Error testing {testType}: {str(e)}")
                print(traceback.format_exc())
                results[testType] = {
                    "success": False,
                    "error": str(e),
                    "traceback": traceback.format_exc()
                }
        return results
def verifyRefactoringFeature(self, testType: str, workflowResults: Dict[str, Any]) -> Dict[str, Any]:
"""Verify that a refactoring feature works correctly."""
documents = workflowResults.get("documents", [])
logs = workflowResults.get("logs", [])
verification = {
"testType": testType,
"passed": False,
"details": []
}
if testType == "intent_analysis":
# Check that intent analysis was performed
intentLogs = [log for log in logs if "intent" in str(log).lower() or "analyzing document intent" in str(log).lower()]
if intentLogs:
verification["details"].append("Intent analysis logs found")
verification["passed"] = True
else:
verification["details"].append("No intent analysis logs found")
elif testType == "conditional_extraction":
# Check that extraction and rendering both occurred
extractionLogs = [log for log in logs if "extract" in str(log).lower()]
renderLogs = [log for log in logs if "render" in str(log).lower() or "image" in str(log).lower()]
if extractionLogs and renderLogs:
verification["details"].append("Both extraction and rendering occurred")
verification["passed"] = True
else:
verification["details"].append(f"Missing logs: extraction={len(extractionLogs)}, render={len(renderLogs)}")
elif testType == "image_render":
# Check that images were rendered (not extracted as text)
imageLogs = [log for log in logs if "image" in str(log).lower()]
if imageLogs:
verification["details"].append("Image rendering logs found")
verification["passed"] = True
else:
verification["details"].append("No image rendering logs found")
elif testType == "multi_document":
# Check that multiple documents were generated
if len(documents) >= 2:
verification["details"].append(f"Multiple documents generated: {len(documents)}")
verification["passed"] = True
else:
verification["details"].append(f"Expected multiple documents, got {len(documents)}")
elif testType == "metadata_preservation":
# Check that metadata was preserved (check logs for documentId references)
metadataLogs = [log for log in logs if "documentId" in str(log) or "SOURCE:" in str(log)]
if metadataLogs:
verification["details"].append("Metadata preservation logs found")
verification["passed"] = True
else:
verification["details"].append("No metadata preservation logs found")
return verification
    async def testAllFormats(self) -> Dict[str, Any]:
        """Generate one document per supported format and verify the first result.

        Returns:
            dict keyed by format name ("html", "pdf", "docx", "xlsx", "pptx"),
            each value the per-format result dict from generateDocumentInFormat,
            augmented with a "verification" entry when documents were produced.
        """
        print("\n" + "="*80)
        print("TESTING DOCUMENT GENERATION IN ALL FORMATS")
        print("="*80)
        formats = ["html", "pdf", "docx", "xlsx", "pptx"]
        results = {}
        for format in formats:
            try:
                print(f"\n{'='*80}")
                print(f"Testing {format.upper()} format...")
                print(f"{'='*80}")
                result = await self.generateDocumentInFormat(format)
                results[format] = result
                if result.get("success"):
                    documents = result.get("documents", [])
                    if documents:
                        # Verify first document
                        verification = self.verifyDocumentFormat(documents[0], format)
                        result["verification"] = verification
                        print(f"\n{format.upper()} generation successful!")
                        print(f"  Documents: {len(documents)}")
                        print(f"  Verification: {'✅ PASS' if verification['isValid'] else '❌ FAIL'}")
                        if verification.get("fileName"):
                            print(f"  File: {verification['fileName']}")
                            print(f"  Size: {verification['fileSize']} bytes")
                            print(f"  MIME: {verification['mimeType']}")
                    else:
                        # Workflow completed but produced nothing matching the format.
                        print(f"\n⚠️ {format.upper()} generation completed but no documents found")
                else:
                    error = result.get("error", "Unknown error")
                    print(f"\n{format.upper()} generation failed: {error}")
                # Small delay between tests
                await asyncio.sleep(2)
            except Exception as e:
                import traceback
                print(f"\n❌ Error testing {format.upper()}: {str(e)}")
                print(traceback.format_exc())
                results[format] = {
                    "success": False,
                    "error": str(e),
                    "traceback": traceback.format_exc()
                }
        return results
    async def runTest(self, includeRefactoringTests: bool = True):
        """Run the complete test and print/store an aggregate summary.

        Args:
            includeRefactoringTests: If True, also run refactoring feature tests
                before the per-format generation tests.

        Returns:
            dict: Aggregate results with "formatTests", "refactoringTests",
            total counters, and an overall "success" flag that is True only
            when no test of either kind failed.
        """
        print("\n" + "="*80)
        print("DOCUMENT GENERATION FORMATS TEST")
        print("="*80)
        try:
            # Initialize (enables debug logging and uploads the PDF fixture)
            await self.initialize()
            # Test refactoring features first (if enabled)
            refactoringResults = {}
            if includeRefactoringTests:
                refactoringResults = await self.testRefactoringFeatures()
            # Test all formats
            formatResults = await self.testAllFormats()
            # Summary
            print("\n" + "="*80)
            print("TEST SUMMARY")
            print("="*80)
            # Refactoring tests summary.
            # NOTE(review): "success" means the workflow ran to completion;
            # the printed PASS/FAIL label reflects the separate "passed"
            # verification verdict.
            refactoringSuccessCount = 0
            refactoringFailCount = 0
            if includeRefactoringTests and refactoringResults:
                print("\nRefactoring Features:")
                for testType, result in refactoringResults.items():
                    if result.get("success"):
                        refactoringSuccessCount += 1
                        verification = result.get("verification", {})
                        passed = verification.get("passed", False)
                        statusIcon = "" if passed else "⚠️"
                        print(f"{statusIcon} {testType:25s}: {'PASS' if passed else 'FAIL'}")
                    else:
                        refactoringFailCount += 1
                        error = result.get("error", "Unknown error")
                        print(f"{testType:25s}: FAIL - {error}")
                print(f"Refactoring Tests: {refactoringSuccessCount} passed, {refactoringFailCount} failed out of {len(refactoringResults)} tests")
            # Format tests summary
            print("\nFormat Tests:")
            successCount = 0
            failCount = 0
            for format, result in formatResults.items():
                if result.get("success"):
                    successCount += 1
                    status = "✅ PASS"
                    docCount = result.get("documentCount", 0)
                    verification = result.get("verification", {})
                    isValid = verification.get("isValid", False)
                    statusIcon = "" if isValid else "⚠️"
                    print(f"{statusIcon} {format.upper():6s}: {status} - {docCount} document(s)")
                else:
                    failCount += 1
                    error = result.get("error", "Unknown error")
                    print(f"{format.upper():6s}: FAIL - {error}")
            print(f"\nFormat Tests: {successCount} passed, {failCount} failed out of {len(formatResults)} formats")
            # Calculate totals
            totalSuccess = successCount + refactoringSuccessCount if includeRefactoringTests else successCount
            totalFail = failCount + refactoringFailCount if includeRefactoringTests else failCount
            self.testResults = {
                "success": failCount == 0 and (not includeRefactoringTests or refactoringFailCount == 0),
                "formatTests": {
                    "successCount": successCount,
                    "failCount": failCount,
                    "totalFormats": len(formatResults),
                    "results": formatResults
                },
                "refactoringTests": {
                    "successCount": refactoringSuccessCount if includeRefactoringTests else 0,
                    "failCount": refactoringFailCount if includeRefactoringTests else 0,
                    "totalTests": len(refactoringResults) if includeRefactoringTests else 0,
                    "results": refactoringResults if includeRefactoringTests else {}
                },
                "totalSuccess": totalSuccess,
                "totalFail": totalFail
            }
            return self.testResults
        except Exception as e:
            import traceback
            print(f"\n❌ Test failed with error: {type(e).__name__}: {str(e)}")
            print(f"Traceback:\n{traceback.format_exc()}")
            self.testResults = {
                "success": False,
                "error": str(e),
                "traceback": traceback.format_exc()
            }
            return self.testResults
async def main():
    """Entry point: run the document generation formats test and dump JSON results."""
    tester = DocumentGenerationFormatsTester()
    finalResults = await tester.runTest()
    print("\n" + "="*80)
    print("FINAL RESULTS (JSON)")
    print("="*80)
    # default=str keeps non-JSON-native values (datetimes, ids) serializable.
    print(json.dumps(finalResults, indent=2, default=str))
if __name__ == "__main__":
    asyncio.run(main())

View file

@ -1,558 +0,0 @@
#!/usr/bin/env python3
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Document Generation Formats Test 10 - Tests document generation in DOCX, XLSX, PPTX, and PDF formats
Tests professional document formats with various content types including tables, images, and structured data.
"""
import asyncio
import json
import sys
import os
import time
import base64
from typing import Dict, Any, List, Optional
# Add the gateway to path (go up 2 levels from tests/functional/)
_gateway_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
if _gateway_path not in sys.path:
sys.path.insert(0, _gateway_path)
# Import the service initialization
from modules.serviceHub import getInterface as getServices
from modules.datamodels.datamodelChat import UserInputRequest, WorkflowModeEnum
from modules.datamodels.datamodelUam import User
from modules.workflows.automation import chatStart
import modules.interfaces.interfaceDbChat as interfaceFeatureAiChat
class DocumentGenerationFormatsTester10:
    def __init__(self):
        """Prepare a root-user test context for the DOCX/XLSX/PPTX/PDF format tests."""
        from modules.interfaces.interfaceDbApp import getRootInterface
        from modules.datamodels.datamodelUam import Mandate
        # Root user has unrestricted access, keeping the test independent of UAM setup.
        root = getRootInterface()
        self.testUser = root.currentUser
        # User objects carry no mandateId, so fall back to the initial mandate.
        self.testMandateId = root.getInitialId(Mandate)
        self.services = getServices(self.testUser, None)  # test user, no workflow yet
        self.workflow = None
        self.testResults = {}
        self.generatedDocuments = {}
        # Set once by uploadPdfFile() and reused by every test.
        self.pdfFileId = None
    async def initialize(self):
        """Initialize the test environment.

        Enables debug chat-workflow logging, raises the root logger level to
        INFO, and uploads the sample PDF used as a file attachment.
        """
        # Enable debug file logging for tests
        from modules.shared.configuration import APP_CONFIG
        APP_CONFIG.set("APP_DEBUG_CHAT_WORKFLOW_ENABLED", True)
        # Set logging level to INFO to see workflow progress
        import logging
        logging.getLogger().setLevel(logging.INFO)
        print(f"Initialized test with user: {self.testUser.id}")
        print(f"Test Mandate ID: {self.testMandateId}")
        print(f"Debug logging enabled: {APP_CONFIG.get('APP_DEBUG_CHAT_WORKFLOW_ENABLED', False)}")
        # Upload PDF file for testing
        await self.uploadPdfFile()
    async def uploadPdfFile(self):
        """Upload the PDF file and store its file ID.

        Best-effort: a missing file or failing component interface only prints
        a warning and leaves ``self.pdfFileId`` as ``None`` so the test can
        still run without the attachment.
        """
        # Expected location: <repo>/local/temp/B2025-02c.pdf relative to this test file
        pdfPath = os.path.join(os.path.dirname(__file__), "..", "..", "..", "local", "temp", "B2025-02c.pdf")
        pdfPath = os.path.abspath(pdfPath)
        if not os.path.exists(pdfPath):
            print(f"⚠️ Warning: PDF file not found at {pdfPath}")
            print(" Test will continue without PDF attachment")
            return
        try:
            # Read PDF file
            with open(pdfPath, "rb") as f:
                pdfContent = f.read()
            # Create file using services.interfaceDbComponent
            if not hasattr(self.services, 'interfaceDbComponent') or not self.services.interfaceDbComponent:
                print("⚠️ Warning: interfaceDbComponent not available in services")
                print(" Test will continue without PDF attachment")
                return
            interfaceDbComponent = self.services.interfaceDbComponent
            fileItem = interfaceDbComponent.createFile(
                name="B2025-02c.pdf",
                mimeType="application/pdf",
                content=pdfContent
            )
            # Store file data
            interfaceDbComponent.createFileData(fileItem.id, pdfContent)
            self.pdfFileId = fileItem.id
            print(f"✅ Uploaded PDF file: {fileItem.fileName} (ID: {self.pdfFileId}, Size: {len(pdfContent)} bytes)")
        except Exception as e:
            import traceback
            print(f"⚠️ Warning: Failed to upload PDF file: {str(e)}")
            print(f" Traceback: {traceback.format_exc()}")
            print(" Test will continue without PDF attachment")
def createTestPrompt(self, format: str) -> str:
"""Create a test prompt for document generation in the specified format.
The prompt requests:
- Professional document structure with title, sections, tables, and images
- Extraction of content from attached PDF
- Structured data presentation appropriate for the format
"""
formatPrompts = {
"docx": (
"Create a professional Word document about 'Fuel Station Receipt Analysis' with:\n"
"1) A main title\n"
"2) An executive summary paragraph\n"
"3) Extract and include the image from the attached PDF document (B2025-02c.pdf)\n"
"4) A detailed analysis section with:\n"
" - Bullet points of key findings\n"
" - A table summarizing transaction details\n"
"5) A conclusion section with recommendations\n\n"
"Format as a professional DOCX document with proper headings and structure."
),
"xlsx": (
"Create an Excel spreadsheet analyzing the fuel station receipt from the attached PDF (B2025-02c.pdf).\n"
"Include:\n"
"1) A summary sheet with key metrics\n"
"2) A detailed data sheet with:\n"
" - Transaction details in rows\n"
" - Columns for: Date, Item, Quantity, Price, Total\n"
" - Proper formatting and headers\n"
"3) A calculations sheet with:\n"
" - VAT calculations\n"
" - Net and gross totals\n\n"
"Format as a professional XLSX spreadsheet with formulas and formatting."
),
"pptx": (
"Create a PowerPoint presentation about 'Fuel Station Receipt Analysis' with:\n"
"1) Title slide with main title\n"
"2) Overview slide explaining the receipt analysis\n"
"3) Extract and include the image from the attached PDF document (B2025-02c.pdf)\n"
"4) Analysis slides with:\n"
" - Bullet points of key findings\n"
" - Visual representation of data\n"
"5) Conclusion slide with recommendations\n\n"
"Format as a professional PPTX presentation with consistent styling."
),
"pdf": (
"Create a professional PDF document about 'Fuel Station Receipt Analysis' with:\n"
"1) A main title\n"
"2) An introduction paragraph explaining the receipt analysis\n"
"3) Extract and include the image from the attached PDF document (B2025-02c.pdf)\n"
"4) A section analyzing the receipt data with:\n"
" - Bullet points of key findings\n"
" - A table summarizing transaction details\n"
"5) A conclusion paragraph with recommendations\n\n"
"Format as a professional PDF document suitable for printing."
),
"html": (
"Create a professional HTML document about 'Fuel Station Receipt Analysis' with:\n"
"1) A main title\n"
"2) An introduction paragraph explaining the receipt analysis\n"
"3) Extract and include the image from the attached PDF document (B2025-02c.pdf)\n"
"4) A section analyzing the receipt data with:\n"
" - Bullet points of key findings\n"
" - A table summarizing transaction details\n"
"5) A conclusion paragraph with recommendations\n\n"
"Format as a professional HTML document with proper styling, responsive design, and embedded CSS."
)
}
return formatPrompts.get(format.lower(), formatPrompts["docx"])
    async def generateDocumentInFormat(self, format: str) -> Dict[str, Any]:
        """Generate a document in the specified format using workflow.

        Starts a dynamic chat workflow with the format-specific prompt (plus
        the uploaded PDF, if any), waits for completion, and returns a result
        dict with the documents whose file name ends in the format extension.
        """
        print("\n" + "="*80)
        print(f"GENERATING DOCUMENT IN {format.upper()} FORMAT")
        print("="*80)
        prompt = self.createTestPrompt(format)
        print(f"Prompt: {prompt[:200]}...")
        # Create user input request with PDF file attachment
        listFileId = []
        if self.pdfFileId:
            listFileId = [self.pdfFileId]
            print(f"Attaching PDF file (ID: {self.pdfFileId})")
        else:
            print("⚠️ No PDF file attached (file upload may have failed)")
        # Create user input request
        userInput = UserInputRequest(
            prompt=prompt,
            listFileId=listFileId,
            userLanguage="en"
        )
        # Start workflow
        print(f"\nStarting workflow for {format.upper()} generation...")
        workflow = await chatStart(
            currentUser=self.testUser,
            userInput=userInput,
            workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC,
            workflowId=None
        )
        if not workflow:
            return {
                "success": False,
                "error": "Failed to start workflow"
            }
        self.workflow = workflow
        print(f"Workflow started: {workflow.id}")
        # Wait for workflow completion (no timeout - wait indefinitely)
        print(f"Waiting for workflow completion...")
        completed = await self.waitForWorkflowCompletion(timeout=None)
        if not completed:
            return {
                "success": False,
                "error": "Workflow did not complete",
                "workflowId": workflow.id,
                "status": workflow.status if workflow else "unknown"
            }
        # Analyze results
        results = self.analyzeWorkflowResults()
        # Extract documents for this format
        documents = results.get("documents", [])
        formatDocuments = [d for d in documents if d.get("fileName", "").endswith(f".{format.lower()}")]
        return {
            "success": True,
            "format": format,
            "workflowId": workflow.id,
            "status": results.get("status"),
            "documentCount": len(formatDocuments),
            "documents": formatDocuments,
            "results": results
        }
async def waitForWorkflowCompletion(self, timeout: Optional[int] = None, checkInterval: int = 2) -> bool:
"""Wait for workflow to complete."""
if not self.workflow:
return False
startTime = time.time()
lastStatus = None
interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
if timeout is None:
print("Waiting indefinitely (no timeout)")
while True:
# Check timeout only if specified
if timeout is not None and time.time() - startTime > timeout:
print(f"\n⏱️ Timeout after {timeout} seconds")
return False
# Get current workflow status
try:
currentWorkflow = interfaceDbChat.getWorkflow(self.workflow.id)
if not currentWorkflow:
print("\n❌ Workflow not found")
return False
currentStatus = currentWorkflow.status
elapsed = int(time.time() - startTime)
# Print status if it changed
if currentStatus != lastStatus:
print(f"Workflow status: {currentStatus} (elapsed: {elapsed}s)")
lastStatus = currentStatus
# Check if workflow is complete
if currentStatus in ["completed", "stopped", "failed"]:
self.workflow = currentWorkflow
statusIcon = "" if currentStatus == "completed" else ""
print(f"\n{statusIcon} Workflow finished with status: {currentStatus} (elapsed: {elapsed}s)")
return currentStatus == "completed"
# Wait before next check
await asyncio.sleep(checkInterval)
except Exception as e:
print(f"\n⚠️ Error checking workflow status: {str(e)}")
await asyncio.sleep(checkInterval)
def analyzeWorkflowResults(self) -> Dict[str, Any]:
"""Analyze workflow results and extract information."""
if not self.workflow:
return {"error": "No workflow to analyze"}
interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
workflow = interfaceDbChat.getWorkflow(self.workflow.id)
if not workflow:
return {"error": "Workflow not found"}
# Get unified chat data
chatData = interfaceDbChat.getUnifiedChatData(workflow.id, None)
# Count messages
messages = chatData.get("messages", [])
userMessages = [m for m in messages if m.get("role") == "user"]
assistantMessages = [m for m in messages if m.get("role") == "assistant"]
# Count documents
documents = chatData.get("documents", [])
# Get logs
logs = chatData.get("logs", [])
results = {
"workflowId": workflow.id,
"status": workflow.status,
"workflowMode": str(workflow.workflowMode) if hasattr(workflow, 'workflowMode') else None,
"currentRound": workflow.currentRound,
"totalTasks": workflow.totalTasks,
"totalActions": workflow.totalActions,
"messageCount": len(messages),
"userMessageCount": len(userMessages),
"assistantMessageCount": len(assistantMessages),
"documentCount": len(documents),
"logCount": len(logs),
"documents": documents,
"logs": logs
}
print(f"\nWorkflow Results:")
print(f" Status: {results['status']}")
print(f" Tasks: {results['totalTasks']}")
print(f" Actions: {results['totalActions']}")
print(f" Messages: {results['messageCount']}")
print(f" Documents: {results['documentCount']}")
# Print document details
if documents:
print(f"\nGenerated Documents:")
for doc in documents:
fileName = doc.get("fileName", "unknown")
fileSize = doc.get("fileSize", 0)
mimeType = doc.get("mimeType", "unknown")
documentType = doc.get("documentType", "N/A")
print(f" - {fileName} ({fileSize} bytes, {mimeType}, type: {documentType})")
return results
def verifyDocumentFormat(self, document: Dict[str, Any], expectedFormat: str) -> Dict[str, Any]:
"""Verify that a document matches the expected format and contains expected metadata."""
fileName = document.get("fileName", "")
mimeType = document.get("mimeType", "")
fileSize = document.get("fileSize", 0)
documentType = document.get("documentType")
metadata = document.get("metadata")
# Expected MIME types
expectedMimeTypes = {
"pdf": ["application/pdf"],
"docx": ["application/vnd.openxmlformats-officedocument.wordprocessingml.document"],
"xlsx": ["application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"],
"pptx": ["application/vnd.openxmlformats-officedocument.presentationml.presentation"],
"html": ["text/html", "application/xhtml+xml"]
}
# Expected file extensions
expectedExtensions = {
"pdf": [".pdf"],
"docx": [".docx"],
"xlsx": [".xlsx"],
"pptx": [".pptx"],
"html": [".html", ".htm"]
}
formatLower = expectedFormat.lower()
expectedMimes = expectedMimeTypes.get(formatLower, [])
expectedExts = expectedExtensions.get(formatLower, [])
# Check file extension
hasCorrectExtension = any(fileName.lower().endswith(ext) for ext in expectedExts)
# Check MIME type
hasCorrectMimeType = any(mimeType.lower() == mime.lower() for mime in expectedMimes)
# Check file size (should be > 0)
hasValidSize = fileSize > 0
# Check document type (should be present)
hasDocumentType = documentType is not None
# Check metadata (should be present)
hasMetadata = metadata is not None and isinstance(metadata, dict)
verification = {
"format": expectedFormat,
"fileName": fileName,
"mimeType": mimeType,
"fileSize": fileSize,
"documentType": documentType,
"hasMetadata": hasMetadata,
"hasCorrectExtension": hasCorrectExtension,
"hasCorrectMimeType": hasCorrectMimeType,
"hasValidSize": hasValidSize,
"hasDocumentType": hasDocumentType,
"isValid": hasCorrectExtension and hasValidSize and hasCorrectMimeType,
"isComplete": hasCorrectExtension and hasValidSize and hasCorrectMimeType and hasDocumentType and hasMetadata
}
return verification
    async def testAllFormats(self) -> Dict[str, Any]:
        """Test document generation in DOCX, XLSX, PPTX, PDF, and HTML formats.

        Runs one workflow per format, verifies the first returned document,
        and collects per-format result dicts keyed by format name. A failure
        in one format does not stop the remaining formats.
        """
        print("\n" + "="*80)
        print("TESTING DOCUMENT GENERATION IN ALL FORMATS")
        print("="*80)
        # Test all document formats
        formats = ["docx", "xlsx", "pptx", "pdf", "html"]
        results = {}
        for format in formats:
            try:
                print(f"\n{'='*80}")
                print(f"Testing {format.upper()} format...")
                print(f"{'='*80}")
                result = await self.generateDocumentInFormat(format)
                results[format] = result
                if result.get("success"):
                    documents = result.get("documents", [])
                    if documents:
                        # Verify first document
                        verification = self.verifyDocumentFormat(documents[0], format)
                        result["verification"] = verification
                        print(f"\n{format.upper()} generation successful!")
                        print(f" Documents: {len(documents)}")
                        print(f" Verification: {'✅ PASS' if verification['isValid'] else '❌ FAIL'}")
                        print(f" Complete (with metadata): {'✅ YES' if verification['isComplete'] else '❌ NO'}")
                        if verification.get("fileName"):
                            print(f" File: {verification['fileName']}")
                            print(f" Size: {verification['fileSize']} bytes")
                            print(f" MIME: {verification['mimeType']}")
                            print(f" Document Type: {verification.get('documentType', 'N/A')}")
                            print(f" Has Metadata: {'' if verification.get('hasMetadata') else ''}")
                    else:
                        print(f"\n⚠️ {format.upper()} generation completed but no documents found")
                else:
                    error = result.get("error", "Unknown error")
                    print(f"\n{format.upper()} generation failed: {error}")
                # Small delay between tests
                await asyncio.sleep(2)
            except Exception as e:
                import traceback
                print(f"\n❌ Error testing {format.upper()}: {str(e)}")
                print(traceback.format_exc())
                results[format] = {
                    "success": False,
                    "error": str(e),
                    "traceback": traceback.format_exc()
                }
        return results
    async def runTest(self):
        """Run the complete test.

        Initializes the environment, exercises every format, prints a summary,
        and stores/returns the aggregated results in ``self.testResults``.
        Any unexpected exception is captured into the result dict instead of
        propagating.
        """
        print("\n" + "="*80)
        print("DOCUMENT GENERATION FORMATS TEST 10 - ALL FORMATS")
        print("="*80)
        try:
            # Initialize
            await self.initialize()
            # Test all formats
            formatResults = await self.testAllFormats()
            # Summary
            print("\n" + "="*80)
            print("TEST SUMMARY")
            print("="*80)
            # Format tests summary
            print("\nFormat Tests:")
            successCount = 0
            failCount = 0
            completeCount = 0 # Documents with metadata
            for format, result in formatResults.items():
                if result.get("success"):
                    successCount += 1
                    verification = result.get("verification", {})
                    isValid = verification.get("isValid", False)
                    isComplete = verification.get("isComplete", False)
                    if isComplete:
                        completeCount += 1
                    statusIcon = "" if isValid else "⚠️"
                    completeIcon = "" if isComplete else ""
                    docCount = result.get("documentCount", 0)
                    print(f"{statusIcon} {format.upper():6s}: {'PASS' if isValid else 'FAIL'} - {docCount} document(s) - Metadata: {completeIcon}")
                else:
                    failCount += 1
                    error = result.get("error", "Unknown error")
                    print(f"{format.upper():6s}: FAIL - {error}")
            print(f"\nFormat Tests: {successCount} passed, {failCount} failed out of {len(formatResults)} formats")
            print(f"Complete Documents (with metadata): {completeCount} out of {successCount} successful generations")
            self.testResults = {
                "success": failCount == 0,
                "formatTests": {
                    "successCount": successCount,
                    "failCount": failCount,
                    "completeCount": completeCount,
                    "totalFormats": len(formatResults),
                    "results": formatResults
                },
                "totalSuccess": successCount,
                "totalFail": failCount
            }
            return self.testResults
        except Exception as e:
            import traceback
            print(f"\n❌ Test failed with error: {type(e).__name__}: {str(e)}")
            print(f"Traceback:\n{traceback.format_exc()}")
            self.testResults = {
                "success": False,
                "error": str(e),
                "traceback": traceback.format_exc()
            }
            return self.testResults
async def main():
    """Run document generation formats test 10 and print a JSON summary."""
    runner = DocumentGenerationFormatsTester10()
    summary = await runner.runTest()
    # Emit a machine-parseable JSON summary at the end of the log.
    banner = "=" * 80
    print(f"\n{banner}\nFINAL RESULTS (JSON)\n{banner}")
    print(json.dumps(summary, indent=2, default=str))

if __name__ == "__main__":
    asyncio.run(main())

View file

@ -1,559 +0,0 @@
#!/usr/bin/env python3
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Code Generation Formats Test 11 - Tests code generation in JSON, CSV, and XML formats
Tests code generation with structured data formats including validation and formatting.
"""
import asyncio
import json
import sys
import os
import time
import csv
import io
import xml.etree.ElementTree as ET
from typing import Dict, Any, List, Optional
# Add the gateway to path (go up 2 levels from tests/functional/)
_gateway_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
if _gateway_path not in sys.path:
sys.path.insert(0, _gateway_path)
# Import the service initialization
from modules.serviceHub import getInterface as getServices
from modules.datamodels.datamodelChat import UserInputRequest, WorkflowModeEnum
from modules.datamodels.datamodelUam import User
from modules.workflows.automation import chatStart
import modules.interfaces.interfaceDbChat as interfaceFeatureAiChat
class CodeGenerationFormatsTester11:
    def __init__(self):
        """Set up the tester: root user, initial mandate, services, and result state."""
        # Use root user for testing (has full access to everything)
        from modules.interfaces.interfaceDbApp import getRootInterface
        from modules.datamodels.datamodelUam import Mandate
        rootInterface = getRootInterface()
        self.testUser = rootInterface.currentUser
        # Get initial mandate ID for testing (User has no mandateId - use initial mandate)
        self.testMandateId = rootInterface.getInitialId(Mandate)
        # Initialize services using the existing system
        self.services = getServices(self.testUser, None) # Test user, no workflow
        # Most recently started workflow; set per format run
        self.workflow = None
        # Final summary dict populated by runTest()
        self.testResults = {}
        # NOTE(review): written nowhere in this class body — apparently reserved
        self.generatedDocuments = {}
    async def initialize(self):
        """Initialize the test environment.

        Enables debug chat-workflow logging and raises the root logger level
        to INFO so workflow progress is visible in the test output.
        """
        # Enable debug file logging for tests
        from modules.shared.configuration import APP_CONFIG
        APP_CONFIG.set("APP_DEBUG_CHAT_WORKFLOW_ENABLED", True)
        # Set logging level to INFO to see workflow progress
        import logging
        logging.getLogger().setLevel(logging.INFO)
        print(f"Initialized test with user: {self.testUser.id}")
        print(f"Test Mandate ID: {self.testMandateId}")
        print(f"Debug logging enabled: {APP_CONFIG.get('APP_DEBUG_CHAT_WORKFLOW_ENABLED', False)}")
def createTestPrompt(self, format: str) -> str:
"""Create a test prompt for code generation in the specified format.
The prompt requests 3 files for each format:
- Structured data generation appropriate for the format
- Proper formatting and validation
"""
formatPrompts = {
"json": (
"Generate 3 JSON code files for a customer management system:\n"
"1) Create a config.json file with:\n"
" - Application name: 'Customer Manager'\n"
" - Version: '1.0.0'\n"
" - Database settings: host, port, name\n"
" - API settings: baseUrl, timeout\n"
"2) Create a customers.json file with an array of customer objects:\n"
" - Each customer should have: id, name, email, phone, address\n"
" - Include at least 3 sample customers\n"
"3) Create a settings.json file with:\n"
" - Theme settings: darkMode, fontSize, language\n"
" - Notification settings: email, sms, push\n"
" - Feature flags: enableAnalytics, enableReports\n\n"
"Format all files as valid JSON with proper indentation."
),
"csv": (
"Generate 3 CSV code files for expense tracking:\n"
"1) Create an expenses.csv file with:\n"
" - Header row: Documentname, Datum, Händler, Kreditkartennummer, Gesamtbetrag, Währung, MWST-Satz\n"
" - Data rows with at least 5 expense entries\n"
" - Use consistent date format (DD.MM.YYYY)\n"
" - Use CHF as currency\n"
" - Use 7.7% as VAT rate\n"
"2) Create a categories.csv file with:\n"
" - Header row: CategoryID, CategoryName, Description, ParentCategory\n"
" - Data rows with at least 8 categories\n"
"3) Create a vendors.csv file with:\n"
" - Header row: VendorID, VendorName, ContactPerson, Email, Phone, Address\n"
" - Data rows with at least 6 vendors\n\n"
"Format all files as valid CSV with proper header row and consistent column count."
),
"xml": (
"Generate 3 XML code files for a product catalog:\n"
"1) Create a products.xml file with:\n"
" - Root element: <catalog>\n"
" - Each product as <product> element with:\n"
" - <id>, <name>, <description>, <price>, <category>\n"
" - Include at least 4 products\n"
"2) Create a categories.xml file with:\n"
" - Root element: <categories>\n"
" - Each category as <category> element with:\n"
" - <id>, <name>, <description>, <parentId>\n"
" - Include at least 5 categories\n"
"3) Create a suppliers.xml file with:\n"
" - Root element: <suppliers>\n"
" - Each supplier as <supplier> element with:\n"
" - <id>, <name>, <contact>, <address>\n"
" - Include at least 3 suppliers\n\n"
"Format all files as valid XML with proper indentation and structure."
)
}
return formatPrompts.get(format.lower(), formatPrompts["json"])
    async def generateCodeInFormat(self, format: str) -> Dict[str, Any]:
        """Generate code in the specified format using workflow.

        Starts a dynamic chat workflow with the format-specific prompt (no
        attachments), waits for completion, and returns a result dict with the
        documents whose file name ends in the format extension.
        """
        print("\n" + "="*80)
        print(f"GENERATING CODE IN {format.upper()} FORMAT")
        print("="*80)
        prompt = self.createTestPrompt(format)
        print(f"Prompt: {prompt[:200]}...")
        # Create user input request
        userInput = UserInputRequest(
            prompt=prompt,
            listFileId=[],
            userLanguage="en"
        )
        # Start workflow
        print(f"\nStarting workflow for {format.upper()} code generation...")
        workflow = await chatStart(
            currentUser=self.testUser,
            userInput=userInput,
            workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC,
            workflowId=None
        )
        if not workflow:
            return {
                "success": False,
                "error": "Failed to start workflow"
            }
        self.workflow = workflow
        print(f"Workflow started: {workflow.id}")
        # Wait for workflow completion (no timeout - wait indefinitely)
        print(f"Waiting for workflow completion...")
        completed = await self.waitForWorkflowCompletion(timeout=None)
        if not completed:
            return {
                "success": False,
                "error": "Workflow did not complete",
                "workflowId": workflow.id,
                "status": workflow.status if workflow else "unknown"
            }
        # Analyze results
        results = self.analyzeWorkflowResults()
        # Extract documents for this format
        documents = results.get("documents", [])
        formatDocuments = [d for d in documents if d.get("fileName", "").endswith(f".{format.lower()}")]
        return {
            "success": True,
            "format": format,
            "workflowId": workflow.id,
            "status": results.get("status"),
            "documentCount": len(formatDocuments),
            "documents": formatDocuments,
            "results": results
        }
async def waitForWorkflowCompletion(self, timeout: Optional[int] = None, checkInterval: int = 2) -> bool:
"""Wait for workflow to complete."""
if not self.workflow:
return False
startTime = time.time()
lastStatus = None
interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
if timeout is None:
print("Waiting indefinitely (no timeout)")
while True:
# Check timeout only if specified
if timeout is not None and time.time() - startTime > timeout:
print(f"\n⏱️ Timeout after {timeout} seconds")
return False
# Get current workflow status
try:
currentWorkflow = interfaceDbChat.getWorkflow(self.workflow.id)
if not currentWorkflow:
print("\n❌ Workflow not found")
return False
currentStatus = currentWorkflow.status
elapsed = int(time.time() - startTime)
# Print status if it changed
if currentStatus != lastStatus:
print(f"Workflow status: {currentStatus} (elapsed: {elapsed}s)")
lastStatus = currentStatus
# Check if workflow is complete
if currentStatus in ["completed", "stopped", "failed"]:
self.workflow = currentWorkflow
statusIcon = "" if currentStatus == "completed" else ""
print(f"\n{statusIcon} Workflow finished with status: {currentStatus} (elapsed: {elapsed}s)")
return currentStatus == "completed"
# Wait before next check
await asyncio.sleep(checkInterval)
except Exception as e:
print(f"\n⚠️ Error checking workflow status: {str(e)}")
await asyncio.sleep(checkInterval)
def analyzeWorkflowResults(self) -> Dict[str, Any]:
"""Analyze workflow results and extract information."""
if not self.workflow:
return {"error": "No workflow to analyze"}
interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
workflow = interfaceDbChat.getWorkflow(self.workflow.id)
if not workflow:
return {"error": "Workflow not found"}
# Get unified chat data
chatData = interfaceDbChat.getUnifiedChatData(workflow.id, None)
# Count messages
messages = chatData.get("messages", [])
userMessages = [m for m in messages if m.get("role") == "user"]
assistantMessages = [m for m in messages if m.get("role") == "assistant"]
# Count documents
documents = chatData.get("documents", [])
# Get logs
logs = chatData.get("logs", [])
results = {
"workflowId": workflow.id,
"status": workflow.status,
"workflowMode": str(workflow.workflowMode) if hasattr(workflow, 'workflowMode') else None,
"currentRound": workflow.currentRound,
"totalTasks": workflow.totalTasks,
"totalActions": workflow.totalActions,
"messageCount": len(messages),
"userMessageCount": len(userMessages),
"assistantMessageCount": len(assistantMessages),
"documentCount": len(documents),
"logCount": len(logs),
"documents": documents,
"logs": logs
}
print(f"\nWorkflow Results:")
print(f" Status: {results['status']}")
print(f" Tasks: {results['totalTasks']}")
print(f" Actions: {results['totalActions']}")
print(f" Messages: {results['messageCount']}")
print(f" Documents: {results['documentCount']}")
# Print document details
if documents:
print(f"\nGenerated Documents:")
for doc in documents:
fileName = doc.get("fileName", "unknown")
fileSize = doc.get("fileSize", 0)
mimeType = doc.get("mimeType", "unknown")
print(f" - {fileName} ({fileSize} bytes, {mimeType})")
return results
def verifyCodeFormat(self, document: Dict[str, Any], expectedFormat: str) -> Dict[str, Any]:
"""Verify that a code file matches the expected format and is valid."""
fileName = document.get("fileName", "")
mimeType = document.get("mimeType", "")
fileSize = document.get("fileSize", 0)
# Expected MIME types
expectedMimeTypes = {
"json": ["application/json"],
"csv": ["text/csv"],
"xml": ["application/xml", "text/xml"]
}
# Expected file extensions
expectedExtensions = {
"json": [".json"],
"csv": [".csv"],
"xml": [".xml"]
}
formatLower = expectedFormat.lower()
expectedMimes = expectedMimeTypes.get(formatLower, [])
expectedExts = expectedExtensions.get(formatLower, [])
# Check file extension
hasCorrectExtension = any(fileName.lower().endswith(ext) for ext in expectedExts)
# Check MIME type
hasCorrectMimeType = any(mimeType.lower() == mime.lower() for mime in expectedMimes)
# Check file size (should be > 0)
hasValidSize = fileSize > 0
# Try to read and validate content
isValidContent = False
validationError = None
try:
# Get file content from fileId
fileId = document.get("fileId")
if fileId and hasattr(self.services, 'interfaceDbComponent'):
fileData = self.services.interfaceDbComponent.getFileData(fileId)
if fileData:
content = fileData.decode('utf-8') if isinstance(fileData, bytes) else fileData
# Validate format-specific syntax
if formatLower == "json":
try:
json.loads(content)
isValidContent = True
except json.JSONDecodeError as e:
validationError = f"Invalid JSON: {str(e)}"
elif formatLower == "csv":
try:
reader = csv.reader(io.StringIO(content))
rows = list(reader)
if len(rows) > 0:
# Check header row exists
headerCount = len(rows[0])
# Check all rows have same column count
allRowsValid = all(len(row) == headerCount for row in rows)
isValidContent = allRowsValid
if not allRowsValid:
validationError = "CSV rows have inconsistent column counts"
else:
validationError = "CSV file is empty"
except Exception as e:
validationError = f"CSV parsing error: {str(e)}"
elif formatLower == "xml":
try:
ET.fromstring(content)
isValidContent = True
except ET.ParseError as e:
validationError = f"Invalid XML: {str(e)}"
else:
validationError = "Could not read file data"
else:
validationError = "No fileId available"
except Exception as e:
validationError = f"Error reading/validating file: {str(e)}"
verification = {
"format": expectedFormat,
"fileName": fileName,
"mimeType": mimeType,
"fileSize": fileSize,
"hasCorrectExtension": hasCorrectExtension,
"hasCorrectMimeType": hasCorrectMimeType,
"hasValidSize": hasValidSize,
"isValidContent": isValidContent,
"validationError": validationError,
"isValid": hasCorrectExtension and hasValidSize and hasCorrectMimeType,
"isComplete": hasCorrectExtension and hasValidSize and hasCorrectMimeType and isValidContent
}
return verification
    async def testAllFormats(self) -> Dict[str, Any]:
        """Test code generation in JSON, CSV, and XML formats.

        Runs one workflow per format, verifies every returned document (three
        files are expected per format), and collects per-format result dicts
        keyed by format name. A failure in one format does not stop the rest.
        """
        print("\n" + "="*80)
        print("TESTING CODE GENERATION IN ALL FORMATS")
        print("="*80)
        # Test all code formats
        formats = ["json", "csv", "xml"]
        results = {}
        for format in formats:
            try:
                print(f"\n{'='*80}")
                print(f"Testing {format.upper()} format...")
                print(f"{'='*80}")
                result = await self.generateCodeInFormat(format)
                results[format] = result
                if result.get("success"):
                    documents = result.get("documents", [])
                    if documents:
                        # Verify all documents (expecting 3 files per format)
                        verifications = []
                        for doc in documents:
                            verification = self.verifyCodeFormat(doc, format)
                            verifications.append(verification)
                        result["verifications"] = verifications
                        # Count valid documents
                        validCount = sum(1 for v in verifications if v.get("isValid"))
                        contentValidCount = sum(1 for v in verifications if v.get("isValidContent"))
                        print(f"\n{format.upper()} generation successful!")
                        print(f" Documents: {len(documents)} (expected: 3)")
                        print(f" Valid Format: {validCount}/{len(documents)}")
                        print(f" Valid Content: {contentValidCount}/{len(documents)}")
                        # Print details for each file
                        for i, verification in enumerate(verifications, 1):
                            statusIcon = "" if verification.get("isValid") else ""
                            contentIcon = "" if verification.get("isValidContent") else ""
                            print(f" File {i}: {statusIcon} Format, {contentIcon} Content - {verification.get('fileName', 'unknown')}")
                            if verification.get("validationError"):
                                print(f" Error: {verification['validationError']}")
                    else:
                        print(f"\n⚠️ {format.upper()} generation completed but no documents found")
                else:
                    error = result.get("error", "Unknown error")
                    print(f"\n{format.upper()} generation failed: {error}")
                # Small delay between tests
                await asyncio.sleep(2)
            except Exception as e:
                import traceback
                print(f"\n❌ Error testing {format.upper()}: {str(e)}")
                print(traceback.format_exc())
                results[format] = {
                    "success": False,
                    "error": str(e),
                    "traceback": traceback.format_exc()
                }
        return results
    async def runTest(self):
        """Run the complete test.

        Initializes the environment, exercises every format, prints a summary,
        and stores/returns the aggregated results in ``self.testResults``.
        Any unexpected exception is captured into the result dict instead of
        propagating.
        """
        print("\n" + "="*80)
        print("CODE GENERATION FORMATS TEST 11 - JSON, CSV, XML")
        print("="*80)
        try:
            # Initialize
            await self.initialize()
            # Test all formats
            formatResults = await self.testAllFormats()
            # Summary
            print("\n" + "="*80)
            print("TEST SUMMARY")
            print("="*80)
            # Format tests summary
            print("\nFormat Tests:")
            successCount = 0
            failCount = 0
            completeCount = 0 # Files with valid content
            for format, result in formatResults.items():
                if result.get("success"):
                    successCount += 1
                    verifications = result.get("verifications", [])
                    docCount = result.get("documentCount", 0)
                    # Count valid files
                    validCount = sum(1 for v in verifications if v.get("isValid"))
                    contentValidCount = sum(1 for v in verifications if v.get("isValidContent"))
                    completeCount += contentValidCount
                    # Overall status (all files valid)
                    allValid = len(verifications) > 0 and all(v.get("isValid") for v in verifications)
                    allContentValid = len(verifications) > 0 and all(v.get("isValidContent") for v in verifications)
                    statusIcon = "" if allValid else "⚠️"
                    contentIcon = "" if allContentValid else ""
                    print(f"{statusIcon} {format.upper():6s}: {'PASS' if allValid else 'PARTIAL'} - {docCount} file(s) ({validCount} valid format, {contentValidCount} valid content)")
                    # Print errors if any
                    for v in verifications:
                        if v.get("validationError"):
                            print(f" {v.get('fileName', 'unknown')}: {v['validationError']}")
                else:
                    failCount += 1
                    error = result.get("error", "Unknown error")
                    print(f"{format.upper():6s}: FAIL - {error}")
            print(f"\nFormat Tests: {successCount} passed, {failCount} failed out of {len(formatResults)} formats")
            print(f"Valid Content Files: {completeCount} total files with valid content")
            self.testResults = {
                "success": failCount == 0,
                "formatTests": {
                    "successCount": successCount,
                    "failCount": failCount,
                    "completeCount": completeCount,
                    "totalFormats": len(formatResults),
                    "results": formatResults
                },
                "totalSuccess": successCount,
                "totalFail": failCount
            }
            return self.testResults
        except Exception as e:
            import traceback
            print(f"\n❌ Test failed with error: {type(e).__name__}: {str(e)}")
            print(f"Traceback:\n{traceback.format_exc()}")
            self.testResults = {
                "success": False,
                "error": str(e),
                "traceback": traceback.format_exc()
            }
            return self.testResults
async def main():
    """Run code generation formats test 11 and print a JSON summary."""
    runner = CodeGenerationFormatsTester11()
    summary = await runner.runTest()
    # Emit a machine-parseable JSON summary at the end of the log.
    banner = "=" * 80
    print(f"\n{banner}\nFINAL RESULTS (JSON)\n{banner}")
    print(json.dumps(summary, indent=2, default=str))

if __name__ == "__main__":
    asyncio.run(main())