From 16db2d91c64312744bba383403b0e6954c53b92f Mon Sep 17 00:00:00 2001
From: patrick-motsch
Date: Tue, 3 Mar 2026 18:57:20 +0100
Subject: [PATCH 1/7] fixed critical code issues
---
modules/workflows/automation/mainWorkflow.py | 27 +--
.../automation/subAutomationSchedule.py | 2 +-
.../automation/subAutomationTemplates.py | 2 +-
.../automation/subAutomationUtils.py | 70 +++----
.../methods/methodAi/actions/generateCode.py | 12 +-
.../methodAi/actions/generateDocument.py | 14 +-
.../methods/methodAi/actions/process.py | 13 +-
.../methods/methodAi/actions/webResearch.py | 11 +-
modules/workflows/methods/methodBase.py | 14 +-
.../methodChatbot/actions/queryDatabase.py | 19 +-
.../methodContext/actions/extractContent.py | 10 +-
.../methodContext/actions/getDocumentIndex.py | 15 +-
.../methodContext/actions/neutralizeData.py | 10 +-
.../methodOutlook/actions/readEmails.py | 7 +-
.../methodOutlook/actions/searchEmails.py | 4 +-
.../methodOutlook/actions/sendDraftEmail.py | 10 +
.../methodOutlook/helpers/connection.py | 20 +-
.../methodOutlook/helpers/emailProcessing.py | 16 +-
.../actions/uploadDocument.py | 7 +-
.../methodSharepoint/helpers/apiClient.py | 92 ++++-----
.../adaptive/adaptiveLearningEngine.py | 32 ++--
.../processing/adaptive/contentValidator.py | 55 +++---
.../processing/adaptive/learningEngine.py | 5 +
.../processing/core/actionExecutor.py | 11 +-
.../processing/core/messageCreator.py | 57 ++----
.../workflows/processing/core/taskPlanner.py | 75 +-------
.../workflows/processing/core/validator.py | 33 ++--
.../processing/modes/modeAutomation.py | 86 +--------
.../workflows/processing/modes/modeBase.py | 78 +++++++-
.../workflows/processing/modes/modeDynamic.py | 178 +++---------------
.../processing/shared/executionState.py | 42 +----
.../processing/shared/methodDiscovery.py | 116 +++---------
.../processing/shared/placeholderFactory.py | 109 ++++-------
.../shared/promptGenerationActionsDynamic.py | 19 +-
.../workflows/processing/workflowProcessor.py | 37 ++--
modules/workflows/workflowManager.py | 64 ++-----
36 files changed, 494 insertions(+), 878 deletions(-)
diff --git a/modules/workflows/automation/mainWorkflow.py b/modules/workflows/automation/mainWorkflow.py
index f1b91939..1fc4b0cf 100644
--- a/modules/workflows/automation/mainWorkflow.py
+++ b/modules/workflows/automation/mainWorkflow.py
@@ -56,12 +56,12 @@ async def chatStart(currentUser: User, userInput: UserInputRequest, workflowMode
logger.error(f"Error starting chat: {str(e)}")
raise
-async def chatStop(currentUser: User, workflowId: str, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None) -> ChatWorkflow:
+async def chatStop(currentUser: User, workflowId: str, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None, featureCode: Optional[str] = None) -> ChatWorkflow:
"""Stops a running chat."""
try:
services = getServices(currentUser, mandateId=mandateId, featureInstanceId=featureInstanceId)
- if featureInstanceId:
- services.featureCode = 'chatplayground'
+ if featureCode:
+ services.featureCode = featureCode
workflowManager = WorkflowManager(services)
return await workflowManager.workflowStop(workflowId)
except Exception as e:
@@ -101,8 +101,11 @@ async def executeAutomation(automationId: str, automation, creatorUser: User, se
logger.debug(f"Automation {automationId} restricted to providers: {automation.allowedProviders}")
# Context comes EXCLUSIVELY from the automation definition
- automationMandateId = str(automation.mandateId)
- automationFeatureInstanceId = str(automation.featureInstanceId)
+ automationMandateId = str(automation.mandateId) if automation.mandateId is not None else None
+ automationFeatureInstanceId = str(automation.featureInstanceId) if automation.featureInstanceId is not None else None
+
+ if not automationMandateId or not automationFeatureInstanceId:
+ raise ValueError(f"Automation {automationId} missing mandateId or featureInstanceId")
logger.info(f"Executing automation {automationId} as user {creatorUser.id} with mandateId={automationMandateId}, featureInstanceId={automationFeatureInstanceId}")
@@ -118,7 +121,7 @@ async def executeAutomation(automationId: str, automation, creatorUser: User, se
logger.error(f"Placeholders: {placeholders}")
logger.error(f"Generated planJson (first 1000 chars): {planJson[:1000]}")
logger.error(f"Error position: line {e.lineno}, column {e.colno}, char {e.pos}")
- if e.pos:
+ if e.pos is not None:
start = max(0, e.pos - 100)
end = min(len(planJson), e.pos + 100)
logger.error(f"Context around error: ...{planJson[start:end]}...")
@@ -233,20 +236,10 @@ def syncAutomationEvents(services, eventUser) -> Dict[str, Any]:
cronKwargs = parseScheduleToCron(schedule)
if isActive:
- # Remove existing event if present (handles schedule changes)
- if currentEventId:
- try:
- eventManager.remove(currentEventId)
- except Exception as e:
- logger.warning(f"Error removing old event {currentEventId}: {str(e)}")
-
- # Register new event
newEventId = f"automation.{automationId}"
-
- # Create event handler function
handler = createAutomationEventHandler(automationId, eventUser)
- # Register cron job
+ # Register with replaceExisting=True (atomically replaces old event)
eventManager.registerCron(
jobId=newEventId,
func=handler,
diff --git a/modules/workflows/automation/subAutomationSchedule.py b/modules/workflows/automation/subAutomationSchedule.py
index 40638461..9db1f3fa 100644
--- a/modules/workflows/automation/subAutomationSchedule.py
+++ b/modules/workflows/automation/subAutomationSchedule.py
@@ -48,7 +48,7 @@ def start(eventUser) -> bool:
except Exception as e:
logger.error(f"Automation: Error setting up events on startup: {str(e)}")
- # Don't fail startup if automation sync fails
+ return False
return True
diff --git a/modules/workflows/automation/subAutomationTemplates.py b/modules/workflows/automation/subAutomationTemplates.py
index 95c1eb77..eb131f0a 100644
--- a/modules/workflows/automation/subAutomationTemplates.py
+++ b/modules/workflows/automation/subAutomationTemplates.py
@@ -6,7 +6,7 @@ Automation templates for workflow definitions.
Contains predefined workflow templates that can be used to create automation definitions.
"""
-from typing import Dict, Any, List
+from typing import Dict, Any
# Automation templates structure
AUTOMATION_TEMPLATES: Dict[str, Any] = {
diff --git a/modules/workflows/automation/subAutomationUtils.py b/modules/workflows/automation/subAutomationUtils.py
index 97d28719..bdac6efb 100644
--- a/modules/workflows/automation/subAutomationUtils.py
+++ b/modules/workflows/automation/subAutomationUtils.py
@@ -69,50 +69,42 @@ def replacePlaceholders(template: str, placeholders: Dict[str, str]) -> str:
result = result.replace(arrayPattern, arrayValue)
continue # Skip the regular replacement below
- # Regular replacement - check if in quoted context
- patternStart = result.find(pattern)
- isQuoted = False
- if patternStart > 0:
- charBefore = result[patternStart - 1] if patternStart > 0 else None
- patternEnd = patternStart + len(pattern)
- charAfter = result[patternEnd] if patternEnd < len(result) else None
- if charBefore == '"' and charAfter == '"':
- isQuoted = True
-
- # Handle different value types
- if isinstance(value, (list, dict)):
- # Python list/dict - convert to JSON
- replacement = json.dumps(value)
- elif isinstance(value, str):
- # String value - check if it's a JSON string representing list/dict
- try:
- parsed = json.loads(value)
- if isinstance(parsed, (list, dict)):
- # It's a JSON string of a list/dict
- if isQuoted:
- # In quoted context, escape the JSON string
- escaped = json.dumps(value)
- replacement = escaped[1:-1] # Remove outer quotes
+ # Replace occurrences one-by-one to handle mixed contexts
+ while pattern in result:
+ patternStart = result.find(pattern)
+ isQuoted = False
+ if patternStart > 0:
+ charBefore = result[patternStart - 1]
+ patternEnd = patternStart + len(pattern)
+ charAfter = result[patternEnd] if patternEnd < len(result) else None
+ if charBefore == '"' and charAfter == '"':
+ isQuoted = True
+
+ if isinstance(value, (list, dict)):
+ replacement = json.dumps(value)
+ elif isinstance(value, str):
+ try:
+ parsed = json.loads(value)
+ if isinstance(parsed, (list, dict)):
+ if isQuoted:
+ escaped = json.dumps(value)
+ replacement = escaped[1:-1]
+ else:
+ replacement = value
else:
- # In unquoted context, use JSON directly
- replacement = value
- else:
- # It's a JSON string of a primitive
+ if isQuoted:
+ escaped = json.dumps(value)
+ replacement = escaped[1:-1]
+ else:
+ replacement = value
+ except (json.JSONDecodeError, ValueError):
if isQuoted:
escaped = json.dumps(value)
replacement = escaped[1:-1]
else:
replacement = value
- except (json.JSONDecodeError, ValueError):
- # Not valid JSON - treat as plain string
- if isQuoted:
- escaped = json.dumps(value)
- replacement = escaped[1:-1]
- else:
- replacement = value
- else:
- # Numbers, booleans, None - convert to string
- replacement = str(value)
- result = result.replace(pattern, replacement)
+ else:
+ replacement = str(value)
+ result = result[:patternStart] + replacement + result[patternStart + len(pattern):]
return result
diff --git a/modules/workflows/methods/methodAi/actions/generateCode.py b/modules/workflows/methods/methodAi/actions/generateCode.py
index 4f9bbd21..c616006b 100644
--- a/modules/workflows/methods/methodAi/actions/generateCode.py
+++ b/modules/workflows/methods/methodAi/actions/generateCode.py
@@ -74,7 +74,11 @@ async def generateCode(self, parameters: Dict[str, Any]) -> ActionResult:
documentName=docData.documentName,
documentData=docData.documentData,
mimeType=docData.mimeType,
- sourceJson=docData.sourceJson if hasattr(docData, 'sourceJson') else None
+ sourceJson=docData.sourceJson if hasattr(docData, 'sourceJson') else None,
+ validationMetadata={
+ "actionType": "ai.generateCode",
+ "resultType": resultType,
+ }
))
# If no documents but content exists, create a document from content
@@ -112,7 +116,11 @@ async def generateCode(self, parameters: Dict[str, Any]) -> ActionResult:
documents.append(ActionDocument(
documentName=docName,
documentData=aiResponse.content.encode('utf-8') if isinstance(aiResponse.content, str) else aiResponse.content,
- mimeType=mimeType
+ mimeType=mimeType,
+ validationMetadata={
+ "actionType": "ai.generateCode",
+ "resultType": resultType,
+ }
))
return ActionResult.isSuccess(documents=documents)
diff --git a/modules/workflows/methods/methodAi/actions/generateDocument.py b/modules/workflows/methods/methodAi/actions/generateDocument.py
index 65e95a32..8bb33f9d 100644
--- a/modules/workflows/methods/methodAi/actions/generateDocument.py
+++ b/modules/workflows/methods/methodAi/actions/generateDocument.py
@@ -78,7 +78,12 @@ async def generateDocument(self, parameters: Dict[str, Any]) -> ActionResult:
documentName=docData.documentName,
documentData=docData.documentData,
mimeType=docData.mimeType,
- sourceJson=docData.sourceJson if hasattr(docData, 'sourceJson') else None
+ sourceJson=docData.sourceJson if hasattr(docData, 'sourceJson') else None,
+ validationMetadata={
+ "actionType": "ai.generateDocument",
+ "documentType": documentType,
+ "resultType": resultType,
+ }
))
# If no documents but content exists, create a document from content
@@ -112,7 +117,12 @@ async def generateDocument(self, parameters: Dict[str, Any]) -> ActionResult:
documents.append(ActionDocument(
documentName=docName,
documentData=aiResponse.content.encode('utf-8') if isinstance(aiResponse.content, str) else aiResponse.content,
- mimeType=mimeType
+ mimeType=mimeType,
+ validationMetadata={
+ "actionType": "ai.generateDocument",
+ "documentType": documentType,
+ "resultType": resultType,
+ }
))
return ActionResult.isSuccess(documents=documents)
diff --git a/modules/workflows/methods/methodAi/actions/process.py b/modules/workflows/methods/methodAi/actions/process.py
index 752fe7f6..b4157f13 100644
--- a/modules/workflows/methods/methodAi/actions/process.py
+++ b/modules/workflows/methods/methodAi/actions/process.py
@@ -12,8 +12,8 @@ from modules.datamodels.datamodelExtraction import ContentPart
logger = logging.getLogger(__name__)
async def process(self, parameters: Dict[str, Any]) -> ActionResult:
+ operationId = None
try:
- # Init progress logger
workflowId = self.services.workflow.id if self.services.workflow else f"no-workflow-{int(time.time())}"
operationId = f"ai_process_{workflowId}_{int(time.time())}"
@@ -83,7 +83,8 @@ async def process(self, parameters: Dict[str, Any]) -> ActionResult:
output_format = None
logger.debug("resultType not provided - formats will be determined from prompt by AI")
- output_mime_type = "application/octet-stream" # Prefer service-provided mimeType when available
+ mimeMap = {"txt": "text/plain", "json": "application/json", "html": "text/html", "md": "text/markdown", "csv": "text/csv", "xml": "application/xml"}
+ output_mime_type = mimeMap.get(normalized_result_type, "text/plain") if normalized_result_type else "text/plain"
# Phase 7.3: Pass both documentList and contentParts to AI service
# (Extraction logic removed - handled by AI service)
@@ -264,11 +265,11 @@ async def process(self, parameters: Dict[str, Any]) -> ActionResult:
except Exception as e:
logger.error(f"Error in AI processing: {str(e)}")
- # Complete progress tracking with failure
try:
- self.services.chat.progressLogFinish(operationId, False)
- except:
- pass # Don't fail on progress logging errors
+ if operationId:
+ self.services.chat.progressLogFinish(operationId, False)
+ except Exception:
+ pass
return ActionResult.isFailure(
error=str(e)
diff --git a/modules/workflows/methods/methodAi/actions/webResearch.py b/modules/workflows/methods/methodAi/actions/webResearch.py
index 62b43bce..d59a26f9 100644
--- a/modules/workflows/methods/methodAi/actions/webResearch.py
+++ b/modules/workflows/methods/methodAi/actions/webResearch.py
@@ -4,18 +4,19 @@
import logging
import time
import re
+import json
from typing import Dict, Any
from modules.datamodels.datamodelChat import ActionResult, ActionDocument
logger = logging.getLogger(__name__)
async def webResearch(self, parameters: Dict[str, Any]) -> ActionResult:
+ operationId = None
try:
prompt = parameters.get("prompt")
if not prompt:
return ActionResult.isFailure(error="Research prompt is required")
- # Init progress logger
workflowId = self.services.workflow.id if self.services.workflow else f"no-workflow-{int(time.time())}"
operationId = f"web_research_{workflowId}_{int(time.time())}"
@@ -78,9 +79,10 @@ async def webResearch(self, parameters: Dict[str, Any]) -> ActionResult:
"researchDepth": parameters.get("researchDepth", "general"),
"resultFormat": "json"
}
+ documentData = json.dumps(result, ensure_ascii=False) if isinstance(result, dict) else result
actionDocument = ActionDocument(
documentName=meaningfulName,
- documentData=result,
+ documentData=documentData,
mimeType="application/json",
validationMetadata=validationMetadata
)
@@ -90,8 +92,9 @@ async def webResearch(self, parameters: Dict[str, Any]) -> ActionResult:
except Exception as e:
logger.error(f"Error in web research: {str(e)}")
try:
- self.services.chat.progressLogFinish(operationId, False)
- except:
+ if operationId:
+ self.services.chat.progressLogFinish(operationId, False)
+ except Exception:
pass
return ActionResult.isFailure(error=str(e))
diff --git a/modules/workflows/methods/methodBase.py b/modules/workflows/methods/methodBase.py
index 173023f1..1a81c3eb 100644
--- a/modules/workflows/methods/methodBase.py
+++ b/modules/workflows/methods/methodBase.py
@@ -1,11 +1,10 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
-from typing import Dict, List, Optional, Any, Literal
+from typing import Dict, List, Optional, Any
from datetime import datetime, UTC
import logging
from functools import wraps
-import inspect
from modules.datamodels.datamodelWorkflowActions import WorkflowActionDefinition, WorkflowActionParameter
from modules.datamodels.datamodelRbac import AccessRuleContext
@@ -258,9 +257,13 @@ class MethodBase:
raise ValueError(f"Expected dict for type '{expectedType}', got {type(value).__name__}")
return value
- # Handle simple types
+ # Handle simple types (bool must be checked before int since bool is subclass of int)
if expectedType in typeMap:
expectedTypeClass = typeMap[expectedType]
+ if expectedType == 'int' and isinstance(value, bool):
+ raise ValueError(f"Expected int, got bool: {value}")
+ if expectedType == 'bool' and isinstance(value, int) and not isinstance(value, bool):
+ return bool(value)
if not isinstance(value, expectedTypeClass):
try:
return expectedTypeClass(value)
@@ -290,10 +293,11 @@ class MethodBase:
def getActionSignature(self, actionName: str) -> str:
"""Get formatted action signature for AI prompt generation (detailed version)"""
- if actionName not in self.actions:
+ allActions = self.actions
+ if actionName not in allActions:
return ""
- action = self.actions[actionName]
+ action = allActions[actionName]
paramList = []
# Extract detailed parameter information from docstring
diff --git a/modules/workflows/methods/methodChatbot/actions/queryDatabase.py b/modules/workflows/methods/methodChatbot/actions/queryDatabase.py
index ff7e896f..8622a5a9 100644
--- a/modules/workflows/methods/methodChatbot/actions/queryDatabase.py
+++ b/modules/workflows/methods/methodChatbot/actions/queryDatabase.py
@@ -89,14 +89,26 @@ async def queryDatabase(self, parameters: Dict[str, Any]) -> ActionResult:
# Update progress
self.services.chat.progressLogUpdate(operationId, 0.3, "Validating query")
+ # Validate: only SELECT queries allowed
+ sqlNormalized = sqlQuery.strip().upper()
+ if not sqlNormalized.startswith("SELECT"):
+ return ActionResult.isFailure(error="Only SELECT queries are allowed")
+ forbiddenKeywords = ["INSERT", "UPDATE", "DELETE", "DROP", "ALTER", "CREATE", "TRUNCATE", "EXEC", "EXECUTE"]
+ for kw in forbiddenKeywords:
+ if f" {kw} " in f" {sqlNormalized} " or sqlNormalized.startswith(f"{kw} "):
+ return ActionResult.isFailure(error=f"Forbidden SQL keyword detected: {kw}")
+
# Initialize connector
connector = PreprocessorConnector()
# Update progress
self.services.chat.progressLogUpdate(operationId, 0.5, "Executing query")
- # Execute query
- result = await connector.executeQuery(sqlQuery)
+ try:
+ result = await connector.executeQuery(sqlQuery)
+ except Exception:
+ await connector.close()
+ raise
# Update progress
self.services.chat.progressLogUpdate(operationId, 0.8, "Formatting results")
@@ -134,10 +146,9 @@ async def queryDatabase(self, parameters: Dict[str, Any]) -> ActionResult:
except Exception as e:
logger.error(f"Error executing database query: {str(e)}")
- # Complete progress tracking with failure
try:
self.services.chat.progressLogFinish(operationId, False)
- except:
+ except Exception:
pass
return ActionResult.isFailure(
diff --git a/modules/workflows/methods/methodContext/actions/extractContent.py b/modules/workflows/methods/methodContext/actions/extractContent.py
index 5b90ce13..466165ad 100644
--- a/modules/workflows/methods/methodContext/actions/extractContent.py
+++ b/modules/workflows/methods/methodContext/actions/extractContent.py
@@ -11,8 +11,8 @@ from modules.datamodels.datamodelExtraction import ExtractionOptions, MergeStrat
logger = logging.getLogger(__name__)
async def extractContent(self, parameters: Dict[str, Any]) -> ActionResult:
+ operationId = None
try:
- # Init progress logger
workflowId = self.services.workflow.id if self.services.workflow else f"no-workflow-{int(time.time())}"
operationId = f"context_extract_{workflowId}_{int(time.time())}"
@@ -208,11 +208,11 @@ async def extractContent(self, parameters: Dict[str, Any]) -> ActionResult:
except Exception as e:
logger.error(f"Error in content extraction: {str(e)}")
- # Complete progress tracking with failure
try:
- self.services.chat.progressLogFinish(operationId, False)
- except:
- pass # Don't fail on progress logging errors
+ if operationId:
+ self.services.chat.progressLogFinish(operationId, False)
+ except Exception:
+ pass
return ActionResult.isFailure(error=str(e))
diff --git a/modules/workflows/methods/methodContext/actions/getDocumentIndex.py b/modules/workflows/methods/methodContext/actions/getDocumentIndex.py
index 9991285b..b2822e0d 100644
--- a/modules/workflows/methods/methodContext/actions/getDocumentIndex.py
+++ b/modules/workflows/methods/methodContext/actions/getDocumentIndex.py
@@ -22,14 +22,13 @@ async def getDocumentIndex(self, parameters: Dict[str, Any]) -> ActionResult:
documentsIndex = self.services.chat.getAvailableDocuments(workflow)
if not documentsIndex or documentsIndex == "No documents available" or documentsIndex == "NO DOCUMENTS AVAILABLE - This workflow has no documents to process.":
- # Return empty index structure
+ indexData = {
+ "workflowId": getattr(workflow, 'id', 'unknown'),
+ "totalDocuments": 0,
+ "rounds": [],
+ "documentReferences": []
+ }
if resultType == "json":
- indexData = {
- "workflowId": getattr(workflow, 'id', 'unknown'),
- "totalDocuments": 0,
- "rounds": [],
- "documentReferences": []
- }
indexContent = json.dumps(indexData, indent=2, ensure_ascii=False)
else:
indexContent = "Document Index\n==============\n\nNo documents available in this workflow.\n"
@@ -64,7 +63,7 @@ async def getDocumentIndex(self, parameters: Dict[str, Any]) -> ActionResult:
document = ActionDocument(
documentName=filename,
documentData=indexContent,
- mimeType="application/json" if resultType == "json" else "text/plain",
+ mimeType="application/json" if resultType == "json" else ("text/markdown" if resultType == "md" else "text/plain"),
validationMetadata=validationMetadata
)
diff --git a/modules/workflows/methods/methodContext/actions/neutralizeData.py b/modules/workflows/methods/methodContext/actions/neutralizeData.py
index 8e3b7185..d5ec045b 100644
--- a/modules/workflows/methods/methodContext/actions/neutralizeData.py
+++ b/modules/workflows/methods/methodContext/actions/neutralizeData.py
@@ -11,8 +11,8 @@ from modules.datamodels.datamodelExtraction import ContentExtracted, ContentPart
logger = logging.getLogger(__name__)
async def neutralizeData(self, parameters: Dict[str, Any]) -> ActionResult:
+ operationId = None
try:
- # Init progress logger
workflowId = self.services.workflow.id if self.services.workflow else f"no-workflow-{int(time.time())}"
operationId = f"context_neutralize_{workflowId}_{int(time.time())}"
@@ -228,10 +228,10 @@ async def neutralizeData(self, parameters: Dict[str, Any]) -> ActionResult:
except Exception as e:
logger.error(f"Error in data neutralization: {str(e)}")
- # Complete progress tracking with failure
try:
- self.services.chat.progressLogFinish(operationId, False)
- except:
- pass # Don't fail on progress logging errors
+ if operationId:
+ self.services.chat.progressLogFinish(operationId, False)
+ except Exception:
+ pass
return ActionResult.isFailure(error=str(e))
diff --git a/modules/workflows/methods/methodOutlook/actions/readEmails.py b/modules/workflows/methods/methodOutlook/actions/readEmails.py
index 2d325d9f..f388f818 100644
--- a/modules/workflows/methods/methodOutlook/actions/readEmails.py
+++ b/modules/workflows/methods/methodOutlook/actions/readEmails.py
@@ -29,7 +29,7 @@ async def readEmails(self, parameters: Dict[str, Any]) -> ActionResult:
connectionReference = parameters.get("connectionReference")
folder = parameters.get("folder", "Inbox")
- limit = parameters.get("limit", 10)
+ limit = parameters.get("limit", 1000)
filter = parameters.get("filter")
outputMimeType = parameters.get("outputMimeType", "application/json")
@@ -110,7 +110,6 @@ async def readEmails(self, parameters: Dict[str, Any]) -> ActionResult:
if response.status_code != 200:
logger.error(f"Graph API error: {response.status_code} - {response.text}")
logger.error(f"Request URL: {response.url}")
- logger.error(f"Request headers: {headers}")
logger.error(f"Request params: {params}")
response.raise_for_status()
@@ -217,8 +216,8 @@ async def readEmails(self, parameters: Dict[str, Any]) -> ActionResult:
if operationId:
try:
self.services.chat.progressLogFinish(operationId, False)
- except:
- pass # Don't fail on progress logging errors
+ except Exception:
+ pass
return ActionResult.isFailure(
error=str(e)
)
diff --git a/modules/workflows/methods/methodOutlook/actions/searchEmails.py b/modules/workflows/methods/methodOutlook/actions/searchEmails.py
index f8831d59..c7f839b6 100644
--- a/modules/workflows/methods/methodOutlook/actions/searchEmails.py
+++ b/modules/workflows/methods/methodOutlook/actions/searchEmails.py
@@ -93,7 +93,7 @@ async def searchEmails(self, parameters: Dict[str, Any]) -> ActionResult:
try:
error_data = response.json()
logger.error(f"Microsoft Graph API error: {response.status_code} - {error_data}")
- except:
+ except Exception:
logger.error(f"Microsoft Graph API error: {response.status_code} - {response.text}")
# Check for specific error types and provide helpful messages
@@ -111,8 +111,6 @@ async def searchEmails(self, parameters: Dict[str, Any]) -> ActionResult:
raise Exception(f"Microsoft Graph API returned {response.status_code}: {response.text}")
- response.raise_for_status()
-
search_data = response.json()
emails = search_data.get("value", [])
diff --git a/modules/workflows/methods/methodOutlook/actions/sendDraftEmail.py b/modules/workflows/methods/methodOutlook/actions/sendDraftEmail.py
index 15c35f44..1c0c80d4 100644
--- a/modules/workflows/methods/methodOutlook/actions/sendDraftEmail.py
+++ b/modules/workflows/methods/methodOutlook/actions/sendDraftEmail.py
@@ -293,8 +293,18 @@ async def sendDraftEmail(self, parameters: Dict[str, Any]) -> ActionResult:
except ImportError:
logger.error("requests module not available")
+ if operationId:
+ try:
+ self.services.chat.progressLogFinish(operationId, False)
+ except Exception:
+ pass
return ActionResult.isFailure(error="requests module not available")
except Exception as e:
logger.error(f"Error in sendDraftEmail: {str(e)}")
+ if operationId:
+ try:
+ self.services.chat.progressLogFinish(operationId, False)
+ except Exception:
+ pass
return ActionResult.isFailure(error=str(e))
diff --git a/modules/workflows/methods/methodOutlook/helpers/connection.py b/modules/workflows/methods/methodOutlook/helpers/connection.py
index 12621fd3..cd42b7f5 100644
--- a/modules/workflows/methods/methodOutlook/helpers/connection.py
+++ b/modules/workflows/methods/methodOutlook/helpers/connection.py
@@ -40,25 +40,21 @@ class ConnectionHelper:
logger.debug(f"Found connection: {userConnection.id}, status: {userConnection.status.value}, authority: {userConnection.authority.value}")
- # Get a fresh token for this connection
- token = self.services.chat.getFreshConnectionToken(userConnection.id)
- if not token:
- logger.error(f"Fresh token not found for connection: {userConnection.id}")
- logger.debug(f"Connection details: {userConnection}")
- return None
-
- logger.debug(f"Fresh token retrieved for connection {userConnection.id}")
-
- # Check if connection is active
+ # Check status BEFORE fetching token (avoids unnecessary network call)
if userConnection.status.value != "active":
logger.error(f"Connection is not active: {userConnection.id}, status: {userConnection.status.value}")
return None
+ token = self.services.chat.getFreshConnectionToken(userConnection.id)
+ if not token:
+ logger.error(f"Fresh token not found for connection: {userConnection.id}")
+ return None
+
+ logger.debug(f"Fresh token retrieved for connection {userConnection.id}")
+
return {
"id": userConnection.id,
"accessToken": token.tokenAccess,
- "refreshToken": token.tokenRefresh,
- "scopes": ["Mail.ReadWrite", "Mail.Send", "Mail.ReadWrite.Shared", "User.Read"] # Valid Microsoft Graph API scopes
}
except Exception as e:
logger.error(f"Error getting Microsoft connection: {str(e)}")
diff --git a/modules/workflows/methods/methodOutlook/helpers/emailProcessing.py b/modules/workflows/methods/methodOutlook/helpers/emailProcessing.py
index 88644a33..f1736221 100644
--- a/modules/workflows/methods/methodOutlook/helpers/emailProcessing.py
+++ b/modules/workflows/methods/methodOutlook/helpers/emailProcessing.py
@@ -57,10 +57,10 @@ class EmailProcessingHelper:
# This is an advanced search query, return as-is
return clean_query
- # For basic text search, ensure it's safe for contains() filter
- # Remove any characters that might break the OData filter syntax
- # Remove or escape characters that could break OData filter syntax
- safe_query = re.sub(r'[\\\'"]', '', clean_query)
+ # Escape single quotes for OData safety (double them)
+ safe_query = clean_query.replace("'", "''")
+ # Remove backslashes and double quotes
+ safe_query = re.sub(r'[\\"]', '', safe_query)
return safe_query
@@ -173,12 +173,14 @@ class EmailProcessingHelper:
# Handle email address filters (only if it's NOT a search query)
if '@' in filter_text and '.' in filter_text and ' ' not in filter_text and not filter_text.startswith('from:'):
- return {"$filter": f"from/fromAddress/address eq '{filter_text}'"}
+ safeEmail = filter_text.replace("'", "''")
+ return {"$filter": f"from/fromAddress/address eq '{safeEmail}'"}
# Handle OData filter conditions (contains 'eq', 'ne', 'gt', 'lt', etc.)
if any(op in filter_text.lower() for op in [' eq ', ' ne ', ' gt ', ' lt ', ' ge ', ' le ', ' and ', ' or ']):
return {"$filter": filter_text}
- # Handle text content - search in subject
- return {"$filter": f"contains(subject,'{filter_text}')"}
+ # Handle text content - search in subject (escape single quotes)
+ safeText = filter_text.replace("'", "''")
+ return {"$filter": f"contains(subject,'{safeText}')"}
diff --git a/modules/workflows/methods/methodSharepoint/actions/uploadDocument.py b/modules/workflows/methods/methodSharepoint/actions/uploadDocument.py
index e9361853..c68133d5 100644
--- a/modules/workflows/methods/methodSharepoint/actions/uploadDocument.py
+++ b/modules/workflows/methods/methodSharepoint/actions/uploadDocument.py
@@ -240,11 +240,12 @@ async def uploadDocument(self, parameters: Dict[str, Any]) -> ActionResult:
}
successfulUploads = len([r for r in uploadResults if r.get("uploadStatus") == "success"])
+ overallSuccess = successfulUploads > 0
self.services.chat.progressLogUpdate(operationId, 0.9, f"Uploaded {successfulUploads}/{len(uploadResults)} file(s)")
- self.services.chat.progressLogFinish(operationId, successfulUploads > 0)
+ self.services.chat.progressLogFinish(operationId, overallSuccess)
return ActionResult(
- success=True,
+ success=overallSuccess,
documents=[
ActionDocument(
documentName=self._generateMeaningfulFileName("sharepoint_upload", "json", None, "uploadDocument"),
@@ -260,7 +261,7 @@ async def uploadDocument(self, parameters: Dict[str, Any]) -> ActionResult:
if operationId:
try:
self.services.chat.progressLogFinish(operationId, False)
- except:
+ except Exception:
pass
return ActionResult(
success=False,
diff --git a/modules/workflows/methods/methodSharepoint/helpers/apiClient.py b/modules/workflows/methods/methodSharepoint/helpers/apiClient.py
index 542e6dde..5b02aaab 100644
--- a/modules/workflows/methods/methodSharepoint/helpers/apiClient.py
+++ b/modules/workflows/methods/methodSharepoint/helpers/apiClient.py
@@ -17,14 +17,20 @@ class ApiClientHelper:
"""Helper for Microsoft Graph API calls"""
def __init__(self, methodInstance):
- """
- Initialize API client helper.
-
- Args:
- methodInstance: Instance of MethodSharepoint (for access to services)
- """
self.method = methodInstance
self.services = methodInstance.services
+ self._session: aiohttp.ClientSession = None
+
+ async def _getSession(self) -> aiohttp.ClientSession:
+ if self._session is None or self._session.closed:
+ timeout = aiohttp.ClientTimeout(total=30)
+ self._session = aiohttp.ClientSession(timeout=timeout)
+ return self._session
+
+ async def close(self):
+ if self._session and not self._session.closed:
+ await self._session.close()
+ self._session = None
async def makeGraphApiCall(self, endpoint: str, method: str = "GET", data: bytes = None) -> Dict[str, Any]:
"""
@@ -50,60 +56,28 @@ class ApiClientHelper:
url = f"https://graph.microsoft.com/v1.0/{endpoint}"
logger.info(f"Making Graph API call: {method} {url}")
- # Set timeout to 30 seconds
- timeout = aiohttp.ClientTimeout(total=30)
+ session = await self._getSession()
- async with aiohttp.ClientSession(timeout=timeout) as session:
- if method == "GET":
- logger.debug(f"Starting GET request to {url}")
- async with session.get(url, headers=headers) as response:
- logger.info(f"Graph API response: {response.status}")
- if response.status == 200:
- result = await response.json()
- logger.debug(f"Graph API success: {len(str(result))} characters response")
- return result
- else:
- errorText = await response.text()
- logger.error(f"Graph API call failed: {response.status} - {errorText}")
- return {"error": f"API call failed: {response.status} - {errorText}"}
-
- elif method == "PUT":
- logger.debug(f"Starting PUT request to {url}")
- async with session.put(url, headers=headers, data=data) as response:
- logger.info(f"Graph API response: {response.status}")
- if response.status in [200, 201]:
- result = await response.json()
- logger.debug(f"Graph API success: {len(str(result))} characters response")
- return result
- else:
- errorText = await response.text()
- logger.error(f"Graph API call failed: {response.status} - {errorText}")
- return {"error": f"API call failed: {response.status} - {errorText}"}
-
- elif method == "POST":
- logger.debug(f"Starting POST request to {url}")
- async with session.post(url, headers=headers, data=data) as response:
- logger.info(f"Graph API response: {response.status}")
- if response.status in [200, 201]:
- result = await response.json()
- logger.debug(f"Graph API success: {len(str(result))} characters response")
- return result
- else:
- errorText = await response.text()
- logger.error(f"Graph API call failed: {response.status} - {errorText}")
- return {"error": f"API call failed: {response.status} - {errorText}"}
-
- elif method == "DELETE":
- logger.debug(f"Starting DELETE request to {url}")
- async with session.delete(url, headers=headers) as response:
- logger.info(f"Graph API response: {response.status}")
- if response.status in [200, 204]:
- logger.debug(f"Graph API DELETE success")
- return {"success": True}
- else:
- errorText = await response.text()
- logger.error(f"Graph API call failed: {response.status} - {errorText}")
- return {"error": f"API call failed: {response.status} - {errorText}"}
+ successCodes = {"GET": [200], "PUT": [200, 201], "POST": [200, 201], "DELETE": [200, 204]}
+        if method not in successCodes:
+            return {"error": f"Unsupported HTTP method: {method}"}
+        httpMethod = getattr(session, method.lower())
+
+ kwargs = {"headers": headers}
+ if data is not None:
+ kwargs["data"] = data
+
+ async with httpMethod(url, **kwargs) as response:
+ logger.info(f"Graph API response: {response.status}")
+ if response.status in successCodes.get(method, [200]):
+ if method == "DELETE":
+ return {"success": True}
+ result = await response.json()
+ return result
+ else:
+ errorText = await response.text()
+ logger.error(f"Graph API call failed: {response.status} - {errorText}")
+ return {"error": f"API call failed: {response.status} - {errorText}"}
except asyncio.TimeoutError:
logger.error(f"Graph API call timed out after 30 seconds: {endpoint}")
diff --git a/modules/workflows/processing/adaptive/adaptiveLearningEngine.py b/modules/workflows/processing/adaptive/adaptiveLearningEngine.py
index 7efdaef0..18588cf2 100644
--- a/modules/workflows/processing/adaptive/adaptiveLearningEngine.py
+++ b/modules/workflows/processing/adaptive/adaptiveLearningEngine.py
@@ -14,11 +14,19 @@ class AdaptiveLearningEngine:
"""Enhanced learning engine that tracks validation patterns and adapts prompts"""
def __init__(self):
- self.validationHistory = [] # Store validation results with context
- self.failurePatterns = defaultdict(list) # Track failure patterns by action type
- self.successPatterns = defaultdict(list) # Track success patterns
- self.actionAttempts = defaultdict(int) # Track attempt counts per action
- self.learningInsights = {} # Store learned insights per workflow
+ self.validationHistory = []
+ self.failurePatterns = defaultdict(list)
+ self.successPatterns = defaultdict(list)
+ self.actionAttempts = defaultdict(int)
+ self.learningInsights = {}
+
+ def reset(self):
+ """Reset all learned state for a new workflow session."""
+ self.validationHistory.clear()
+ self.failurePatterns.clear()
+ self.successPatterns.clear()
+ self.actionAttempts.clear()
+ self.learningInsights.clear()
def recordValidationResult(self, validationResult: Dict[str, Any], actionContext: Dict[str, Any],
workflowId: str, attemptNumber: int):
@@ -195,15 +203,6 @@ class AdaptiveLearningEngine:
for issue, count in list(commonIssues.items())[:3]: # Top 3 issues
guidance_parts.append(f"- {issue} (occurred {count} times)")
- # Add specific action guidance based on user prompt
- if "email" in userPrompt.lower() and "outlook" in userPrompt.lower():
- if any("account" in str(issue).lower() for issue in commonIssues.keys()):
- guidance_parts.append("SPECIFIC GUIDANCE: Ensure email is sent from the correct account (valueon).")
- if any("attachment" in str(issue).lower() for issue in commonIssues.keys()):
- guidance_parts.append("SPECIFIC GUIDANCE: Verify PDF attachment is properly included.")
- if any("summary" in str(issue).lower() for issue in commonIssues.keys()):
- guidance_parts.append("SPECIFIC GUIDANCE: Include German summary in email body.")
-
return "\n".join(guidance_parts) if guidance_parts else "No specific guidance available."
def _generateParameterGuidance(self, actionName: str, parametersContext: str,
@@ -219,12 +218,11 @@ class AdaptiveLearningEngine:
if attemptNumber and attemptNumber >= 3:
guidanceParts.append(f"Attempt #{attemptNumber}: Adjust parameters based on validation feedback.")
- # Generic issues summary
commonIssues = failureAnalysis.get('commonIssues', {}) or {}
if commonIssues:
guidanceParts.append("Address the following parameter issues:")
- for issueKey, issueDesc in commonIssues.items():
- guidanceParts.append(f"- {issueKey}: {issueDesc}")
+ for issueText, count in commonIssues.items():
+ guidanceParts.append(f"- {issueText} (occurred {count} time{'s' if count != 1 else ''})")
# Keep guidance format stable
return "\n".join(guidanceParts) if guidanceParts else "Use standard parameter values."
diff --git a/modules/workflows/processing/adaptive/contentValidator.py b/modules/workflows/processing/adaptive/contentValidator.py
index fe17572f..e8ba106b 100644
--- a/modules/workflows/processing/adaptive/contentValidator.py
+++ b/modules/workflows/processing/adaptive/contentValidator.py
@@ -273,16 +273,15 @@ class ContentValidator:
elif section.get("content_type") in ["paragraph", "heading"]:
if elements and isinstance(elements, list) and len(elements) > 0:
textElement = elements[0]
- # Ensure textElement is a dictionary before accessing
if isinstance(textElement, dict):
content = textElement.get("content", {})
- if isinstance(content, dict):
- text = content.get("text", "")
- else:
- text = textElement.get("text", "")
- if text:
- sectionSummary["textLength"] = len(text)
- sectionSummary["wordCount"] = len(text.split())
+ if isinstance(content, dict):
+ text = content.get("text", "")
+ else:
+ text = textElement.get("text", "")
+ if text:
+ sectionSummary["textLength"] = len(text)
+ sectionSummary["wordCount"] = len(text.split())
if section.get("textLength"):
sectionSummary["textLength"] = section.get("textLength")
@@ -290,59 +289,47 @@ class ContentValidator:
elif section.get("content_type") == "code_block":
if elements and isinstance(elements, list) and len(elements) > 0:
codeElement = elements[0]
- content = codeElement.get("content", {})
- if isinstance(content, dict):
- code = content.get("code", "")
- language = content.get("language", "")
- if code:
- sectionSummary["codeLength"] = len(code)
- sectionSummary["codeLineCount"] = code.count('\n') + 1
- if language:
- sectionSummary["language"] = language
+ if isinstance(codeElement, dict):
+ content = codeElement.get("content", {})
+ if isinstance(content, dict):
+ code = content.get("code", "")
+ language = content.get("language", "")
+ if code:
+ sectionSummary["codeLength"] = len(code)
+ sectionSummary["codeLineCount"] = code.count('\n') + 1
+ if language:
+ sectionSummary["language"] = language
- # Wenn contentPartIds vorhanden sind, aber keine elements: Füge ContentParts-Metadaten hinzu
contentPartIds = section.get("contentPartIds", [])
if contentPartIds and not elements:
- # Prüfe ob contentPartsMetadata vorhanden ist
contentPartsMetadata = section.get("contentPartsMetadata", [])
if contentPartsMetadata:
sectionSummary["contentPartsMetadata"] = contentPartsMetadata
else:
- # Fallback: Zeige nur IDs wenn Metadaten nicht verfügbar
sectionSummary["contentPartIds"] = contentPartIds
sectionSummary["note"] = "ContentParts referenced but metadata not available"
- # Include any additional fields from section (generic approach)
- # BUT exclude type-specific KPIs that don't belong to this content_type
- # AND exclude internal planning fields that confuse validation
contentType = section.get("content_type", "")
- # Define KPIs that are ONLY valid for specific types
typeExclusiveKpis = {
- "table": ["columnCount", "rowCount", "headers"], # Only for tables
- "bullet_list": ["itemCount"], # Only for bullet_list
- "list": ["itemCount"] # Only for list
+ "table": ["columnCount", "rowCount", "headers"],
+ "bullet_list": ["itemCount"],
+ "list": ["itemCount"]
}
excludedKpis = []
for kpiType, kpiFields in typeExclusiveKpis.items():
if kpiType != contentType:
excludedKpis.extend(kpiFields)
- # Internal planning fields that should NOT be shown to validation AI
- # These are implementation details, not content indicators
internalFields = ["generationHint", "useAiCall", "elements"]
for key, value in section.items():
if key not in sectionSummary and key not in internalFields and key not in excludedKpis:
- # Don't copy type-specific KPIs if they're 0/empty and we didn't extract them ourselves
- # This prevents copying columnCount: 0, rowCount: 0, headers: [] from structure generation phase
if key in ["columnCount", "rowCount", "headers", "itemCount"]:
- # Skip if it's 0/empty - we'll only include KPIs we extracted from elements
if isinstance(value, int) and value == 0:
continue
if isinstance(value, list) and len(value) == 0:
continue
- # Include simple types (str, int, float, bool, list of primitives)
if isinstance(value, (str, int, float, bool)) or (isinstance(value, list) and len(value) <= 10):
sectionSummary[key] = value
@@ -486,7 +473,7 @@ class ContentValidator:
try:
json_str = json.dumps(data)
size_bytes = len(json_str.encode('utf-8'))
- except:
+ except (TypeError, ValueError):
size_bytes = len(str(data).encode('utf-8'))
else:
size_bytes = len(str(data).encode('utf-8'))
diff --git a/modules/workflows/processing/adaptive/learningEngine.py b/modules/workflows/processing/adaptive/learningEngine.py
index 83cf7b13..8fb2f958 100644
--- a/modules/workflows/processing/adaptive/learningEngine.py
+++ b/modules/workflows/processing/adaptive/learningEngine.py
@@ -16,6 +16,11 @@ class LearningEngine:
self.strategies = {}
self.feedbackHistory = []
+ def reset(self):
+ """Reset all learned state for a new workflow session."""
+ self.strategies.clear()
+ self.feedbackHistory.clear()
+
def learnFromFeedback(self, feedback: Dict[str, Any], context: Any, taskIntent: Dict[str, Any]):
"""Learns from feedback and updates strategies - works on TASK level, not workflow level"""
try:
diff --git a/modules/workflows/processing/core/actionExecutor.py b/modules/workflows/processing/core/actionExecutor.py
index 0e4d6ee4..6b1e3544 100644
--- a/modules/workflows/processing/core/actionExecutor.py
+++ b/modules/workflows/processing/core/actionExecutor.py
@@ -136,6 +136,7 @@ class ActionExecutor:
# Execute action and track success for progress log
result = None
actionSuccess = False
+ actionError = None
try:
result = await self.executeAction(
methodName=action.execMethod,
@@ -144,23 +145,23 @@ class ActionExecutor:
)
actionSuccess = result.success if result else False
except Exception as e:
- logger.error(f"Error executing action: {str(e)}")
+ logger.error(f"Error executing action {action.execMethod}.{action.execAction}: {str(e)}")
actionSuccess = False
+ actionError = str(e)
finally:
- # Finish action progress tracking
try:
self.services.chat.progressLogFinish(actionOperationId, actionSuccess)
except Exception as e:
logger.error(f"Error finishing action progress log: {str(e)}")
- # If action execution failed, return error result
if result is None:
- action.setError("Action execution failed")
+ errorMsg = actionError or "Action execution failed"
+ action.setError(errorMsg)
return ActionResult(
success=False,
documents=[],
resultLabel=action.execResultLabel,
- error="Action execution failed"
+ error=errorMsg
)
resultLabel = action.execResultLabel
diff --git a/modules/workflows/processing/core/messageCreator.py b/modules/workflows/processing/core/messageCreator.py
index a4ae05e9..48df832d 100644
--- a/modules/workflows/processing/core/messageCreator.py
+++ b/modules/workflows/processing/core/messageCreator.py
@@ -319,56 +319,27 @@ class MessageCreator:
except Exception as e:
logger.error(f"Error creating error message: {str(e)}")
- def _extractRoundNumberFromLabel(self, label: str) -> int:
- """Extract round number from a document label like 'round1_task1_action1_diagram_analysis'"""
+ def _extractNumberFromLabelPart(self, label: str, prefix: str) -> int:
+ """Extract number following a prefix in a label like 'round1_task1_action1_context'.
+ Works for prefix='round', 'task', 'action'. Returns 0 on failure.
+ """
try:
if not label or not isinstance(label, str):
return 0
- # Parse label format: round{round}_task{task}_action{action}_{context}
- if label.startswith('round'):
- roundPart = label.split('_')[0] # Get 'round1' part
- if roundPart.startswith('round'):
- roundNumber = roundPart[5:] # Remove 'round' prefix
- return int(roundNumber)
-
- return 0
+ import re
+ pattern = rf'{prefix}(\d+)'
+ match = re.search(pattern, label)
+ return int(match.group(1)) if match else 0
except Exception as e:
- logger.warning(f"Could not extract round number from label '{label}': {str(e)}")
+ logger.warning(f"Could not extract {prefix} number from label '{label}': {str(e)}")
return 0
+ def _extractRoundNumberFromLabel(self, label: str) -> int:
+ return self._extractNumberFromLabelPart(label, 'round')
+
def _extractTaskNumberFromLabel(self, label: str) -> int:
- """Extract task number from a document label like 'round1_task1_action1_diagram_analysis'"""
- try:
- if not label or not isinstance(label, str):
- return 0
-
- # Parse label format: round{round}_task{task}_action{action}_{context}
- if '_task' in label:
- taskPart = label.split('_task')[1]
- if taskPart and '_' in taskPart:
- taskNumber = taskPart.split('_')[0]
- return int(taskNumber)
-
- return 0
- except Exception as e:
- logger.warning(f"Could not extract task number from label '{label}': {str(e)}")
- return 0
+ return self._extractNumberFromLabelPart(label, 'task')
def _extractActionNumberFromLabel(self, label: str) -> int:
- """Extract action number from a document label like 'round1_task1_action1_diagram_analysis'"""
- try:
- if not label or not isinstance(label, str):
- return 0
-
- # Parse label format: round{round}_task{task}_action{action}_{context}
- if '_action' in label:
- actionPart = label.split('_action')[1]
- if actionPart and '_' in actionPart:
- actionNumber = actionPart.split('_')[0]
- return int(actionNumber)
-
- return 0
- except Exception as e:
- logger.warning(f"Could not extract action number from label '{label}': {str(e)}")
- return 0
+ return self._extractNumberFromLabelPart(label, 'action')
diff --git a/modules/workflows/processing/core/taskPlanner.py b/modules/workflows/processing/core/taskPlanner.py
index b1e1def7..233488fe 100644
--- a/modules/workflows/processing/core/taskPlanner.py
+++ b/modules/workflows/processing/core/taskPlanner.py
@@ -7,7 +7,6 @@ import json
import logging
from typing import Dict, Any
from modules.datamodels.datamodelChat import TaskStep, TaskContext, TaskPlan, WorkflowModeEnum
-from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum, ProcessingModeEnum, PriorityEnum
from modules.workflows.processing.shared.promptGenerationTaskplan import (
generateTaskPlanningPrompt
)
@@ -107,17 +106,6 @@ class TaskPlanner:
taskPlanningPromptTemplate = bundle.prompt
placeholders = bundle.placeholders
- # Centralized AI call: Task planning (quality, detailed) with placeholders
- options = AiCallOptions(
- operationType=OperationTypeEnum.PLAN,
- priority=PriorityEnum.QUALITY,
- compressPrompt=False,
- compressContext=False,
- processingMode=ProcessingModeEnum.DETAILED,
- maxCost=0.10,
- maxProcessingTime=30
- )
-
prompt = await self.services.ai.callAiPlanning(
prompt=taskPlanningPromptTemplate,
placeholders=placeholders,
@@ -141,9 +129,11 @@ class TaskPlanner:
raise ValueError("Task plan missing 'tasks' field")
except Exception as e:
logger.error(f"Error parsing task plan response: {str(e)}")
- taskPlanDict = {'tasks': []}
+ raise ValueError(f"Failed to parse AI task plan response: {str(e)}") from e
- if not self._validateTaskPlan(taskPlanDict):
+ from modules.workflows.processing.core.validator import WorkflowValidator
+ validator = WorkflowValidator(self.services)
+ if not validator.validateTask(taskPlanDict):
logger.error("Generated task plan failed validation")
logger.error(f"AI Response: {prompt}")
logger.error(f"Parsed Task Plan: {json.dumps(taskPlanDict, indent=2)}")
@@ -207,61 +197,4 @@ class TaskPlanner:
logger.error(f"Error in generateTaskPlan: {str(e)}")
raise
-
-
- def _validateTaskPlan(self, taskPlan: Dict[str, Any]) -> bool:
- """Validate task plan structure"""
- try:
- if not isinstance(taskPlan, dict):
- logger.error("Task plan is not a dictionary")
- return False
-
- if 'tasks' not in taskPlan or not isinstance(taskPlan['tasks'], list):
- logger.error(f"Task plan missing 'tasks' field or not a list. Found: {type(taskPlan.get('tasks', 'MISSING'))}")
- return False
-
- # First pass: collect all task IDs to validate dependencies
- taskIds = set()
- for task in taskPlan['tasks']:
- if not isinstance(task, dict):
- logger.error(f"Task is not a dictionary: {type(task)}")
- return False
- if 'id' not in task:
- logger.error(f"Task missing 'id' field: {task}")
- return False
- taskIds.add(task['id'])
-
- # Second pass: validate each task
- for i, task in enumerate(taskPlan['tasks']):
- if not isinstance(task, dict):
- logger.error(f"Task {i} is not a dictionary: {type(task)}")
- return False
-
- requiredFields = ['id', 'objective', 'successCriteria']
- missingFields = [field for field in requiredFields if field not in task]
- if missingFields:
- logger.error(f"Task {i} missing required fields: {missingFields}")
- return False
-
- # Check for duplicate IDs (shouldn't happen after first pass, but safety check)
- if task['id'] in taskIds and list(taskPlan['tasks']).count(task['id']) > 1:
- logger.error(f"Task {i} has duplicate ID: {task['id']}")
- return False
-
- dependencies = task.get('dependencies', [])
- if not isinstance(dependencies, list):
- logger.error(f"Task {i} dependencies is not a list: {type(dependencies)}")
- return False
-
- for dep in dependencies:
- if dep not in taskIds and dep != 'task_0':
- logger.error(f"Task {i} has invalid dependency: {dep} (available: {list(taskIds) + ['task_0']})")
- return False
-
- logger.info(f"Task plan validation successful with {len(taskIds)} tasks")
- return True
-
- except Exception as e:
- logger.error(f"Error validating task plan: {str(e)}")
- return False
\ No newline at end of file
diff --git a/modules/workflows/processing/core/validator.py b/modules/workflows/processing/core/validator.py
index 74d67b19..67c685e8 100644
--- a/modules/workflows/processing/core/validator.py
+++ b/modules/workflows/processing/core/validator.py
@@ -25,40 +25,35 @@ class WorkflowValidator:
logger.error(f"Task plan missing 'tasks' field or not a list. Found: {type(taskPlan.get('tasks', 'MISSING'))}")
return False
- # First pass: collect all task IDs to validate dependencies
+ # Single pass: collect IDs (detect duplicates) and validate each task
taskIds = set()
- for task in taskPlan['tasks']:
- if not isinstance(task, dict):
- logger.error(f"Task is not a dictionary: {type(task)}")
- return False
- if 'id' not in task:
- logger.error(f"Task missing 'id' field: {task}")
- return False
- taskIds.add(task['id'])
-
- # Second pass: validate each task
for i, task in enumerate(taskPlan['tasks']):
if not isinstance(task, dict):
logger.error(f"Task {i} is not a dictionary: {type(task)}")
return False
+ if 'id' not in task:
+ logger.error(f"Task {i} missing 'id' field: {task}")
+ return False
+
+ if task['id'] in taskIds:
+ logger.error(f"Task {i} has duplicate ID: {task['id']}")
+ return False
+ taskIds.add(task['id'])
requiredFields = ['id', 'objective', 'successCriteria']
missingFields = [field for field in requiredFields if field not in task]
if missingFields:
logger.error(f"Task {i} missing required fields: {missingFields}")
return False
-
- # Check for duplicate IDs (shouldn't happen after first pass, but safety check)
- if task['id'] in taskIds and list(taskPlan['tasks']).count(task['id']) > 1:
- logger.error(f"Task {i} has duplicate ID: {task['id']}")
- return False
dependencies = task.get('dependencies', [])
if not isinstance(dependencies, list):
logger.error(f"Task {i} dependencies is not a list: {type(dependencies)}")
return False
-
- for dep in dependencies:
+
+ # Second pass: validate dependencies (all IDs now known)
+ for i, task in enumerate(taskPlan['tasks']):
+ for dep in task.get('dependencies', []):
if dep not in taskIds and dep != 'task_0':
logger.error(f"Task {i} has invalid dependency: {dep} (available: {list(taskIds) + ['task_0']})")
return False
@@ -93,7 +88,7 @@ class WorkflowValidator:
missingFields = []
for field in requiredFields:
- if field not in action or not action[field]:
+ if field not in action or action[field] is None:
missingFields.append(field)
if missingFields:
logger.error(f"Action {i} missing required fields: {missingFields}")
diff --git a/modules/workflows/processing/modes/modeAutomation.py b/modules/workflows/processing/modes/modeAutomation.py
index 4e3c7853..1d0121b9 100644
--- a/modules/workflows/processing/modes/modeAutomation.py
+++ b/modules/workflows/processing/modes/modeAutomation.py
@@ -36,6 +36,9 @@ class AutomationMode(BaseMode):
- Or as direct JSON in userInput
"""
try:
+ # Reset action map to prevent state leaks from previous runs
+ self.taskActionMap = {}
+
# AUTOMATION mode ALWAYS requires a JSON plan to be provided in userInput
# Try to extract plan from userInput (embedded JSON or direct JSON)
templatePlan = None
@@ -340,78 +343,6 @@ class AutomationMode(BaseMode):
error=str(e)
)
- def _createActionItem(self, actionData: Dict[str, Any]) -> Optional[ActionItem]:
- """Create ActionItem from action data"""
- try:
- import uuid
- from datetime import datetime, timezone
-
- # Ensure ID is present
- if "id" not in actionData or not actionData["id"]:
- actionData["id"] = f"action_{uuid.uuid4()}"
-
- # Ensure required fields
- if "status" not in actionData:
- actionData["status"] = TaskStatus.PENDING
-
- if "execMethod" not in actionData:
- logger.error("execMethod is required for task action")
- return None
-
- if "execAction" not in actionData:
- logger.error("execAction is required for task action")
- return None
-
- if "execParameters" not in actionData:
- actionData["execParameters"] = {}
-
- # Use generic field separation based on ActionItem model
- simpleFields, objectFields = self.services.interfaceDbChat._separateObjectFields(ActionItem, actionData)
-
- # Create action in database
- createdAction = self.services.interfaceDbChat.db.recordCreate(ActionItem, simpleFields)
-
- # Convert to ActionItem model
- return ActionItem(
- id=createdAction["id"],
- execMethod=createdAction["execMethod"],
- execAction=createdAction["execAction"],
- execParameters=createdAction.get("execParameters", {}),
- execResultLabel=createdAction.get("execResultLabel"),
- expectedDocumentFormats=createdAction.get("expectedDocumentFormats"),
- status=createdAction.get("status", TaskStatus.PENDING),
- error=createdAction.get("error"),
- retryCount=createdAction.get("retryCount", 0),
- retryMax=createdAction.get("retryMax", 3),
- processingTime=createdAction.get("processingTime"),
- timestamp=parseTimestamp(createdAction.get("timestamp"), default=self.services.utils.timestampGetUtc()),
- result=createdAction.get("result"),
- userMessage=createdAction.get("userMessage")
- )
-
- except Exception as e:
- logger.error(f"Error creating task action: {str(e)}")
- return None
-
- def _updateWorkflowBeforeExecutingTask(self, taskNumber: int):
- """Update workflow object before executing a task"""
- try:
- workflow = self.services.workflow
- updateData = {
- "currentTask": taskNumber,
- "currentAction": 0,
- "totalActions": 0
- }
-
- workflow.currentTask = taskNumber
- workflow.currentAction = 0
- workflow.totalActions = 0
-
- self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
- logger.info(f"Updated workflow {workflow.id} before executing task {taskNumber}")
- except Exception as e:
- logger.error(f"Error updating workflow before executing task: {str(e)}")
-
def _updateWorkflowAfterActionPlanning(self, totalActions: int):
"""Update workflow object after action planning"""
try:
@@ -423,17 +354,6 @@ class AutomationMode(BaseMode):
except Exception as e:
logger.error(f"Error updating workflow after action planning: {str(e)}")
- def _updateWorkflowBeforeExecutingAction(self, actionNumber: int):
- """Update workflow object before executing an action"""
- try:
- workflow = self.services.workflow
- updateData = {"currentAction": actionNumber}
- workflow.currentAction = actionNumber
- self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
- logger.info(f"Updated workflow {workflow.id} before executing action {actionNumber}")
- except Exception as e:
- logger.error(f"Error updating workflow before executing action: {str(e)}")
-
def _setWorkflowTotals(self, totalTasks: int = None, totalActions: int = None):
"""Set total counts for workflow progress tracking"""
try:
diff --git a/modules/workflows/processing/modes/modeBase.py b/modules/workflows/processing/modes/modeBase.py
index 770c868a..fe9a5da6 100644
--- a/modules/workflows/processing/modes/modeBase.py
+++ b/modules/workflows/processing/modes/modeBase.py
@@ -4,14 +4,16 @@
# Abstract base class for workflow modes
from abc import ABC, abstractmethod
+import uuid
import logging
-from typing import List, Dict, Any
-from modules.datamodels.datamodelChat import TaskStep, TaskContext, TaskResult, ActionItem
+from typing import List, Dict, Any, Optional
+from modules.datamodels.datamodelChat import TaskStep, TaskContext, TaskResult, ActionItem, TaskStatus
from modules.datamodels.datamodelChat import ChatWorkflow
from modules.workflows.processing.core.taskPlanner import TaskPlanner
from modules.workflows.processing.core.actionExecutor import ActionExecutor
from modules.workflows.processing.core.messageCreator import MessageCreator
from modules.workflows.processing.core.validator import WorkflowValidator
+from modules.shared.timeUtils import parseTimestamp
logger = logging.getLogger(__name__)
@@ -44,3 +46,75 @@ class BaseMode(ABC):
async def createTaskPlanMessage(self, taskPlan, workflow: ChatWorkflow):
"""Create task plan message - common to all modes"""
return await self.messageCreator.createTaskPlanMessage(taskPlan, workflow)
+
+ def _createActionItem(self, actionData: Dict[str, Any]) -> Optional[ActionItem]:
+ """Create an ActionItem from action data, persist to DB, and return the model instance"""
+ try:
+ if "id" not in actionData or not actionData["id"]:
+ actionData["id"] = f"action_{uuid.uuid4()}"
+
+ if "status" not in actionData:
+ actionData["status"] = TaskStatus.PENDING
+
+ if "execMethod" not in actionData:
+ logger.error("execMethod is required for task action")
+ return None
+
+ if "execAction" not in actionData:
+ logger.error("execAction is required for task action")
+ return None
+
+ if "execParameters" not in actionData:
+ actionData["execParameters"] = {}
+
+            simpleFields, _ = self.services.interfaceDbChat._separateObjectFields(ActionItem, actionData)
+ createdAction = self.services.interfaceDbChat.db.recordCreate(ActionItem, simpleFields)
+
+ return ActionItem(
+ id=createdAction["id"],
+ execMethod=createdAction["execMethod"],
+ execAction=createdAction["execAction"],
+ execParameters=createdAction.get("execParameters", {}),
+ execResultLabel=createdAction.get("execResultLabel"),
+ expectedDocumentFormats=createdAction.get("expectedDocumentFormats"),
+ status=createdAction.get("status", TaskStatus.PENDING),
+ error=createdAction.get("error"),
+ retryCount=createdAction.get("retryCount", 0),
+ retryMax=createdAction.get("retryMax", 3),
+ processingTime=createdAction.get("processingTime"),
+ timestamp=parseTimestamp(createdAction.get("timestamp"), default=self.services.utils.timestampGetUtc()),
+ result=createdAction.get("result"),
+ userMessage=createdAction.get("userMessage")
+ )
+
+ except Exception as e:
+ logger.error(f"Error creating task action: {str(e)}")
+ return None
+
+ def _updateWorkflowBeforeExecutingTask(self, taskNumber: int):
+ """Update workflow state before executing a task"""
+ try:
+ workflow = self.services.workflow
+ updateData = {
+ "currentTask": taskNumber,
+ "currentAction": 0,
+ "totalActions": 0
+ }
+ workflow.currentTask = taskNumber
+ workflow.currentAction = 0
+ workflow.totalActions = 0
+ self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
+ logger.info(f"Updated workflow {workflow.id} before executing task {taskNumber}")
+ except Exception as e:
+ logger.error(f"Error updating workflow before executing task: {str(e)}")
+
+ def _updateWorkflowBeforeExecutingAction(self, actionNumber: int):
+ """Update workflow state before executing an action"""
+ try:
+ workflow = self.services.workflow
+ updateData = {"currentAction": actionNumber}
+ workflow.currentAction = actionNumber
+ self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
+ logger.info(f"Updated workflow {workflow.id} before executing action {actionNumber}")
+ except Exception as e:
+ logger.error(f"Error updating workflow before executing action: {str(e)}")
diff --git a/modules/workflows/processing/modes/modeDynamic.py b/modules/workflows/processing/modes/modeDynamic.py
index e59a9253..eeae30e5 100644
--- a/modules/workflows/processing/modes/modeDynamic.py
+++ b/modules/workflows/processing/modes/modeDynamic.py
@@ -116,6 +116,7 @@ class DynamicMode(BaseMode):
step = 1
decision = None
+ lastStepFailed = False
while step <= state.max_steps:
checkWorkflowStopped(self.services)
@@ -282,6 +283,7 @@ class DynamicMode(BaseMode):
except Exception as e:
logger.error(f"Dynamic step {step} error: {e}")
+ lastStepFailed = True
break
# NEW: Use adaptive stopping logic
@@ -296,19 +298,24 @@ class DynamicMode(BaseMode):
step += 1
# Summarize task result for dynamic mode
- status = TaskStatus.COMPLETED
- success = True
- # Get feedback from last decision if available
lastDecision = context.previousReviewResult[-1] if hasattr(context, 'previousReviewResult') and context.previousReviewResult else None
feedback = lastDecision.reason if lastDecision and isinstance(lastDecision, ReviewResult) else 'Completed'
- if lastDecision and isinstance(lastDecision, ReviewResult) and lastDecision.status == 'success':
+
+ if lastStepFailed:
+ status = TaskStatus.FAILED
+ success = False
+ elif lastDecision and isinstance(lastDecision, ReviewResult) and lastDecision.status in ('stop', 'failed'):
+ status = TaskStatus.FAILED
+ success = False
+ else:
+ status = TaskStatus.COMPLETED
success = True
# Create proper ReviewResult for completion message
completionReviewResult = ReviewResult(
- status='success',
+ status='success' if success else 'failed',
reason=feedback,
- qualityScore=lastDecision.qualityScore if lastDecision and isinstance(lastDecision, ReviewResult) else 8.0,
+ qualityScore=lastDecision.qualityScore if lastDecision and isinstance(lastDecision, ReviewResult) else (8.0 if success else 2.0),
metCriteria=[],
improvements=[]
)
@@ -1003,12 +1010,15 @@ class DynamicMode(BaseMode):
# Detect repeated actions
actionCounts = {}
for entry in actionHistory:
- # Extract action name (after first space, before next space or {)
- parts = entry.split()
- if len(parts) > 1:
- # Skip "Step", "Refinement" prefixes and get the action name
- actionName = parts[1] if parts[0] in ['Step', 'Refinement'] else parts[0]
- actionCounts[actionName] = actionCounts.get(actionName, 0) + 1
+ # Format: "Step N: actionName ..." or "Refinement N: actionName ..."
+ # Extract the action name after "prefix N:"
+ colonIdx = entry.find(':')
+ if colonIdx >= 0:
+ afterColon = entry[colonIdx + 1:].strip().split()
+ actionName = afterColon[0] if afterColon else 'unknown'
+ else:
+ actionName = entry.split()[0] if entry.split() else 'unknown'
+ actionCounts[actionName] = actionCounts.get(actionName, 0) + 1
repeatedActions = [action for action, count in actionCounts.items() if count >= 2]
if repeatedActions:
@@ -1172,150 +1182,6 @@ Return only the user-friendly message, no technical details."""
logger.error(f"Error generating action result message: {str(e)}")
return f"{method}.{actionName} action completed"
- def _createActionItem(self, actionData: Dict[str, Any]) -> ActionItem:
- """Creates a new task action for Dynamic mode"""
- try:
- import uuid
-
- # Ensure ID is present
- if "id" not in actionData or not actionData["id"]:
- actionData["id"] = f"action_{uuid.uuid4()}"
-
- # Ensure required fields
- if "status" not in actionData:
- actionData["status"] = TaskStatus.PENDING
-
- if "execMethod" not in actionData:
- logger.error("execMethod is required for task action")
- return None
-
- if "execAction" not in actionData:
- logger.error("execAction is required for task action")
- return None
-
- if "execParameters" not in actionData:
- actionData["execParameters"] = {}
-
- # Use generic field separation based on ActionItem model
- simpleFields, objectFields = self.services.interfaceDbChat._separateObjectFields(ActionItem, actionData)
-
- # Create action in database
- createdAction = self.services.interfaceDbChat.db.recordCreate(ActionItem, simpleFields)
-
- # Convert to ActionItem model
- return ActionItem(
- id=createdAction["id"],
- execMethod=createdAction["execMethod"],
- execAction=createdAction["execAction"],
- execParameters=createdAction.get("execParameters", {}),
- execResultLabel=createdAction.get("execResultLabel"),
- expectedDocumentFormats=createdAction.get("expectedDocumentFormats"),
- status=createdAction.get("status", TaskStatus.PENDING),
- error=createdAction.get("error"),
- retryCount=createdAction.get("retryCount", 0),
- retryMax=createdAction.get("retryMax", 3),
- processingTime=createdAction.get("processingTime"),
- timestamp=parseTimestamp(createdAction.get("timestamp"), default=self.services.utils.timestampGetUtc()),
- result=createdAction.get("result"),
- resultDocuments=createdAction.get("resultDocuments", []),
- userMessage=createdAction.get("userMessage")
- )
-
- except Exception as e:
- logger.error(f"Error creating task action: {str(e)}")
- return None
- def _updateWorkflowBeforeExecutingTask(self, taskNumber: int):
- """Update workflow object before executing a task"""
- try:
- workflow = self.services.workflow
- updateData = {
- "currentTask": taskNumber,
- "currentAction": 0,
- "totalActions": 0
- }
-
- # Update workflow object
- workflow.currentTask = taskNumber
- workflow.currentAction = 0
- workflow.totalActions = 0
-
- # Update in database
- self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
- logger.info(f"Updated workflow {workflow.id} before executing task {taskNumber}: {updateData}")
-
- except Exception as e:
- logger.error(f"Error updating workflow before executing task: {str(e)}")
- def _updateWorkflowBeforeExecutingAction(self, actionNumber: int):
- """Update workflow object before executing an action"""
- try:
- workflow = self.services.workflow
- updateData = {
- "currentAction": actionNumber
- }
-
- # Update workflow object
- workflow.currentAction = actionNumber
-
- # Update in database
- self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
- logger.info(f"Updated workflow {workflow.id} before executing action {actionNumber}: {updateData}")
-
- except Exception as e:
- logger.error(f"Error updating workflow before executing action: {str(e)}")
-
- def _createActionItem(self, actionData: Dict[str, Any]) -> ActionItem:
- """Creates a new task action for Dynamic mode"""
- try:
- import uuid
-
- # Ensure ID is present
- if "id" not in actionData or not actionData["id"]:
- actionData["id"] = f"action_{uuid.uuid4()}"
-
- # Ensure required fields
- if "status" not in actionData:
- actionData["status"] = TaskStatus.PENDING
-
- if "execMethod" not in actionData:
- logger.error("execMethod is required for task action")
- return None
-
- if "execAction" not in actionData:
- logger.error("execAction is required for task action")
- return None
-
- if "execParameters" not in actionData:
- actionData["execParameters"] = {}
-
- # Use generic field separation based on ActionItem model
- simpleFields, objectFields = self.services.interfaceDbChat._separateObjectFields(ActionItem, actionData)
-
- # Create action in database
- createdAction = self.services.interfaceDbChat.db.recordCreate(ActionItem, simpleFields)
-
- # Convert to ActionItem model
- return ActionItem(
- id=createdAction["id"],
- execMethod=createdAction["execMethod"],
- execAction=createdAction["execAction"],
- execParameters=createdAction.get("execParameters", {}),
- execResultLabel=createdAction.get("execResultLabel"),
- expectedDocumentFormats=createdAction.get("expectedDocumentFormats"),
- status=createdAction.get("status", TaskStatus.PENDING),
- error=createdAction.get("error"),
- retryCount=createdAction.get("retryCount", 0),
- retryMax=createdAction.get("retryMax", 3),
- processingTime=createdAction.get("processingTime"),
- timestamp=parseTimestamp(createdAction.get("timestamp"), default=self.services.utils.timestampGetUtc()),
- result=createdAction.get("result"),
- resultDocuments=createdAction.get("resultDocuments", []),
- userMessage=createdAction.get("userMessage")
- )
-
- except Exception as e:
- logger.error(f"Error creating task action: {str(e)}")
- return None
-
diff --git a/modules/workflows/processing/shared/executionState.py b/modules/workflows/processing/shared/executionState.py
index 1cdf0d53..e5e48a01 100644
--- a/modules/workflows/processing/shared/executionState.py
+++ b/modules/workflows/processing/shared/executionState.py
@@ -5,23 +5,22 @@
import logging
from typing import List, Optional
-from modules.datamodels.datamodelChat import TaskStep, ActionResult, Observation
+from modules.datamodels.datamodelChat import TaskStep, ActionResult
logger = logging.getLogger(__name__)
class TaskExecutionState:
"""Manages execution state for a task with retry logic"""
- def __init__(self, task_step: TaskStep):
- self.task_step = task_step
- self.successful_actions: List[ActionResult] = [] # Preserved across retries
- self.failed_actions: List[ActionResult] = [] # For analysis
+ def __init__(self, taskStep: TaskStep):
+ self.task_step = taskStep
+ self.successful_actions: List[ActionResult] = []
+ self.failed_actions: List[ActionResult] = []
self.current_action_index = 0
self.retry_count = 0
self.max_retries = 3
- # Iterative loop (dynamic mode)
self.current_step = 0
- self.max_steps = 0 # Will be overridden by workflow.maxSteps from workflowManager.py
+ self.max_steps = 0
def addSuccessfulAction(self, action_result: ActionResult):
"""Add a successful action to the state"""
@@ -58,48 +57,25 @@ class TaskExecutionState:
patterns.append("permission_issues")
return list(set(patterns))
-def shouldContinue(observation: Optional[Observation], review=None, current_step: int = 0, max_steps: int = 1) -> bool:
- """Helper to decide if the iterative loop should continue
+def shouldContinue(observation=None, review=None, current_step: int = 0, max_steps: int = 1) -> bool:
+ """Helper to decide if the iterative loop should continue.
- Args:
- observation: Observation Pydantic model with action execution results
- review: ReviewResult or dict with review decision (optional)
- current_step: Current step number in the iteration
- max_steps: Maximum allowed steps
-
- Returns:
- bool: True if loop should continue, False if should stop
-
- Logic:
- - Stop if max steps reached
- - Stop if review indicates 'stop' or success criteria are met
- - Continue if observation indicates failure but allow one more step (caller caps by max_steps)
+ Returns False if max steps reached or review indicates 'stop'/'success'.
"""
try:
- # Stop if max steps reached
if current_step >= max_steps:
logger.info(f"Stopping workflow: reached max_steps limit ({current_step} >= {max_steps})")
return False
- # Check review decision (can be ReviewResult model or dict)
if review:
if hasattr(review, 'status'):
- # ReviewResult Pydantic model
if review.status in ('stop', 'success'):
return False
elif isinstance(review, dict):
- # Legacy dict format
decision = review.get('decision') or review.get('status')
if decision in ('stop', 'success'):
return False
- # Check observation: if hard failure with no documents, allow one more step
- # The caller will enforce max_steps limit
- if observation:
- if observation.success is False and observation.documentsCount == 0:
- # Allow next step once; the caller caps by max_steps
- return True
-
return True
except Exception as e:
logger.warning(f"Error in shouldContinue: {e}")
diff --git a/modules/workflows/processing/shared/methodDiscovery.py b/modules/workflows/processing/shared/methodDiscovery.py
index e3bfa769..b8403b3d 100644
--- a/modules/workflows/processing/shared/methodDiscovery.py
+++ b/modules/workflows/processing/shared/methodDiscovery.py
@@ -19,117 +19,57 @@ methods = {}
def discoverMethods(serviceCenter):
"""Dynamically discover all method classes and their actions in modules methods package.
- CRITICAL: If methods are already discovered, updates their Services reference to ensure
- they use the current workflow (self.services.workflow). This prevents stale workflow IDs
- from being used when a new workflow starts.
+ Always creates fresh method instances bound to the given serviceCenter,
+ preventing stale or cross-workflow service references.
"""
+ global methods
try:
- # Import the methods package
methodsPackage = importlib.import_module('modules.workflows.methods')
- # Discover all modules and packages in the methods package
+ # Clear and rebuild to prevent cross-workflow state contamination
+ methods.clear()
+ uniqueCount = 0
+
for _, name, isPkg in pkgutil.iter_modules(methodsPackage.__path__):
if name.startswith('method'):
try:
- if isPkg:
- # Package (folder) - import __init__.py which exports the Method class
- module = importlib.import_module(f'modules.workflows.methods.{name}')
- else:
- # Module (file) - import directly
- module = importlib.import_module(f'modules.workflows.methods.{name}')
+ module = importlib.import_module(f'modules.workflows.methods.{name}')
- # Find all classes in the module that inherit from MethodBase
for itemName, item in inspect.getmembers(module):
if (inspect.isclass(item) and
issubclass(item, MethodBase) and
item != MethodBase):
- # Check if method already exists in cache
shortName = itemName.replace('Method', '').lower()
- if itemName in methods or shortName in methods:
- # Method already discovered - update Services reference to use current workflow
- existingMethodInfo = methods.get(itemName) or methods.get(shortName)
- if existingMethodInfo and existingMethodInfo.get('instance'):
- existingMethodInfo['instance'].services = serviceCenter
- logger.debug(f"Updated Services reference for cached method {itemName} to use current workflow")
- else:
- # Method exists but instance is missing - recreate it
- methodInstance = item(serviceCenter)
- actions = methodInstance.actions
- methodInfo = {
- 'instance': methodInstance,
- 'actions': actions,
- 'description': item.__doc__ or f"Method {itemName}"
- }
- methods[itemName] = methodInfo
- methods[shortName] = methodInfo
- logger.info(f"Recreated method {itemName} (short: {shortName}) with {len(actions)} actions")
- else:
- # Method not discovered yet - create new instance
- methodInstance = item(serviceCenter)
-
- # Use the actions property from MethodBase which handles WorkflowActionDefinition
- actions = methodInstance.actions
-
- # Create method info
- methodInfo = {
- 'instance': methodInstance,
- 'actions': actions,
- 'description': item.__doc__ or f"Method {itemName}"
- }
-
- # Store the method with full class name
- methods[itemName] = methodInfo
-
- # Also store with short name for action executor access
- methods[shortName] = methodInfo
-
- logger.info(f"Discovered method {itemName} (short: {shortName}) with {len(actions)} actions")
+
+ # Skip if already processed (via another module path)
+ if itemName in methods:
+ continue
+
+ methodInstance = item(serviceCenter)
+ actions = methodInstance.actions
+
+ methodInfo = {
+ 'instance': methodInstance,
+ 'actions': actions,
+ 'description': item.__doc__ or f"Method {itemName}"
+ }
+
+ methods[itemName] = methodInfo
+ methods[shortName] = methodInfo
+ uniqueCount += 1
+
+ logger.info(f"Discovered method {itemName} (short: {shortName}) with {len(actions)} actions")
except Exception as e:
logger.error(f"Error discovering method {name}: {str(e)}")
continue
- logger.info(f"Discovered/updated {len(methods)} method entries total")
+ logger.info(f"Discovered {uniqueCount} unique methods ({len(methods)} entries with aliases)")
except Exception as e:
logger.error(f"Error discovering methods: {str(e)}")
-def getMethodsList(serviceCenter):
- """Get a list of available methods with their signatures"""
- if not methods:
- discoverMethods(serviceCenter)
-
- methodsList = []
- for methodName, methodInfo in methods.items():
- methodDescription = methodInfo['description']
- actionsList = []
-
- for actionName, actionInfo in methodInfo['actions'].items():
- actionDescription = actionInfo['description']
- parameters = actionInfo['parameters']
-
- # Build parameter signature
- paramSig = []
- for paramName, paramInfo in parameters.items():
- paramType = paramInfo['type']
- paramRequired = paramInfo['required']
- paramDefault = paramInfo['default']
-
- if paramRequired:
- paramSig.append(f"{paramName}: {paramType}")
- else:
- defaultStr = f" = {paramDefault}" if paramDefault is not None else " = None"
- paramSig.append(f"{paramName}: {paramType}{defaultStr}")
-
- paramSignature = f"({', '.join(paramSig)})" if paramSig else "()"
- actionsList.append(f"- {actionName}{paramSignature}: {actionDescription}")
-
- actionsStr = "\n".join(actionsList)
- methodsList.append(f"**{methodName}**: {methodDescription}\n{actionsStr}")
-
- return "\n\n".join(methodsList)
-
def getActionParameterList(methodName: str, actionName: str, methods: Dict[str, Any]) -> str:
"""Get action parameter list from WorkflowActionParameter structure for AI parameter generation (list only)."""
try:
diff --git a/modules/workflows/processing/shared/placeholderFactory.py b/modules/workflows/processing/shared/placeholderFactory.py
index 136dd2cb..3d1a9d83 100644
--- a/modules/workflows/processing/shared/placeholderFactory.py
+++ b/modules/workflows/processing/shared/placeholderFactory.py
@@ -39,6 +39,26 @@ from typing import Dict, Any, List
logger = logging.getLogger(__name__)
from modules.workflows.processing.shared.methodDiscovery import (methods, discoverMethods)
+from modules.datamodels.datamodelChat import Observation
+
+
+def _observationToDict(obs) -> dict:
+ """Convert an Observation (Pydantic model or dict) to a plain dict."""
+ if isinstance(obs, dict):
+ return obs.copy()
+ if hasattr(obs, 'model_dump'):
+ return obs.model_dump(exclude_none=True)
+ if hasattr(obs, 'dict'):
+ return obs.dict()
+ return {"raw": str(obs)}
+
+
+def _redactSnippets(obsDict: dict):
+ """Replace large snippet strings with a metadata indicator."""
+ if 'previews' in obsDict and isinstance(obsDict['previews'], list):
+ for preview in obsDict['previews']:
+ if isinstance(preview, dict) and 'snippet' in preview:
+ preview['snippet'] = f"[Content: {len(preview.get('snippet', ''))} characters]"
def extractUserPrompt(context: Any) -> str:
"""Extract user prompt from context. Maps to {{KEY:USER_PROMPT}}.
@@ -71,22 +91,17 @@ def extractUserPrompt(context: Any) -> str:
def extractNormalizedRequest(services: Any) -> str:
"""Extract normalized user request from services. Maps to {{KEY:NORMALIZED_REQUEST}}.
Returns the full normalized request from user input analysis (preserves all constraints and details).
- CRITICAL: Must return the actual normalizedRequest from analysis, NOT intent.
"""
try:
- # Get normalized request from currentUserPromptNormalized (stores the normalizedRequest from analysis)
if services and getattr(services, 'currentUserPromptNormalized', None):
normalized = services.currentUserPromptNormalized
- # Validate that it's not the intent (which is shorter and less detailed)
- # Intent is typically a concise objective, normalized request should be longer and more detailed
workflowIntent = getattr(services.workflow, '_workflowIntent', {}) if hasattr(services, 'workflow') and services.workflow else {}
intent = workflowIntent.get('intent', '')
- # If normalized matches intent exactly, it's wrong - log warning
if intent and normalized == intent:
logger.warning(f"extractNormalizedRequest: normalized request matches intent - this is incorrect! normalized={normalized[:100]}...")
- # Try to get from workflow intent or return error message
- return f"ERROR: Normalized request not properly stored. Expected detailed request, got intent: {intent}"
+ # Fall back to intent rather than injecting an error string into the LLM prompt
+ return intent
return normalized
@@ -346,49 +361,12 @@ def extractReviewContent(context: Any) -> str:
return result_summary
elif hasattr(context, 'observation') and context.observation:
- # For observation data, show full content but handle documents specially
- # Handle both Pydantic Observation model and dict format
- from modules.datamodels.datamodelChat import Observation
-
- if isinstance(context.observation, Observation):
- # Convert Pydantic model to dict
- obs_dict = context.observation.model_dump(exclude_none=True) if hasattr(context.observation, 'model_dump') else context.observation.dict()
- elif isinstance(context.observation, dict):
- obs_dict = context.observation.copy()
- else:
- # Fallback: try to serialize as-is
- obs_dict = context.observation.model_dump(exclude_none=True) if hasattr(context.observation, 'model_dump') else context.observation.dict()
-
- # If there are previews with documents, show only metadata
- if 'previews' in obs_dict and isinstance(obs_dict['previews'], list):
- for preview in obs_dict['previews']:
- if isinstance(preview, dict) and 'snippet' in preview:
- # Replace snippet with metadata indicator
- preview['snippet'] = f"[Content: {len(preview.get('snippet', ''))} characters]"
-
+ obs_dict = _observationToDict(context.observation)
+ _redactSnippets(obs_dict)
return json.dumps(obs_dict, indent=2, ensure_ascii=False)
elif hasattr(context, 'stepResult') and context.stepResult and 'observation' in context.stepResult:
- # For observation data in stepResult, show full content but handle documents specially
- observation = context.stepResult['observation']
- # Handle both Pydantic Observation model and dict format
- from modules.datamodels.datamodelChat import Observation
-
- if isinstance(observation, Observation):
- # Convert Pydantic model to dict
- obs_dict = observation.model_dump(exclude_none=True) if hasattr(observation, 'model_dump') else observation.dict()
- elif isinstance(observation, dict):
- obs_dict = observation.copy()
- else:
- # Fallback: try to serialize
- obs_dict = observation.model_dump(exclude_none=True) if hasattr(observation, 'model_dump') else observation.dict()
-
- # If there are previews with documents, show only metadata
- if 'previews' in obs_dict and isinstance(obs_dict['previews'], list):
- for preview in obs_dict['previews']:
- if isinstance(preview, dict) and 'snippet' in preview:
- # Replace snippet with metadata indicator
- preview['snippet'] = f"[Content: {len(preview.get('snippet', ''))} characters]"
-
+ obs_dict = _observationToDict(context.stepResult['observation'])
+ _redactSnippets(obs_dict)
return json.dumps(obs_dict, indent=2, ensure_ascii=False)
else:
return "No review content available"
@@ -449,41 +427,22 @@ def extractLatestRefinementFeedback(context: Any) -> str:
CRITICAL: If ERROR level logs are found, refinement should stop processing.
"""
try:
- # First check for ERROR level logs in workflow
- if hasattr(context, 'workflow') and context.workflow:
- try:
- import modules.interfaces.interfaceDbChat as interfaceDbChat
- from modules.interfaces.interfaceDbApp import getRootInterface
- rootInterface = getRootInterface()
- interfaceDbChat = interfaceDbChat.getInterface(rootInterface.currentUser)
-
- # Get workflow logs
- chatData = interfaceDbChat.getUnifiedChatData(context.workflow.id, None)
- logs = chatData.get("logs", [])
-
- # Check for ERROR level logs
- for log in logs:
- if isinstance(log, dict):
- log_level = log.get("level", "").upper()
- log_message = str(log.get("message", ""))
- if log_level == "ERROR" or "ERROR" in log_message.upper():
- return f"CRITICAL: Processing stopped due to ERROR in logs: {log_message[:200]}"
- except Exception as log_check_error:
- # If we can't check logs, continue with normal feedback extraction
- logger.warning(f"Could not check for ERROR logs: {str(log_check_error)}")
-
if not hasattr(context, 'previousReviewResult') or not context.previousReviewResult or not isinstance(context.previousReviewResult, list):
return "No previous refinement feedback available"
- # Get the most recent refinement decision
+ # Get the most recent refinement decision (supports both ReviewResult objects and dicts)
latest_decision = context.previousReviewResult[-1]
- if not isinstance(latest_decision, dict):
+
+ # Normalize to dict if it's a Pydantic model (e.g. ReviewResult)
+ if hasattr(latest_decision, 'model_dump'):
+ latest_decision = latest_decision.model_dump()
+ elif not isinstance(latest_decision, dict):
return "No previous refinement feedback available"
feedback_parts = []
- # Add decision and reason
- decision = latest_decision.get('decision', 'unknown')
+ # Add decision and reason (ReviewResult uses 'status', legacy uses 'decision')
+ decision = latest_decision.get('status') or latest_decision.get('decision', 'unknown')
reason = latest_decision.get('reason', 'No reason provided')
feedback_parts.append(f"Latest Decision: {decision}")
feedback_parts.append(f"Reason: {reason}")
diff --git a/modules/workflows/processing/shared/promptGenerationActionsDynamic.py b/modules/workflows/processing/shared/promptGenerationActionsDynamic.py
index 31878033..dee1cc1f 100644
--- a/modules/workflows/processing/shared/promptGenerationActionsDynamic.py
+++ b/modules/workflows/processing/shared/promptGenerationActionsDynamic.py
@@ -46,12 +46,19 @@ def generateDynamicPlanSelectionPrompt(services, context: Any, learningEngine=No
adaptiveContext = learningEngine.getAdaptiveContextForActionSelection(workflowId, userPrompt)
if adaptiveContext:
- # Add learning-aware placeholders
placeholders.extend([
PromptPlaceholder(label="ADAPTIVE_GUIDANCE", content=adaptiveContext.get('adaptiveGuidance', ''), summaryAllowed=True),
PromptPlaceholder(label="FAILURE_ANALYSIS", content=json.dumps(adaptiveContext.get('failureAnalysis', {}), indent=2), summaryAllowed=True),
PromptPlaceholder(label="ESCALATION_LEVEL", content=adaptiveContext.get('escalationLevel', 'low'), summaryAllowed=False),
])
+
+ # Always provide these placeholders so template tokens don't leak into the LLM prompt
+ if not adaptiveContext:
+ placeholders.extend([
+ PromptPlaceholder(label="ADAPTIVE_GUIDANCE", content="", summaryAllowed=True),
+ PromptPlaceholder(label="FAILURE_ANALYSIS", content="", summaryAllowed=True),
+ PromptPlaceholder(label="ESCALATION_LEVEL", content="low", summaryAllowed=False),
+ ])
template = """Select exactly one next action to advance the task incrementally.
@@ -60,7 +67,8 @@ CONTEXT: {{KEY:OVERALL_TASK_CONTEXT}}
OBJECTIVE: {{KEY:TASK_OBJECTIVE}}
=== AVAILABLE RESOURCES ===
-AVAILABLE_DOCUMENTS_INDEX: {{KEY:AVAILABLE_DOCUMENTS_SUMMARY}}
+AVAILABLE_DOCUMENTS_SUMMARY: {{KEY:AVAILABLE_DOCUMENTS_SUMMARY}}
+AVAILABLE_DOCUMENTS_INDEX:
{{KEY:AVAILABLE_DOCUMENTS_INDEX}}
AVAILABLE_CONNECTIONS_INDEX:
{{KEY:AVAILABLE_CONNECTIONS_INDEX}}
@@ -227,6 +235,13 @@ Excludes documents/connections/history entirely.
PromptPlaceholder(label="ATTEMPT_NUMBER", content=str(adaptiveContext.get('attemptNumber', 1)), summaryAllowed=False),
PromptPlaceholder(label="FAILURE_ANALYSIS", content=json.dumps(adaptiveContext.get('failureAnalysis', {}), indent=2), summaryAllowed=True),
])
+
+ if not adaptiveContext:
+ placeholders.extend([
+ PromptPlaceholder(label="PARAMETER_GUIDANCE", content="", summaryAllowed=True),
+ PromptPlaceholder(label="ATTEMPT_NUMBER", content="1", summaryAllowed=False),
+ PromptPlaceholder(label="FAILURE_ANALYSIS", content="", summaryAllowed=True),
+ ])
template = """You are a parameter generator. Set the parameters for this specific action.
diff --git a/modules/workflows/processing/workflowProcessor.py b/modules/workflows/processing/workflowProcessor.py
index 3547008a..72f45cce 100644
--- a/modules/workflows/processing/workflowProcessor.py
+++ b/modules/workflows/processing/workflowProcessor.py
@@ -141,8 +141,9 @@ class WorkflowProcessor:
# Delegate to the appropriate mode
result = await self.mode.executeTask(taskStep, workflow, context)
- # Complete progress tracking
- self.services.chat.progressLogFinish(operationId, True)
+ # Complete progress tracking based on actual result
+ taskSuccess = result.success if hasattr(result, 'success') else True
+ self.services.chat.progressLogFinish(operationId, taskSuccess)
return result
except Exception as e:
@@ -329,7 +330,7 @@ class WorkflowProcessor:
return handoverData
except Exception as e:
logger.error(f"Error in prepareTaskHandover: {str(e)}")
- return {'error': str(e)}
+ raise
# Fast Path Implementation
@@ -379,10 +380,7 @@ class WorkflowProcessor:
"################ USER INPUT START #################\n"
)
- # Add sanitized user input with clear delimiters
- # Escape curly braces for f-string safety, but preserve format (no quote wrapping)
- sanitizedPrompt = prompt.replace('{', '{{').replace('}', '}}') if prompt else ""
- complexityPrompt += f"{sanitizedPrompt}\n"
+ complexityPrompt += f"{prompt or ''}\n"
complexityPrompt += "################ USER INPUT FINISH #################\n\n"
@@ -469,17 +467,14 @@ class WorkflowProcessor:
"Format your response as plain text (no markdown code blocks unless showing code examples)."
)
- # Prepare AI call options for fast path (balanced, fast processing)
-
options = AiCallOptions(
operationType=OperationTypeEnum.DATA_ANALYSE,
priority=PriorityEnum.BALANCED,
processingMode=ProcessingModeEnum.BASIC,
- maxCost=0.10, # Low cost for simple requests
- maxProcessingTime=15 # Fast path should complete in 15s
+ maxCost=0.10,
+ maxProcessingTime=15
)
- # Call AI via callAi() to ensure stats are stored
aiRequest = AiCallRequest(
prompt=fastPathPrompt,
context="",
@@ -630,17 +625,23 @@ class WorkflowProcessor:
chatDocuments = []
if taskResult.actionResult and taskResult.actionResult.documents:
for actionDoc in taskResult.actionResult.documents:
- if hasattr(actionDoc, 'documentData') and actionDoc.documentData:
- # Create file in component storage
+ if hasattr(actionDoc, 'documentData') and actionDoc.documentData is not None:
+ rawData = actionDoc.documentData
+ if isinstance(rawData, bytes):
+ contentBytes = rawData
+ elif isinstance(rawData, str):
+ contentBytes = rawData.encode('utf-8')
+ else:
+ contentBytes = json.dumps(rawData, ensure_ascii=False).encode('utf-8')
+
fileItem = self.services.interfaceDbComponent.createFile(
name=actionDoc.documentName if hasattr(actionDoc, 'documentName') else f"task_{taskResult.taskId}_result.txt",
mimeType=actionDoc.mimeType if hasattr(actionDoc, 'mimeType') else "text/plain",
- content=actionDoc.documentData if isinstance(actionDoc.documentData, bytes) else actionDoc.documentData.encode('utf-8')
+ content=contentBytes
)
- # Persist file data
self.services.interfaceDbComponent.createFileData(
fileItem.id,
- actionDoc.documentData if isinstance(actionDoc.documentData, bytes) else actionDoc.documentData.encode('utf-8')
+ contentBytes
)
# Get file info
@@ -651,7 +652,7 @@ class WorkflowProcessor:
chatDoc = {
"fileId": fileItem.id,
"fileName": fileInfo.get("fileName", actionDoc.documentName) if fileInfo else actionDoc.documentName,
- "fileSize": fileInfo.get("size", len(actionDoc.documentData) if isinstance(actionDoc.documentData, bytes) else len(actionDoc.documentData.encode('utf-8'))) if fileInfo else (len(actionDoc.documentData) if isinstance(actionDoc.documentData, bytes) else len(actionDoc.documentData.encode('utf-8'))),
+ "fileSize": fileInfo.get("size", len(contentBytes)) if fileInfo else len(contentBytes),
"mimeType": fileInfo.get("mimeType", actionDoc.mimeType) if fileInfo else actionDoc.mimeType,
"roundNumber": workflow.currentRound,
"taskNumber": workflow.getTaskIndex(),
diff --git a/modules/workflows/workflowManager.py b/modules/workflows/workflowManager.py
index dfc617da..c81977c1 100644
--- a/modules/workflows/workflowManager.py
+++ b/modules/workflows/workflowManager.py
@@ -8,7 +8,6 @@ import json
from modules.datamodels.datamodelChat import (
UserInputRequest,
- ChatMessage,
ChatWorkflow,
ChatDocument,
WorkflowModeEnum
@@ -44,11 +43,6 @@ class WorkflowManager:
# Store workflow in services for reference (this is the ChatWorkflow object)
self.services.workflow = workflow
- # CRITICAL: Update all method instances to use the current Services object with the correct workflow
- from modules.workflows.processing.shared.methodDiscovery import discoverMethods
- discoverMethods(self.services)
- logger.debug(f"Updated method instances to use workflow {self.services.workflow.id}")
-
if workflow.status == "running":
logger.info(f"Stopping running workflow {workflowId} before processing new prompt")
workflow.status = "stopped"
@@ -57,12 +51,13 @@ class WorkflowManager:
"status": "stopped",
"lastActivity": currentTime
})
- self.services.chat.storeLog(workflow, {
- "message": "Workflow stopped for new prompt",
- "type": "info",
- "status": "stopped",
- "progress": 1.0
- })
+ if workflow.status == "stopped":
+ self.services.chat.storeLog(workflow, {
+ "message": "Workflow stopped for new prompt",
+ "type": "info",
+ "status": "stopped",
+ "progress": 1.0
+ })
newRound = workflow.currentRound + 1
self.services.chat.updateWorkflow(workflowId, {
@@ -170,7 +165,10 @@ class WorkflowManager:
self.services.currentUserPrompt = userInput.prompt
# Reset progress logger for new workflow
- self.services.chat._progressLogger = None
+ if hasattr(self.services.chat, 'resetProgressLogger'):
+ self.services.chat.resetProgressLogger()
+ else:
+ self.services.chat._progressLogger = None
# Reset workflow history flag at start of each workflow
setattr(self.services, '_needsWorkflowHistory', False)
@@ -565,9 +563,10 @@ The following is the user's original input message. Analyze intent, normalize th
logger.info(f"Fast path completed successfully, response length: {len(responseText)} chars")
+ except WorkflowStoppedException:
+ raise
except Exception as e:
logger.error(f"Error in _executeFastPath: {str(e)}")
- # Fall back to full workflow on error
logger.info("Falling back to full workflow due to fast path error")
taskPlan = await self._planTasks(userInput)
await self._executeTasks(taskPlan)
@@ -897,8 +896,8 @@ The following is the user's original input message. Analyze intent, normalize th
failedActions=[],
successfulActions=[],
criteriaProgress={
- 'met_criteria': set(),
- 'unmet_criteria': set(),
+ 'met_criteria': [],
+ 'unmet_criteria': [],
'attempt_history': []
}
)
@@ -1021,11 +1020,11 @@ The following is the user's original input message. Analyze intent, normalize th
})
return
elif workflow.status == 'failed':
- # Create error message
+ lastError = getattr(workflow, '_lastError', None) or "Processing failed"
errorMessage = {
"workflowId": workflow.id,
"role": "assistant",
- "message": f"Workflow failed: {'Unknown error'}",
+ "message": f"Workflow failed: {lastError}",
"status": "last",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": self.services.utils.timestampGetUtc(),
@@ -1051,9 +1050,8 @@ The following is the user's original input message. Analyze intent, normalize th
"totalActions": workflow.totalActions
})
- # Add failed log entry
self.services.chat.storeLog(workflow, {
- "message": "Workflow failed: Unknown error",
+ "message": f"Workflow failed: {lastError}",
"type": "error",
"status": "failed",
"progress": 1.0
@@ -1155,7 +1153,6 @@ The following is the user's original input message. Analyze intent, normalize th
"""Generate feedback message for workflow completion"""
try:
workflow = self.services.workflow
- checkWorkflowStopped(self.services)
# Count messages by role
userMessages = [msg for msg in workflow.messages if msg.role == 'user']
@@ -1227,7 +1224,6 @@ The following is the user's original input message. Analyze intent, normalize th
workflow = self.services.workflow
logger.error(f"Workflow processing error: {str(error)}")
- # Update workflow status to failed
workflow.status = "failed"
workflow.lastActivity = self.services.utils.timestampGetUtc()
self.services.chat.updateWorkflow(workflow.id, {
@@ -1237,11 +1233,10 @@ The following is the user's original input message. Analyze intent, normalize th
"totalActions": workflow.totalActions
})
- # Create error message
error_message = {
"workflowId": workflow.id,
"role": "assistant",
- "message": f"Workflow processing failed: {str(error)}",
+ "message": "Workflow processing encountered an error. Please try again.",
"status": "last",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": self.services.utils.timestampGetUtc(),
@@ -1257,15 +1252,12 @@ The following is the user's original input message. Analyze intent, normalize th
}
self.services.chat.storeMessageWithDocuments(workflow, error_message, [])
- # Add error log entry
self.services.chat.storeLog(workflow, {
"message": f"Workflow failed: {str(error)}",
"type": "error",
"status": "failed",
"progress": 1.0
})
-
- raise
async def _processFileIds(self, fileIds: List[str], messageId: str = None) -> List[ChatDocument]:
"""Process file IDs from existing files and return ChatDocument objects.
@@ -1365,21 +1357,3 @@ The following is the user's original input message. Analyze intent, normalize th
# Return original content on error
return contentBytes
- def _checkIfHistoryAvailable(self) -> bool:
- """Check if workflow history is available (previous rounds exist).
-
- Returns True if there are previous workflow rounds with messages.
- """
- try:
- from modules.workflows.processing.shared.placeholderFactory import getPreviousRoundContext
-
- history = getPreviousRoundContext(self.services)
-
- # Check if history contains actual content (not just "No previous round context available")
- if history and history != "No previous round context available":
- return True
-
- return False
- except Exception as e:
- logger.error(f"Error checking if history is available: {str(e)}")
- return False
From 5486a87b9ac355f14376f411ebdf6ff1e332061e Mon Sep 17 00:00:00 2001
From: patrick-motsch
Date: Tue, 3 Mar 2026 22:04:52 +0100
Subject: [PATCH 2/7] CommCoach iteration 1: goal progress, scheduler registration, audit logging, chunked SSE responses, session insights
---
.../commcoach/interfaceFeatureCommcoach.py | 27 ++++++++
modules/features/commcoach/mainCommcoach.py | 11 ++++
.../commcoach/routeFeatureCommcoach.py | 21 +++++++
.../features/commcoach/serviceCommcoach.py | 62 ++++++++++++++-----
.../features/commcoach/serviceCommcoachAi.py | 22 +++++++
5 files changed, 128 insertions(+), 15 deletions(-)
diff --git a/modules/features/commcoach/interfaceFeatureCommcoach.py b/modules/features/commcoach/interfaceFeatureCommcoach.py
index 830aa261..eae7e168 100644
--- a/modules/features/commcoach/interfaceFeatureCommcoach.py
+++ b/modules/features/commcoach/interfaceFeatureCommcoach.py
@@ -5,6 +5,7 @@ Interface to CommCoach database.
Uses the PostgreSQL connector for data access with strict user ownership.
"""
+import json
import logging
from typing import Dict, Any, List, Optional
@@ -292,14 +293,23 @@ class CommcoachObjects:
contextSummaries = []
for ctx in activeContexts:
+ goalProgress = _calcGoalProgress(ctx.get("goals"))
contextSummaries.append({
"id": ctx.get("id"),
"title": ctx.get("title"),
"category": ctx.get("category"),
"sessionCount": ctx.get("sessionCount", 0),
"lastSessionAt": ctx.get("lastSessionAt"),
+ "goalProgress": goalProgress,
})
+ allGoalProgress = []
+ for ctx in activeContexts:
+ gp = _calcGoalProgress(ctx.get("goals"))
+ if gp is not None:
+ allGoalProgress.append(gp)
+ overallGoalProgress = round(sum(allGoalProgress) / len(allGoalProgress)) if allGoalProgress else None
+
return {
"totalContexts": len(contexts),
"activeContexts": len(activeContexts),
@@ -312,4 +322,21 @@ class CommcoachObjects:
"openTasks": self.getOpenTaskCount(userId, instanceId),
"completedTasks": self.getCompletedTaskCount(userId, instanceId),
"contexts": contextSummaries,
+ "goalProgress": overallGoalProgress,
}
+
+
+def _calcGoalProgress(goalsRaw) -> Optional[int]:
+ """Calculate goal completion percentage from a context's goals JSON field."""
+ if not goalsRaw:
+ return None
+ goals = goalsRaw
+ if isinstance(goalsRaw, str):
+ try:
+ goals = json.loads(goalsRaw)
+ except (json.JSONDecodeError, TypeError):
+ return None
+ if not isinstance(goals, list) or len(goals) == 0:
+ return None
+ done = sum(1 for g in goals if isinstance(g, dict) and g.get("status") in ("done", "completed"))
+ return round(done / len(goals) * 100)
diff --git a/modules/features/commcoach/mainCommcoach.py b/modules/features/commcoach/mainCommcoach.py
index c5a0a7c1..ff213f91 100644
--- a/modules/features/commcoach/mainCommcoach.py
+++ b/modules/features/commcoach/mainCommcoach.py
@@ -184,6 +184,7 @@ def registerFeature(catalogService) -> bool:
)
_syncTemplateRolesToDb()
+ _registerScheduler()
logger.info(f"Feature '{FEATURE_CODE}' registered {len(UI_OBJECTS)} UI, {len(RESOURCE_OBJECTS)} resource, {len(DATA_OBJECTS)} data objects")
return True
@@ -193,6 +194,16 @@ def registerFeature(catalogService) -> bool:
return False
+def _registerScheduler():
+ """Register CommCoach scheduled jobs (daily reminders)."""
+ try:
+ from modules.shared.eventManagement import eventManager
+ from .serviceCommcoachScheduler import registerScheduledJobs
+ registerScheduledJobs(eventManager)
+ except Exception as e:
+ logger.warning(f"CommCoach scheduler registration failed (non-fatal): {e}")
+
+
def _syncTemplateRolesToDb() -> int:
try:
from modules.interfaces.interfaceDbApp import getRootInterface
diff --git a/modules/features/commcoach/routeFeatureCommcoach.py b/modules/features/commcoach/routeFeatureCommcoach.py
index 66a4b347..43c00291 100644
--- a/modules/features/commcoach/routeFeatureCommcoach.py
+++ b/modules/features/commcoach/routeFeatureCommcoach.py
@@ -31,6 +31,23 @@ from .serviceCommcoach import CommcoachService, emitSessionEvent, getSessionEven
logger = logging.getLogger(__name__)
+
+def _audit(context: RequestContext, action: str, resourceType: str = None, resourceId: str = None, details: str = ""):
+ """Log an audit event for CommCoach. Non-blocking, best-effort."""
+ try:
+ from modules.shared.auditLogger import audit_logger
+ audit_logger.logEvent(
+ userId=str(context.user.id),
+ mandateId=str(context.mandateId) if context.mandateId else None,
+ category="commcoach",
+ action=action,
+ resourceType=resourceType,
+ resourceId=resourceId,
+ details=details,
+ )
+ except Exception:
+ pass
+
router = APIRouter(
prefix="/api/commcoach",
tags=["CommCoach"],
@@ -116,6 +133,7 @@ async def createContext(
created = interface.createContext(contextData)
logger.info(f"CommCoach context created: {created.get('id')} for user {userId}")
+ _audit(context, "commcoach.context.created", "CoachingContext", created.get("id"), f"Title: {body.title}")
return {"context": created}
@@ -208,6 +226,7 @@ async def archiveContext(
_validateOwnership(ctx, context)
updated = interface.updateContext(contextId, {"status": CoachingContextStatus.ARCHIVED.value})
+ _audit(context, "commcoach.context.archived", "CoachingContext", contextId)
return {"context": updated}
@@ -369,6 +388,7 @@ async def startSession(
pass
logger.info(f"CommCoach session started (streaming): {sessionId} for context {contextId}")
+ _audit(context, "commcoach.session.started", "CoachingSession", sessionId, f"Context: {contextId}")
return StreamingResponse(
_newSessionEventGenerator(),
media_type="text/event-stream",
@@ -419,6 +439,7 @@ async def completeSession(
service = CommcoachService(context.user, mandateId, instanceId)
result = await service.completeSession(sessionId, interface)
+ _audit(context, "commcoach.session.completed", "CoachingSession", sessionId)
return {"session": result}
diff --git a/modules/features/commcoach/serviceCommcoach.py b/modules/features/commcoach/serviceCommcoach.py
index 0778a978..1b886958 100644
--- a/modules/features/commcoach/serviceCommcoach.py
+++ b/modules/features/commcoach/serviceCommcoach.py
@@ -83,6 +83,33 @@ def cleanupSessionEvents(sessionId: str):
_sessionEvents.pop(sessionId, None)
+CHUNK_WORD_SIZE = 4
+CHUNK_DELAY_SECONDS = 0.05
+
+
+async def _emitChunkedResponse(sessionId: str, createdMsg: Dict[str, Any], fullText: str):
+ """Emit response as messageChunk events for progressive display, then the full message."""
+ msgId = createdMsg.get("id")
+ words = fullText.split()
+ emitted = ""
+ for i in range(0, len(words), CHUNK_WORD_SIZE):
+ chunk = " ".join(words[i:i + CHUNK_WORD_SIZE])
+ emitted = (emitted + " " + chunk).strip() if emitted else chunk
+ await emitSessionEvent(sessionId, "messageChunk", {
+ "id": msgId,
+ "role": "assistant",
+ "chunk": chunk,
+ "accumulated": emitted,
+ })
+ await asyncio.sleep(CHUNK_DELAY_SECONDS)
+ await emitSessionEvent(sessionId, "message", {
+ "id": msgId,
+ "role": "assistant",
+ "content": fullText,
+ "createdAt": createdMsg.get("createdAt"),
+ })
+
+
class CommcoachService:
"""Coaching orchestrator: processes messages, calls AI, extracts tasks and scores."""
@@ -204,12 +231,7 @@ class CommcoachService:
messages = interface.getMessages(sessionId)
interface.updateSession(sessionId, {"messageCount": len(messages)})
- await emitSessionEvent(sessionId, "message", {
- "id": createdAssistantMsg.get("id"),
- "role": "assistant",
- "content": responseText,
- "createdAt": createdAssistantMsg.get("createdAt"),
- })
+ await _emitChunkedResponse(sessionId, createdAssistantMsg, responseText)
if responseText:
try:
@@ -289,15 +311,7 @@ class CommcoachService:
createdMsg = interface.createMessage(assistantMsg)
interface.updateSession(sessionId, {"messageCount": 1})
- await emitSessionEvent(sessionId, "message", {
- "id": createdMsg.get("id"),
- "sessionId": sessionId,
- "contextId": contextId,
- "role": "assistant",
- "content": openingContent,
- "contentType": "text",
- "createdAt": createdMsg.get("createdAt"),
- })
+ await _emitChunkedResponse(sessionId, createdMsg, openingContent)
if openingContent:
try:
from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
@@ -497,6 +511,24 @@ class CommcoachService:
logger.warning(f"Scoring failed: {e}")
competenceScore = None
+ # Generate insights
+ try:
+ insightPrompt = aiPrompts.buildInsightPrompt(messages, summary)
+ insightResponse = await self._callAi("Du generierst kurze Coaching-Insights.", insightPrompt)
+ if insightResponse and insightResponse.errorCount == 0 and insightResponse.content:
+ insights = aiPrompts.parseJsonResponse(insightResponse.content, [])
+ if isinstance(insights, list):
+ existingInsights = aiPrompts._parseJsonField(context.get("insights") if context else None, [])
+ for ins in insights[:3]:
+ insightText = ins.get("text", ins) if isinstance(ins, dict) else str(ins)
+ if insightText:
+ existingInsights.append({"text": insightText, "sessionId": sessionId, "createdAt": getIsoTimestamp()})
+ await emitSessionEvent(sessionId, "insightGenerated", {"text": insightText, "sessionId": sessionId})
+ if contextId and existingInsights:
+ interface.updateContext(contextId, {"insights": json.dumps(existingInsights[-10:])})
+ except Exception as e:
+ logger.warning(f"Insight generation failed: {e}")
+
# Calculate duration
startedAt = session.get("startedAt", "")
durationSeconds = 0
diff --git a/modules/features/commcoach/serviceCommcoachAi.py b/modules/features/commcoach/serviceCommcoachAi.py
index ea58488b..5d050203 100644
--- a/modules/features/commcoach/serviceCommcoachAi.py
+++ b/modules/features/commcoach/serviceCommcoachAi.py
@@ -312,6 +312,28 @@ Sessions:
Antworte NUR mit der Zusammenfassung."""
+def buildInsightPrompt(messages: List[Dict[str, Any]], summary: Optional[str] = None) -> str:
+ """Build a prompt to generate coaching insights from a completed session."""
+ conversation = ""
+ for msg in messages[-15:]:
+ role = "Benutzer" if msg.get("role") == "user" else "Coach"
+ conversation += f"\n{role}: {msg.get('content', '')[:300]}"
+
+ summarySection = f"\nZusammenfassung: {summary[:500]}" if summary else ""
+
+ return f"""Generiere 1-3 kurze Coaching-Insights aus dieser Session.
+Ein Insight ist eine praegende Erkenntnis oder ein Aha-Moment des Benutzers.
+
+Antworte AUSSCHLIESSLICH als JSON-Array:
+[{{"text": "Erkenntnis in einem Satz"}}]
+
+Nur echte Erkenntnisse, keine Banalitaeten. Wenn keine klaren Insights: leeres Array [].
+{summarySection}
+
+Gespraech:
+{conversation}"""
+
+
def buildTaskExtractionPrompt(messages: List[Dict[str, Any]]) -> str:
"""Build a prompt to extract actionable tasks from a session."""
recentForTasks = messages[-25:] if len(messages) > 25 else messages
From f4940cf9e1712f377212abd875c4bb97e449d519 Mon Sep 17 00:00:00 2001
From: patrick-motsch
Date: Tue, 3 Mar 2026 23:02:53 +0100
Subject: [PATCH 3/7] CommCoach iteration 2: personas, documents, badges, score history, dossier export
---
.../features/commcoach/datamodelCommcoach.py | 76 ++++
.../commcoach/interfaceFeatureCommcoach.py | 102 ++++++
modules/features/commcoach/mainCommcoach.py | 29 ++
.../commcoach/routeFeatureCommcoach.py | 334 +++++++++++++++++-
.../features/commcoach/serviceCommcoach.py | 62 +++-
.../features/commcoach/serviceCommcoachAi.py | 36 +-
.../commcoach/serviceCommcoachExport.py | 288 +++++++++++++++
.../commcoach/serviceCommcoachGamification.py | 149 ++++++++
.../commcoach/serviceCommcoachPersonas.py | 139 ++++++++
9 files changed, 1211 insertions(+), 4 deletions(-)
create mode 100644 modules/features/commcoach/serviceCommcoachExport.py
create mode 100644 modules/features/commcoach/serviceCommcoachGamification.py
create mode 100644 modules/features/commcoach/serviceCommcoachPersonas.py
diff --git a/modules/features/commcoach/datamodelCommcoach.py b/modules/features/commcoach/datamodelCommcoach.py
index 0ba636ff..090640c6 100644
--- a/modules/features/commcoach/datamodelCommcoach.py
+++ b/modules/features/commcoach/datamodelCommcoach.py
@@ -103,6 +103,7 @@ class CoachingSession(BaseModel):
mandateId: str = Field(description="Mandate ID")
instanceId: str = Field(description="Feature instance ID")
status: CoachingSessionStatus = Field(default=CoachingSessionStatus.ACTIVE)
+ personaId: Optional[str] = Field(default=None, description="FK to CoachingPersona (Iteration 2)")
summary: Optional[str] = Field(default=None, description="AI-generated session summary")
coachNotes: Optional[str] = Field(default=None, description="JSON: AI internal notes for continuity")
compressedHistorySummary: Optional[str] = Field(default=None, description="AI summary of older messages for long sessions")
@@ -183,6 +184,62 @@ class CoachingUserProfile(BaseModel):
updatedAt: Optional[str] = Field(default=None)
+# ============================================================================
+# Iteration 2: Personas
+# ============================================================================
+
+class CoachingPersona(BaseModel):
+ """A roleplay persona for coaching sessions."""
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()))
+ userId: str = Field(description="Owner user ID ('system' for builtins)")
+ mandateId: Optional[str] = Field(default=None)
+ instanceId: Optional[str] = Field(default=None)
+ key: str = Field(description="Unique key, e.g. 'critical_cfo_f'")
+ label: str = Field(description="Display label, e.g. 'Kritische CFO'")
+ description: str = Field(description="Detailed role description for the AI")
+ systemPromptOverride: Optional[str] = Field(default=None, description="Full system prompt override for this persona")
+ gender: Optional[str] = Field(default=None, description="m or f")
+ category: str = Field(default="builtin", description="'builtin' or 'custom'")
+ isActive: bool = Field(default=True)
+ createdAt: Optional[str] = Field(default=None)
+ updatedAt: Optional[str] = Field(default=None)
+
+
+# ============================================================================
+# Iteration 2: Documents
+# ============================================================================
+
+class CoachingDocument(BaseModel):
+ """A document attached to a coaching context."""
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()))
+ contextId: str = Field(description="FK to CoachingContext")
+ userId: str = Field(description="Owner user ID")
+ mandateId: str = Field(description="Mandate ID")
+ instanceId: Optional[str] = Field(default=None)
+ fileName: str = Field(description="Original file name")
+ mimeType: str = Field(default="application/octet-stream")
+ fileSize: int = Field(default=0)
+ extractedText: Optional[str] = Field(default=None, description="Text content extracted from file")
+ summary: Optional[str] = Field(default=None, description="AI-generated summary")
+ fileRef: Optional[str] = Field(default=None, description="Reference to file in storage")
+ createdAt: Optional[str] = Field(default=None)
+
+
+# ============================================================================
+# Iteration 2: Badges / Gamification
+# ============================================================================
+
+class CoachingBadge(BaseModel):
+ """An achievement badge awarded to a user."""
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()))
+ userId: str = Field(description="Owner user ID")
+ mandateId: str = Field(description="Mandate ID")
+ instanceId: str = Field(description="Feature instance ID")
+ badgeKey: str = Field(description="Badge identifier, e.g. 'streak_7'")
+ awardedAt: Optional[str] = Field(default=None)
+ createdAt: Optional[str] = Field(default=None)
+
+
# ============================================================================
# API Request/Response Models
# ============================================================================
@@ -232,6 +289,25 @@ class UpdateProfileRequest(BaseModel):
emailSummaryEnabled: Optional[bool] = None
+class StartSessionRequest(BaseModel):
+ personaId: Optional[str] = None
+
+
+class CreatePersonaRequest(BaseModel):
+ label: str
+ description: str
+ gender: Optional[str] = None
+ systemPromptOverride: Optional[str] = None
+
+
+class UpdatePersonaRequest(BaseModel):
+ label: Optional[str] = None
+ description: Optional[str] = None
+ gender: Optional[str] = None
+ systemPromptOverride: Optional[str] = None
+ isActive: Optional[bool] = None
+
+
class DashboardData(BaseModel):
"""Aggregated dashboard data for the user."""
totalContexts: int = 0
diff --git a/modules/features/commcoach/interfaceFeatureCommcoach.py b/modules/features/commcoach/interfaceFeatureCommcoach.py
index eae7e168..9e21f677 100644
--- a/modules/features/commcoach/interfaceFeatureCommcoach.py
+++ b/modules/features/commcoach/interfaceFeatureCommcoach.py
@@ -238,6 +238,98 @@ class CommcoachObjects:
count += 1
return count
+ # =========================================================================
+ # Personas
+ # =========================================================================
+
+ def getPersonas(self, userId: str, instanceId: str) -> List[Dict[str, Any]]:
+ from .datamodelCommcoach import CoachingPersona
+ builtins = self.db.getRecordset(CoachingPersona, recordFilter={"userId": "system"})
+ custom = self.db.getRecordset(CoachingPersona, recordFilter={"userId": userId, "instanceId": instanceId})
+ all = builtins + custom
+ return [p for p in all if p.get("isActive", True)]
+
+ def getPersona(self, personaId: str) -> Optional[Dict[str, Any]]:
+ from .datamodelCommcoach import CoachingPersona
+ records = self.db.getRecordset(CoachingPersona, recordFilter={"id": personaId})
+ return records[0] if records else None
+
+ def createPersona(self, data: Dict[str, Any]) -> Dict[str, Any]:
+ from .datamodelCommcoach import CoachingPersona
+ data["createdAt"] = getIsoTimestamp()
+ data["updatedAt"] = getIsoTimestamp()
+ return self.db.recordCreate(CoachingPersona, data)
+
+ def updatePersona(self, personaId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]:
+ from .datamodelCommcoach import CoachingPersona
+ updates["updatedAt"] = getIsoTimestamp()
+ return self.db.recordModify(CoachingPersona, personaId, updates)
+
+ def deletePersona(self, personaId: str) -> bool:
+ from .datamodelCommcoach import CoachingPersona
+ return self.db.recordDelete(CoachingPersona, personaId)
+
+ # =========================================================================
+ # Documents
+ # =========================================================================
+
+ def getDocuments(self, contextId: str, userId: str) -> List[Dict[str, Any]]:
+ from .datamodelCommcoach import CoachingDocument
+ records = self.db.getRecordset(CoachingDocument, recordFilter={"contextId": contextId, "userId": userId})
+ records.sort(key=lambda r: r.get("createdAt") or "", reverse=True)
+ return records
+
+ def getDocument(self, documentId: str) -> Optional[Dict[str, Any]]:
+ from .datamodelCommcoach import CoachingDocument
+ records = self.db.getRecordset(CoachingDocument, recordFilter={"id": documentId})
+ return records[0] if records else None
+
+ def createDocument(self, data: Dict[str, Any]) -> Dict[str, Any]:
+ from .datamodelCommcoach import CoachingDocument
+ data["createdAt"] = getIsoTimestamp()
+ return self.db.recordCreate(CoachingDocument, data)
+
+ def deleteDocument(self, documentId: str) -> bool:
+ from .datamodelCommcoach import CoachingDocument
+ return self.db.recordDelete(CoachingDocument, documentId)
+
+ # =========================================================================
+ # Badges
+ # =========================================================================
+
+ def getBadges(self, userId: str, instanceId: str) -> List[Dict[str, Any]]:
+ from .datamodelCommcoach import CoachingBadge
+ records = self.db.getRecordset(CoachingBadge, recordFilter={"userId": userId, "instanceId": instanceId})
+ records.sort(key=lambda r: r.get("awardedAt") or "", reverse=True)
+ return records
+
+ def hasBadge(self, userId: str, instanceId: str, badgeKey: str) -> bool:
+ from .datamodelCommcoach import CoachingBadge
+ records = self.db.getRecordset(CoachingBadge, recordFilter={"userId": userId, "instanceId": instanceId, "badgeKey": badgeKey})
+ return len(records) > 0
+
+ def awardBadge(self, data: Dict[str, Any]) -> Dict[str, Any]:
+ from .datamodelCommcoach import CoachingBadge
+ data["awardedAt"] = getIsoTimestamp()
+ data["createdAt"] = getIsoTimestamp()
+ return self.db.recordCreate(CoachingBadge, data)
+
+ # =========================================================================
+ # Score History
+ # =========================================================================
+
+ def getScoreHistory(self, contextId: str, userId: str) -> Dict[str, List[Dict[str, Any]]]:
+ scores = self.getScores(contextId, userId)
+ history: Dict[str, List[Dict[str, Any]]] = {}
+ for s in scores:
+ dim = s.get("dimension", "unknown")
+ if dim not in history:
+ history[dim] = []
+ history[dim].append({"score": s.get("score"), "trend": s.get("trend"), "evidence": s.get("evidence"), "createdAt": s.get("createdAt"), "sessionId": s.get("sessionId")})
+ for dim in history:
+ history[dim].sort(key=lambda x: x.get("createdAt") or "")
+ return history
+
# =========================================================================
# User Profile
# =========================================================================
@@ -323,6 +415,8 @@ class CommcoachObjects:
"completedTasks": self.getCompletedTaskCount(userId, instanceId),
"contexts": contextSummaries,
"goalProgress": overallGoalProgress,
+ "badges": self.getBadges(userId, instanceId),
+ "level": _calcLevel(profile.get("totalSessions", 0) if profile else 0),
}
@@ -340,3 +434,11 @@ def _calcGoalProgress(goalsRaw) -> Optional[int]:
return None
done = sum(1 for g in goals if isinstance(g, dict) and g.get("status") in ("done", "completed"))
return round(done / len(goals) * 100)
+
+
+def _calcLevel(totalSessions: int) -> Dict[str, Any]:
+ levels = [(50, 5, "Meister"), (25, 4, "Experte"), (10, 3, "Fortgeschritten"), (3, 2, "Engagiert")]
+ for threshold, number, label in levels:
+ if totalSessions >= threshold:
+ return {"number": number, "label": label, "totalSessions": totalSessions}
+ return {"number": 1, "label": "Einsteiger", "totalSessions": totalSessions}
diff --git a/modules/features/commcoach/mainCommcoach.py b/modules/features/commcoach/mainCommcoach.py
index ff213f91..2147a867 100644
--- a/modules/features/commcoach/mainCommcoach.py
+++ b/modules/features/commcoach/mainCommcoach.py
@@ -68,6 +68,21 @@ DATA_OBJECTS = [
"label": {"en": "User Profile", "de": "Benutzerprofil", "fr": "Profil utilisateur"},
"meta": {"table": "CoachingUserProfile", "fields": ["id", "userId", "preferredLanguage"]}
},
+ {
+ "objectKey": "data.feature.commcoach.CoachingPersona",
+ "label": {"en": "Coaching Persona", "de": "Coaching-Persona", "fr": "Persona coaching"},
+ "meta": {"table": "CoachingPersona", "fields": ["id", "key", "label", "gender"]}
+ },
+ {
+ "objectKey": "data.feature.commcoach.CoachingDocument",
+ "label": {"en": "Coaching Document", "de": "Coaching-Dokument", "fr": "Document coaching"},
+ "meta": {"table": "CoachingDocument", "fields": ["id", "contextId", "fileName"]}
+ },
+ {
+ "objectKey": "data.feature.commcoach.CoachingBadge",
+ "label": {"en": "Coaching Badge", "de": "Coaching-Auszeichnung", "fr": "Badge coaching"},
+ "meta": {"table": "CoachingBadge", "fields": ["id", "badgeKey", "awardedAt"]}
+ },
{
"objectKey": "data.feature.commcoach.*",
"label": {"en": "All CommCoach Data", "de": "Alle CommCoach-Daten", "fr": "Toutes les donnees CommCoach"},
@@ -184,6 +199,7 @@ def registerFeature(catalogService) -> bool:
)
_syncTemplateRolesToDb()
+ _seedBuiltinPersonas()
_registerScheduler()
logger.info(f"Feature '{FEATURE_CODE}' registered {len(UI_OBJECTS)} UI, {len(RESOURCE_OBJECTS)} resource, {len(DATA_OBJECTS)} data objects")
@@ -194,6 +210,19 @@ def registerFeature(catalogService) -> bool:
return False
+def _seedBuiltinPersonas():
+ """Seed builtin roleplay personas into the database."""
+ try:
+ from .serviceCommcoachPersonas import seedBuiltinPersonas
+ from .interfaceFeatureCommcoach import CommcoachInterface
+ from modules.interfaces.interfaceDbManagement import getInterface as getDbInterface
+ db = getDbInterface()
+ interface = CommcoachInterface(db)
+ seedBuiltinPersonas(interface)
+ except Exception as e:
+ logger.warning(f"CommCoach persona seeding failed (non-fatal): {e}")
+
+
def _registerScheduler():
"""Register CommCoach scheduled jobs (daily reminders)."""
try:
diff --git a/modules/features/commcoach/routeFeatureCommcoach.py b/modules/features/commcoach/routeFeatureCommcoach.py
index 43c00291..685a0f4e 100644
--- a/modules/features/commcoach/routeFeatureCommcoach.py
+++ b/modules/features/commcoach/routeFeatureCommcoach.py
@@ -9,9 +9,10 @@ import logging
import json
import asyncio
import base64
+import uuid
from typing import Optional
from fastapi import APIRouter, HTTPException, Depends, Request
-from fastapi.responses import StreamingResponse
+from fastapi.responses import StreamingResponse, Response
from modules.auth import limiter, getRequestContext, RequestContext
from modules.shared.timeUtils import getIsoTimestamp
@@ -23,9 +24,11 @@ from .datamodelCommcoach import (
CoachingContext, CoachingContextStatus, CoachingSession, CoachingSessionStatus,
CoachingMessage, CoachingMessageRole, CoachingMessageContentType,
CoachingTask, CoachingTaskStatus,
+ CoachingPersona, CoachingDocument, CoachingBadge,
CreateContextRequest, UpdateContextRequest,
SendMessageRequest, CreateTaskRequest, UpdateTaskRequest, UpdateTaskStatusRequest,
UpdateProfileRequest,
+ StartSessionRequest, CreatePersonaRequest, UpdatePersonaRequest,
)
from .serviceCommcoach import CommcoachService, emitSessionEvent, getSessionEventQueue, cleanupSessionEvents
@@ -281,6 +284,7 @@ async def startSession(
request: Request,
instanceId: str,
contextId: str,
+ personaId: Optional[str] = None,
context: RequestContext = Depends(getRequestContext),
):
"""Start a new coaching session or resume active one. Returns SSE stream with sessionState, messages, and complete."""
@@ -358,6 +362,7 @@ async def startSession(
userId=userId,
mandateId=mandateId,
instanceId=instanceId,
+ personaId=personaId,
).model_dump()
created = interface.createSession(sessionData)
sessionId = created.get("id")
@@ -887,3 +892,330 @@ async def testVoice(
except Exception as e:
logger.error(f"Voice test failed: {e}")
raise HTTPException(status_code=500, detail=f"TTS test failed: {str(e)}")
+
+
+# =========================================================================
+# Export Endpoints (Iteration 2)
+# =========================================================================
+
+@router.get("/{instanceId}/contexts/{contextId}/export")
+@limiter.limit("10/minute")
+async def exportDossier(
+ request: Request,
+ instanceId: str,
+ contextId: str,
+ format: str = "md",
+ context: RequestContext = Depends(getRequestContext),
+):
+ """Export a dossier as Markdown or PDF."""
+ _validateInstanceAccess(instanceId, context)
+ interface = _getInterface(context, instanceId)
+ userId = str(context.user.id)
+
+ ctx = interface.getContext(contextId)
+ if not ctx:
+ raise HTTPException(status_code=404, detail="Context not found")
+ _validateOwnership(ctx, context)
+
+ tasks = interface.getTasks(contextId, userId)
+ scores = interface.getScores(contextId, userId)
+ sessions = interface.getSessions(contextId, userId)
+
+ from .serviceCommcoachExport import buildDossierMarkdown, renderDossierPdf
+ _audit(context, "commcoach.export.requested", "CoachingContext", contextId, f"format={format}")
+
+ if format == "pdf":
+ pdfBytes = await renderDossierPdf(ctx, sessions, tasks, scores)
+ if pdfBytes:
+ return Response(content=pdfBytes, media_type="application/pdf",
+ headers={"Content-Disposition": f'attachment; filename="dossier_{contextId[:8]}.pdf"'})
+ format = "md"
+
+ md = buildDossierMarkdown(ctx, sessions, tasks, scores)
+ return Response(content=md, media_type="text/markdown",
+ headers={"Content-Disposition": f'attachment; filename="dossier_{contextId[:8]}.md"'})
+
+
+@router.get("/{instanceId}/sessions/{sessionId}/export")
+@limiter.limit("10/minute")
+async def exportSession(
+ request: Request,
+ instanceId: str,
+ sessionId: str,
+ format: str = "md",
+ context: RequestContext = Depends(getRequestContext),
+):
+ """Export a session as Markdown or PDF."""
+ _validateInstanceAccess(instanceId, context)
+ interface = _getInterface(context, instanceId)
+
+ session = interface.getSession(sessionId)
+ if not session:
+ raise HTTPException(status_code=404, detail="Session not found")
+ _validateOwnership(session, context)
+
+ contextId = session.get("contextId")
+ userId = str(context.user.id)
+ messages = interface.getMessages(sessionId)
+ tasks = interface.getTasks(contextId, userId) if contextId else []
+ scores = interface.getScores(contextId, userId) if contextId else []
+
+ from .serviceCommcoachExport import buildSessionMarkdown, renderSessionPdf
+ _audit(context, "commcoach.export.requested", "CoachingSession", sessionId, f"format={format}")
+
+ if format == "pdf":
+ pdfBytes = await renderSessionPdf(session, messages, tasks, scores)
+ if pdfBytes:
+ return Response(content=pdfBytes, media_type="application/pdf",
+ headers={"Content-Disposition": f'attachment; filename="session_{sessionId[:8]}.pdf"'})
+ format = "md"
+
+ md = buildSessionMarkdown(session, messages, tasks, scores)
+ return Response(content=md, media_type="text/markdown",
+ headers={"Content-Disposition": f'attachment; filename="session_{sessionId[:8]}.md"'})
+
+
+# =========================================================================
+# Persona Endpoints (Iteration 2)
+# =========================================================================
+
+@router.get("/{instanceId}/personas")
+@limiter.limit("60/minute")
+async def listPersonas(
+ request: Request,
+ instanceId: str,
+ context: RequestContext = Depends(getRequestContext),
+):
+ _validateInstanceAccess(instanceId, context)
+ interface = _getInterface(context, instanceId)
+ userId = str(context.user.id)
+ personas = interface.getPersonas(userId, instanceId)
+ return {"personas": personas}
+
+
+@router.post("/{instanceId}/personas")
+@limiter.limit("10/minute")
+async def createPersona(
+ request: Request,
+ instanceId: str,
+ body: CreatePersonaRequest,
+ context: RequestContext = Depends(getRequestContext),
+):
+ mandateId = _validateInstanceAccess(instanceId, context)
+ interface = _getInterface(context, instanceId)
+ userId = str(context.user.id)
+
+ data = CoachingPersona(
+ userId=userId,
+ mandateId=mandateId,
+ instanceId=instanceId,
+ key=f"custom_{str(uuid.uuid4())[:8]}",
+ label=body.label,
+ description=body.description,
+ gender=body.gender,
+ systemPromptOverride=body.systemPromptOverride,
+ category="custom",
+ ).model_dump()
+ created = interface.createPersona(data)
+ return {"persona": created}
+
+
+@router.put("/{instanceId}/personas/{personaId}")
+@limiter.limit("10/minute")
+async def updatePersonaRoute(
+ request: Request,
+ instanceId: str,
+ personaId: str,
+ body: UpdatePersonaRequest,
+ context: RequestContext = Depends(getRequestContext),
+):
+ _validateInstanceAccess(instanceId, context)
+ interface = _getInterface(context, instanceId)
+
+ persona = interface.getPersona(personaId)
+ if not persona:
+ raise HTTPException(status_code=404, detail="Persona not found")
+ if persona.get("category") == "builtin":
+ raise HTTPException(status_code=403, detail="Builtin personas cannot be edited")
+ _validateOwnership(persona, context)
+
+ updates = body.model_dump(exclude_none=True)
+ updated = interface.updatePersona(personaId, updates)
+ return {"persona": updated}
+
+
+@router.delete("/{instanceId}/personas/{personaId}")
+@limiter.limit("10/minute")
+async def deletePersonaRoute(
+ request: Request,
+ instanceId: str,
+ personaId: str,
+ context: RequestContext = Depends(getRequestContext),
+):
+ _validateInstanceAccess(instanceId, context)
+ interface = _getInterface(context, instanceId)
+
+ persona = interface.getPersona(personaId)
+ if not persona:
+ raise HTTPException(status_code=404, detail="Persona not found")
+ if persona.get("category") == "builtin":
+ raise HTTPException(status_code=403, detail="Builtin personas cannot be deleted")
+ _validateOwnership(persona, context)
+
+ interface.deletePersona(personaId)
+ return {"deleted": True}
+
+
+# =========================================================================
+# Document Endpoints (Iteration 2)
+# =========================================================================
+
+@router.get("/{instanceId}/contexts/{contextId}/documents")
+@limiter.limit("60/minute")
+async def listDocuments(
+ request: Request,
+ instanceId: str,
+ contextId: str,
+ context: RequestContext = Depends(getRequestContext),
+):
+ _validateInstanceAccess(instanceId, context)
+ interface = _getInterface(context, instanceId)
+ userId = str(context.user.id)
+ docs = interface.getDocuments(contextId, userId)
+ return {"documents": docs}
+
+
+@router.post("/{instanceId}/contexts/{contextId}/documents")
+@limiter.limit("10/minute")
+async def uploadDocument(
+ request: Request,
+ instanceId: str,
+ contextId: str,
+ context: RequestContext = Depends(getRequestContext),
+):
+ """Upload a document and bind it to a context."""
+ from fastapi import UploadFile
+ mandateId = _validateInstanceAccess(instanceId, context)
+ interface = _getInterface(context, instanceId)
+ userId = str(context.user.id)
+
+ ctx = interface.getContext(contextId)
+ if not ctx:
+ raise HTTPException(status_code=404, detail="Context not found")
+ _validateOwnership(ctx, context)
+
+ form = await request.form()
+ file = form.get("file")
+ if not file or not hasattr(file, "read"):
+ raise HTTPException(status_code=400, detail="No file uploaded")
+
+ content = await file.read()
+ fileName = getattr(file, "filename", "document")
+ mimeType = getattr(file, "content_type", "application/octet-stream")
+ fileSize = len(content)
+
+ extractedText = _extractText(content, mimeType, fileName)
+ summary = None
+ if extractedText and len(extractedText.strip()) > 50:
+ try:
+ from .serviceCommcoach import CommcoachService
+ service = CommcoachService(context.user, mandateId, instanceId)
+ aiResp = await service._callAi(
+ "Du fasst Dokumente in 2-3 Saetzen zusammen.",
+ f"Fasse folgendes Dokument zusammen:\n\n{extractedText[:3000]}"
+ )
+ if aiResp and aiResp.errorCount == 0 and aiResp.content:
+ summary = aiResp.content.strip()
+ except Exception as e:
+ logger.warning(f"Document summary failed: {e}")
+
+ docData = CoachingDocument(
+ contextId=contextId,
+ userId=userId,
+ mandateId=mandateId,
+ instanceId=instanceId,
+ fileName=fileName,
+ mimeType=mimeType,
+ fileSize=fileSize,
+ extractedText=extractedText[:10000] if extractedText else None,
+ summary=summary,
+ ).model_dump()
+ created = interface.createDocument(docData)
+ return {"document": created}
+
+
+@router.delete("/{instanceId}/documents/{documentId}")
+@limiter.limit("10/minute")
+async def deleteDocumentRoute(
+ request: Request,
+ instanceId: str,
+ documentId: str,
+ context: RequestContext = Depends(getRequestContext),
+):
+ _validateInstanceAccess(instanceId, context)
+ interface = _getInterface(context, instanceId)
+
+ doc = interface.getDocument(documentId)
+ if not doc:
+ raise HTTPException(status_code=404, detail="Document not found")
+ _validateOwnership(doc, context)
+
+ interface.deleteDocument(documentId)
+ return {"deleted": True}
+
+
+def _extractText(content: bytes, mimeType: str, fileName: str) -> Optional[str]:
+ """Extract text from uploaded file content."""
+ try:
+ if mimeType == "text/plain" or fileName.endswith(".txt"):
+ return content.decode("utf-8", errors="replace")
+ if mimeType == "text/markdown" or fileName.endswith(".md"):
+ return content.decode("utf-8", errors="replace")
+ if "pdf" in mimeType or fileName.endswith(".pdf"):
+ try:
+ import io
+ from PyPDF2 import PdfReader
+ reader = PdfReader(io.BytesIO(content))
+ text = ""
+ for page in reader.pages:
+ text += page.extract_text() or ""
+ return text
+ except ImportError:
+ logger.warning("PyPDF2 not installed, cannot extract PDF text")
+ return None
+ except Exception as e:
+ logger.warning(f"Text extraction failed for {fileName}: {e}")
+ return None
+
+
+# =========================================================================
+# Badge + Score History Endpoints (Iteration 2)
+# =========================================================================
+
+@router.get("/{instanceId}/badges")
+@limiter.limit("60/minute")
+async def listBadges(
+ request: Request,
+ instanceId: str,
+ context: RequestContext = Depends(getRequestContext),
+):
+ _validateInstanceAccess(instanceId, context)
+ interface = _getInterface(context, instanceId)
+ userId = str(context.user.id)
+ badges = interface.getBadges(userId, instanceId)
+ return {"badges": badges}
+
+
+@router.get("/{instanceId}/contexts/{contextId}/scores/history")
+@limiter.limit("60/minute")
+async def getScoreHistory(
+ request: Request,
+ instanceId: str,
+ contextId: str,
+ context: RequestContext = Depends(getRequestContext),
+):
+ _validateInstanceAccess(instanceId, context)
+ interface = _getInterface(context, instanceId)
+ userId = str(context.user.id)
+ history = interface.getScoreHistory(contextId, userId)
+ return {"history": history}
diff --git a/modules/features/commcoach/serviceCommcoach.py b/modules/features/commcoach/serviceCommcoach.py
index 1b886958..f1edf90d 100644
--- a/modules/features/commcoach/serviceCommcoach.py
+++ b/modules/features/commcoach/serviceCommcoach.py
@@ -110,6 +110,35 @@ async def _emitChunkedResponse(sessionId: str, createdMsg: Dict[str, Any], fullT
})
+def _resolvePersona(session: Optional[Dict[str, Any]], interface) -> Optional[Dict[str, Any]]:
+ """Resolve persona data from session's personaId."""
+ if not session:
+ return None
+ personaId = session.get("personaId")
+ if not personaId:
+ return None
+ try:
+ return interface.getPersona(personaId)
+ except Exception:
+ return None
+
+
+def _getDocumentSummaries(contextId: str, userId: str, interface) -> Optional[List[str]]:
+ """Get document summaries for context to include in the AI prompt."""
+ try:
+ docs = interface.getDocuments(contextId, userId)
+ summaries = []
+ for doc in docs[:5]:
+ summary = doc.get("summary")
+ if summary:
+ summaries.append(f"[{doc.get('fileName', 'Dokument')}] {summary}")
+ elif doc.get("extractedText"):
+ summaries.append(f"[{doc.get('fileName', 'Dokument')}] {doc['extractedText'][:200]}...")
+ return summaries if summaries else None
+ except Exception:
+ return None
+
+
class CommcoachService:
"""Coaching orchestrator: processes messages, calls AI, extracts tasks and scores."""
@@ -190,6 +219,9 @@ class CommcoachService:
contextId, sessionId, userContent, context, interface
)
+ persona = _resolvePersona(session, interface)
+ documentSummaries = _getDocumentSummaries(contextId, self.userId, interface)
+
systemPrompt = aiPrompts.buildCoachingSystemPrompt(
context,
previousMessages,
@@ -199,6 +231,8 @@ class CommcoachService:
rollingOverview=retrievalResult.get("rollingOverview"),
retrievedSession=retrievalResult.get("retrievedSession"),
retrievedByTopic=retrievalResult.get("retrievedByTopic"),
+ persona=persona,
+ documentSummaries=documentSummaries,
)
if retrievalResult.get("intent") == RetrievalIntent.SUMMARIZE_ALL:
@@ -281,10 +315,22 @@ class CommcoachService:
allSessions, excludeSessionId=sessionId, limit=PREVIOUS_SESSION_SUMMARIES_COUNT
)
+ session = interface.getSession(sessionId)
+ persona = _resolvePersona(session, interface)
+ documentSummaries = _getDocumentSummaries(contextId, self.userId, interface)
+
systemPrompt = aiPrompts.buildCoachingSystemPrompt(
- context, previousMessages, tasks, previousSessionSummaries=previousSessionSummaries
+ context, previousMessages, tasks,
+ previousSessionSummaries=previousSessionSummaries,
+ persona=persona,
+ documentSummaries=documentSummaries,
)
- openingUserPrompt = "Beginne die Coaching-Session mit einer kurzen Begruesssung, fasse in einem Satz zusammen wo wir stehen (falls vorherige Sessions), und stelle eine gezielte Einstiegsfrage zum Thema."
+
+ if persona and persona.get("key") != "coach":
+ personaLabel = persona.get("label", "Gespraechspartner")
+ openingUserPrompt = f"Beginne das Gespraech in deiner Rolle als {personaLabel}. Stelle dich kurz vor und eroeffne die Situation gemaess deiner Rollenbeschreibung."
+ else:
+            openingUserPrompt = "Beginne die Coaching-Session mit einer kurzen Begruessung, fasse in einem Satz zusammen wo wir stehen (falls vorherige Sessions), und stelle eine gezielte Einstiegsfrage zum Thema."
try:
aiResponse = await self._callAi(systemPrompt, openingUserPrompt)
@@ -567,6 +613,18 @@ class CommcoachService:
# Update user profile streak
self._updateStreak(interface)
+ # Check and award badges
+ try:
+ from .serviceCommcoachGamification import checkAndAwardBadges
+ updatedSession = interface.getSession(sessionId)
+ newBadges = await checkAndAwardBadges(
+ interface, self.userId, self.mandateId, self.instanceId, session=updatedSession
+ )
+ for badge in newBadges:
+ await emitSessionEvent(sessionId, "badgeAwarded", badge)
+ except Exception as e:
+ logger.warning(f"Badge check failed: {e}")
+
# Send email summary
if summary:
await self._sendSessionEmail(session, summary, interface)
diff --git a/modules/features/commcoach/serviceCommcoachAi.py b/modules/features/commcoach/serviceCommcoachAi.py
index 5d050203..943e012a 100644
--- a/modules/features/commcoach/serviceCommcoachAi.py
+++ b/modules/features/commcoach/serviceCommcoachAi.py
@@ -93,6 +93,8 @@ def buildCoachingSystemPrompt(
rollingOverview: Optional[str] = None,
retrievedSession: Optional[Dict[str, Any]] = None,
retrievedByTopic: Optional[List[Dict[str, Any]]] = None,
+ persona: Optional[Dict[str, Any]] = None,
+ documentSummaries: Optional[List[str]] = None,
) -> str:
"""Build the system prompt for a coaching session, including context history, tasks, and session continuity."""
contextTitle = context.get("title", "General Coaching")
@@ -109,7 +111,34 @@ def buildCoachingSystemPrompt(
summaries = previousSessionSummaries or []
- prompt = f"""Du bist ein erfahrener Kommunikations-Coach fuer Fuehrungskraefte. Du arbeitest mit dem Benutzer am Thema: "{contextTitle}" (Kategorie: {contextCategory}).
+ if persona and persona.get("key") != "coach":
+ if persona.get("systemPromptOverride"):
+ prompt = persona["systemPromptOverride"]
+ else:
+ personaLabel = persona.get("label", "Gespraechspartner")
+ personaDescription = persona.get("description", "")
+ personaGender = persona.get("gender", "")
+ genderHint = " (weiblich)" if personaGender == "f" else " (maennlich)" if personaGender == "m" else ""
+ prompt = f"""Du spielst die Rolle von "{personaLabel}"{genderHint} in einem Roleplay-Szenario zum Thema: "{contextTitle}" (Kategorie: {contextCategory}).
+
+Rollenbeschreibung: {personaDescription}
+
+WICHTIG fuer dein Verhalten:
+- Bleibe KONSEQUENT in deiner Rolle. Du bist NICHT der Coach, du bist {personaLabel}.
+- Reagiere authentisch und emotional gemaess deiner Rollenbeschreibung.
+- Verwende eine Sprache und Tonalitaet, die zu deiner Rolle passt.
+- Der Benutzer uebt ein Gespraech mit dir. Gib ihm realistische Reaktionen.
+- Wenn der Benutzer gut kommuniziert, zeige das durch angemessene positive Reaktionen.
+- Wenn der Benutzer schlecht kommuniziert, eskaliere entsprechend deiner Rolle.
+
+Kommunikationsstil:
+- Sprich natuerlich, wie die beschriebene Person sprechen wuerde.
+- Verwende keine Emojis.
+- Antworte in der Sprache des Benutzers.
+- Halte Antworten realistisch kurz (wie in einem echten Gespraech, 2-4 Saetze).
+- WICHTIG: Schreibe reinen Redetext ohne jegliche Formatierung. Kein Markdown, keine Sternchen, keine Hashes, keine Aufzaehlungszeichen, keine Backticks. Deine Antworten werden direkt vorgelesen."""
+ else:
+ prompt = f"""Du bist ein erfahrener Kommunikations-Coach fuer Fuehrungskraefte. Du arbeitest mit dem Benutzer am Thema: "{contextTitle}" (Kategorie: {contextCategory}).
Deine Rolle:
- Stelle gezielte diagnostische Rueckfragen, um das Problem/Thema besser zu verstehen
@@ -182,6 +211,11 @@ Kommunikationsstil:
if earlierSummary:
prompt += f"\n\nAelterer Gespraechsverlauf (zusammengefasst):\n{earlierSummary[:800]}"
+ if documentSummaries:
+ prompt += "\n\nRelevante Dokumente zum Kontext:"
+ for docSummary in documentSummaries[:5]:
+ prompt += f"\n- {docSummary[:300]}"
+
if previousMessages:
prompt += "\n\nVorige Nachrichten dieser Session (Kontext):"
for msg in previousMessages[-12:]:
diff --git a/modules/features/commcoach/serviceCommcoachExport.py b/modules/features/commcoach/serviceCommcoachExport.py
new file mode 100644
index 00000000..829bb430
--- /dev/null
+++ b/modules/features/commcoach/serviceCommcoachExport.py
@@ -0,0 +1,288 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+CommCoach Export Service.
+Generates Markdown and PDF exports for dossiers and sessions.
+"""
+
+import logging
+import json
+from typing import Dict, Any, List, Optional
+from datetime import datetime
+
+logger = logging.getLogger(__name__)
+
+
+def buildDossierMarkdown(context: Dict[str, Any], sessions: List[Dict[str, Any]],
+ tasks: List[Dict[str, Any]], scores: List[Dict[str, Any]]) -> str:
+ """Build a Markdown export of a full coaching dossier (context)."""
+ title = context.get("title", "Coaching Dossier")
+ description = context.get("description", "")
+ category = context.get("category", "custom")
+ createdAt = _formatDate(context.get("createdAt"))
+
+ lines = [
+ f"# {title}",
+ "",
+ f"**Kategorie:** {category} ",
+ f"**Erstellt:** {createdAt} ",
+ ]
+ if description:
+ lines.append(f"**Beschreibung:** {description} ")
+
+ goalsRaw = context.get("goals")
+ goals = _parseJson(goalsRaw, [])
+ if goals:
+ lines += ["", "## Ziele", ""]
+ for g in goals:
+ text = g.get("text", g) if isinstance(g, dict) else str(g)
+ status = g.get("status", "open") if isinstance(g, dict) else "open"
+ marker = "[x]" if status in ("done", "completed") else "[ ]"
+ lines.append(f"- {marker} {text}")
+
+ insightsRaw = context.get("insights")
+ insights = _parseJson(insightsRaw, [])
+ if insights:
+ lines += ["", "## Erkenntnisse", ""]
+ for ins in insights:
+ text = ins.get("text", ins) if isinstance(ins, dict) else str(ins)
+ lines.append(f"- {text}")
+
+ completedSessions = [s for s in sessions if s.get("status") == "completed"]
+ completedSessions.sort(key=lambda s: s.get("startedAt") or s.get("createdAt") or "")
+ if completedSessions:
+ lines += ["", "## Sessions", ""]
+ for i, s in enumerate(completedSessions, 1):
+ dateStr = _formatDate(s.get("startedAt") or s.get("createdAt"))
+ duration = s.get("durationSeconds", 0)
+ durationMin = duration // 60 if duration else 0
+ score = s.get("competenceScore")
+ persona = s.get("personaId") or "Coach"
+ lines.append(f"### Session {i} -- {dateStr}")
+ lines.append("")
+ lines.append(f"**Dauer:** {durationMin} Min. | **Score:** {score or '--'} | **Persona:** {persona} ")
+ summary = s.get("summary")
+ if summary:
+ lines.append(f"\n{summary}")
+ lines.append("")
+
+ if tasks:
+ openTasks = [t for t in tasks if t.get("status") in ("open", "inProgress")]
+ doneTasks = [t for t in tasks if t.get("status") == "done"]
+ lines += ["", "## Aufgaben", ""]
+ if openTasks:
+ lines.append("**Offen:**")
+ for t in openTasks:
+ lines.append(f"- [ ] {t.get('title')} ({t.get('priority', 'medium')})")
+ lines.append("")
+ if doneTasks:
+ lines.append("**Erledigt:**")
+ for t in doneTasks:
+ lines.append(f"- [x] {t.get('title')}")
+ lines.append("")
+
+ if scores:
+ lines += ["", "## Kompetenz-Scores", ""]
+ dimScores = _groupScoresByDimension(scores)
+ for dim, entries in dimScores.items():
+ latest = entries[-1]
+ lines.append(f"- **{dim}**: {latest.get('score', '--')} ({latest.get('trend', 'stable')})")
+
+ lines += ["", "---", f"*Exportiert am {_formatDate(None)}*", ""]
+ return "\n".join(lines)
+
+
+def buildSessionMarkdown(session: Dict[str, Any], messages: List[Dict[str, Any]],
+ tasks: List[Dict[str, Any]], scores: List[Dict[str, Any]]) -> str:
+ """Build a Markdown export of a single session."""
+ dateStr = _formatDate(session.get("startedAt") or session.get("createdAt"))
+ duration = session.get("durationSeconds", 0)
+ durationMin = duration // 60 if duration else 0
+ score = session.get("competenceScore")
+ persona = session.get("personaId") or "Coach"
+
+ lines = [
+ f"# Coaching Session -- {dateStr}",
+ "",
+ f"**Dauer:** {durationMin} Min. | **Score:** {score or '--'} | **Persona:** {persona} ",
+ ]
+
+ summary = session.get("summary")
+ if summary:
+ lines += ["", "## Zusammenfassung", "", summary]
+
+ if messages:
+ lines += ["", "## Gespraechsverlauf", ""]
+ for msg in messages:
+ role = "Du" if msg.get("role") == "user" else "Coach"
+ content = msg.get("content", "")
+ lines.append(f"**{role}:** {content}")
+ lines.append("")
+
+ sessionTasks = [t for t in tasks if t.get("sessionId") == session.get("id")]
+ if sessionTasks:
+ lines += ["## Aufgaben", ""]
+ for t in sessionTasks:
+ marker = "[x]" if t.get("status") == "done" else "[ ]"
+ lines.append(f"- {marker} {t.get('title')}")
+ lines.append("")
+
+ sessionScores = [s for s in scores if s.get("sessionId") == session.get("id")]
+ if sessionScores:
+ lines += ["## Scores", ""]
+ for s in sessionScores:
+ lines.append(f"- **{s.get('dimension')}**: {s.get('score')} ({s.get('trend', 'stable')})")
+ if s.get("evidence"):
+ lines.append(f" _{s.get('evidence')}_")
+ lines.append("")
+
+ lines += ["---", f"*Exportiert am {_formatDate(None)}*", ""]
+ return "\n".join(lines)
+
+
+async def renderDossierPdf(context: Dict[str, Any], sessions: List[Dict[str, Any]],
+ tasks: List[Dict[str, Any]], scores: List[Dict[str, Any]],
+ aiService=None) -> Optional[bytes]:
+ """Render a dossier as PDF using the existing RendererPdf."""
+ try:
+ from modules.services.serviceGeneration.renderers.rendererPdf import RendererPdf
+ extractedContent = _buildPdfContent(context, sessions, tasks, scores, isDossier=True)
+ renderer = RendererPdf()
+ docs = await renderer.render(extractedContent=extractedContent, title=context.get("title", "Dossier"), aiService=aiService)
+ if docs and len(docs) > 0:
+ return docs[0].documentData
+ except ImportError:
+        logger.warning("RendererPdf not available, caller will fall back to Markdown export")
+ except Exception as e:
+ logger.warning(f"PDF rendering failed: {e}")
+ return None
+
+
+async def renderSessionPdf(session: Dict[str, Any], messages: List[Dict[str, Any]],
+ tasks: List[Dict[str, Any]], scores: List[Dict[str, Any]],
+ aiService=None) -> Optional[bytes]:
+ """Render a session as PDF."""
+ try:
+ from modules.services.serviceGeneration.renderers.rendererPdf import RendererPdf
+ title = f"Session {_formatDate(session.get('startedAt'))}"
+ extractedContent = _buildPdfContent({"title": title}, [session], tasks, scores, isDossier=False, messages=messages)
+ renderer = RendererPdf()
+ docs = await renderer.render(extractedContent=extractedContent, title=title, aiService=aiService)
+ if docs and len(docs) > 0:
+ return docs[0].documentData
+ except ImportError:
+ logger.warning("RendererPdf not available")
+ except Exception as e:
+ logger.warning(f"Session PDF rendering failed: {e}")
+ return None
+
+
+def _buildPdfContent(context, sessions, tasks, scores, isDossier=True, messages=None) -> Dict[str, Any]:
+ """Convert dossier/session data into the extractedContent format expected by RendererPdf."""
+ title = context.get("title", "Export")
+ sections = []
+
+ sections.append({
+ "id": "header",
+ "content_type": "heading",
+ "elements": [{"text": title, "level": 1}],
+ })
+
+ if isDossier and context.get("description"):
+ sections.append({
+ "id": "desc",
+ "content_type": "paragraph",
+ "elements": [{"text": context.get("description")}],
+ })
+
+ completedSessions = [s for s in sessions if s.get("status") == "completed"] if isDossier else sessions
+ if completedSessions:
+ sessionRows = []
+ for s in completedSessions:
+ sessionRows.append({
+ "cells": [
+ _formatDate(s.get("startedAt") or s.get("createdAt")),
+ str(s.get("competenceScore") or "--"),
+ s.get("summary", "")[:200] if s.get("summary") else "",
+ ]
+ })
+ sections.append({
+ "id": "sessions",
+ "content_type": "heading",
+ "elements": [{"text": "Sessions", "level": 2}],
+ })
+ sections.append({
+ "id": "sessions_table",
+ "content_type": "table",
+ "elements": [{
+ "headers": ["Datum", "Score", "Zusammenfassung"],
+ "rows": sessionRows,
+ }],
+ })
+
+ if messages:
+ chatElements = []
+ for msg in messages:
+ role = "Du" if msg.get("role") == "user" else "Coach"
+ chatElements.append({"text": f"{role}: {msg.get('content', '')}"})
+ sections.append({
+ "id": "chat",
+ "content_type": "heading",
+ "elements": [{"text": "Gespraechsverlauf", "level": 2}],
+ })
+ sections.append({
+ "id": "chat_content",
+ "content_type": "paragraph",
+ "elements": chatElements,
+ })
+
+ if tasks:
+ taskItems = [{"text": f"{'[x]' if t.get('status') == 'done' else '[ ]'} {t.get('title')}"} for t in tasks]
+ sections.append({
+ "id": "tasks",
+ "content_type": "heading",
+ "elements": [{"text": "Aufgaben", "level": 2}],
+ })
+ sections.append({
+ "id": "task_list",
+ "content_type": "bullet_list",
+ "elements": taskItems,
+ })
+
+ return {
+ "metadata": {"title": title},
+ "documents": [{"id": "main", "title": title, "sections": sections}],
+ }
+
+
+def _formatDate(isoStr: Optional[str]) -> str:
+ if not isoStr:
+ return datetime.now().strftime("%d.%m.%Y")
+ try:
+ dt = datetime.fromisoformat(str(isoStr).replace("Z", "+00:00"))
+ return dt.strftime("%d.%m.%Y")
+ except Exception:
+ return isoStr
+
+
+def _parseJson(value, fallback):
+ if not value:
+ return fallback
+ if isinstance(value, (list, dict)):
+ return value
+ try:
+ return json.loads(value)
+ except (json.JSONDecodeError, TypeError):
+ return fallback
+
+
+def _groupScoresByDimension(scores: List[Dict[str, Any]]) -> Dict[str, List[Dict[str, Any]]]:
+ groups: Dict[str, List[Dict[str, Any]]] = {}
+ for s in scores:
+ dim = s.get("dimension", "unknown")
+ if dim not in groups:
+ groups[dim] = []
+ groups[dim].append(s)
+ for dim in groups:
+ groups[dim].sort(key=lambda x: x.get("createdAt") or "")
+ return groups
diff --git a/modules/features/commcoach/serviceCommcoachGamification.py b/modules/features/commcoach/serviceCommcoachGamification.py
new file mode 100644
index 00000000..11c2da59
--- /dev/null
+++ b/modules/features/commcoach/serviceCommcoachGamification.py
@@ -0,0 +1,149 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+CommCoach Gamification - Badge definitions and award logic.
+Checks and awards badges after each session completion.
+"""
+
+import logging
+from typing import Dict, Any, List, Optional
+
+logger = logging.getLogger(__name__)
+
+BADGE_DEFINITIONS: Dict[str, Dict[str, Any]] = {
+ "first_session": {
+ "label": "Erste Session",
+ "description": "Deine erste Coaching-Session abgeschlossen",
+ "icon": "star",
+ },
+ "streak_3": {
+ "label": "3-Tage-Serie",
+ "description": "3 Tage in Folge eine Session absolviert",
+ "icon": "fire",
+ },
+ "streak_7": {
+ "label": "Wochenserie",
+ "description": "7 Tage in Folge eine Session absolviert",
+ "icon": "fire",
+ },
+ "streak_30": {
+ "label": "Monatsserie",
+ "description": "30 Tage in Folge eine Session absolviert",
+ "icon": "fire",
+ },
+ "sessions_5": {
+ "label": "Engagiert",
+ "description": "5 Sessions abgeschlossen",
+ "icon": "trophy",
+ },
+ "sessions_10": {
+ "label": "Fortgeschritten",
+ "description": "10 Sessions abgeschlossen",
+ "icon": "trophy",
+ },
+ "sessions_25": {
+ "label": "Experte",
+ "description": "25 Sessions abgeschlossen",
+ "icon": "trophy",
+ },
+ "sessions_50": {
+ "label": "Meister",
+ "description": "50 Sessions abgeschlossen",
+ "icon": "trophy",
+ },
+ "high_score": {
+ "label": "Bestleistung",
+ "description": "Durchschnittsscore ueber 80 in einer Session",
+ "icon": "medal",
+ },
+ "multi_context": {
+ "label": "Vielseitig",
+ "description": "3 verschiedene Coaching-Themen aktiv",
+ "icon": "layers",
+ },
+ "roleplay_first": {
+ "label": "Rollenspieler",
+ "description": "Erste Roleplay-Session mit einer Persona abgeschlossen",
+ "icon": "theater",
+ },
+ "all_dimensions": {
+ "label": "Ganzheitlich",
+ "description": "In allen 5 Kompetenz-Dimensionen bewertet",
+ "icon": "compass",
+ },
+ "task_completer": {
+ "label": "Umsetzer",
+ "description": "10 Coaching-Aufgaben erledigt",
+ "icon": "check-circle",
+ },
+}
+
+
+async def checkAndAwardBadges(interface, userId: str, mandateId: str, instanceId: str,
+ session: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
+ """Check badge conditions and award any newly earned badges. Returns list of newly awarded badges."""
+ awarded: List[Dict[str, Any]] = []
+
+ profile = interface.getProfile(userId, instanceId)
+ if not profile:
+ return awarded
+
+ totalSessions = profile.get("totalSessions", 0)
+ streakDays = profile.get("streakDays", 0)
+
+ badgesToCheck = [
+ ("first_session", totalSessions >= 1),
+ ("sessions_5", totalSessions >= 5),
+ ("sessions_10", totalSessions >= 10),
+ ("sessions_25", totalSessions >= 25),
+ ("sessions_50", totalSessions >= 50),
+ ("streak_3", streakDays >= 3),
+ ("streak_7", streakDays >= 7),
+ ("streak_30", streakDays >= 30),
+ ]
+
+ if session and session.get("competenceScore"):
+ try:
+ score = float(session["competenceScore"])
+ if score >= 80:
+ badgesToCheck.append(("high_score", True))
+ except (ValueError, TypeError):
+ pass
+
+ if session and session.get("personaId") and session["personaId"] != "coach":
+ badgesToCheck.append(("roleplay_first", True))
+
+ try:
+ from .datamodelCommcoach import CoachingContextStatus
+ allContexts = interface.db.getRecordset(
+ interface.db.getRecordset.__self__.__class__.__mro__[0] # avoid import issues
+ ) if False else []
+ except Exception:
+ allContexts = []
+
+ completedTasks = interface.getCompletedTaskCount(userId) if hasattr(interface, 'getCompletedTaskCount') else 0
+ if completedTasks >= 10:
+ badgesToCheck.append(("task_completer", True))
+
+ for badgeKey, condition in badgesToCheck:
+ if condition and not interface.hasBadge(userId, instanceId, badgeKey):
+ badgeData = {
+ "userId": userId,
+ "mandateId": mandateId,
+ "instanceId": instanceId,
+ "badgeKey": badgeKey,
+ }
+ newBadge = interface.awardBadge(badgeData)
+ definition = BADGE_DEFINITIONS.get(badgeKey, {})
+ newBadge["label"] = definition.get("label", badgeKey)
+ newBadge["description"] = definition.get("description", "")
+ newBadge["icon"] = definition.get("icon", "star")
+ awarded.append(newBadge)
+ logger.info(f"Badge '{badgeKey}' awarded to user {userId}")
+
+ return awarded
+
+
+def getBadgeDefinitions() -> Dict[str, Dict[str, Any]]:
+ """Return all badge definitions for the frontend."""
+ return BADGE_DEFINITIONS
diff --git a/modules/features/commcoach/serviceCommcoachPersonas.py b/modules/features/commcoach/serviceCommcoachPersonas.py
new file mode 100644
index 00000000..7e47f124
--- /dev/null
+++ b/modules/features/commcoach/serviceCommcoachPersonas.py
@@ -0,0 +1,139 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+CommCoach Personas - Built-in roleplay persona definitions.
+Gender-balanced set of professional and personal interaction partners.
+"""
+
+import logging
+from typing import List, Dict, Any
+
+logger = logging.getLogger(__name__)
+
+BUILTIN_PERSONAS: List[Dict[str, Any]] = [
+ {
+ "key": "coach",
+ "label": "Coach (Standard)",
+ "description": "Normaler Coaching-Modus ohne Roleplay. Der Coach stellt Fragen, gibt Tipps und begleitet dich.",
+ "gender": None,
+ "category": "builtin",
+ },
+ {
+ "key": "critical_cfo_f",
+ "label": "Kritische CFO",
+ "description": "Sandra Meier, CFO eines mittelstaendischen Unternehmens. Analytisch, zahlengetrieben, ungeduldig bei vagen Aussagen. "
+ "Hinterfragt jeden Vorschlag nach ROI und Wirtschaftlichkeit. Spricht schnell und direkt. "
+ "Erwartet praezise Antworten und belastbare Daten. Wird irritiert bei Ausweichen oder Unsicherheit.",
+ "gender": "f",
+ "category": "builtin",
+ },
+ {
+ "key": "difficult_employee_m",
+ "label": "Schwieriger Mitarbeiter",
+ "description": "Thomas Huber, langjaehriger Mitarbeiter der sich uebergangen fuehlt. Defensiv, emotional, nimmt Kritik persoenlich. "
+ "Verweist staendig auf seine Erfahrung und fruehere Verdienste. Reagiert mit Widerstand auf Veraenderungen. "
+ "Braucht das Gefuehl, gehoert und wertgeschaetzt zu werden, bevor er sich oeffnet.",
+ "gender": "m",
+ "category": "builtin",
+ },
+ {
+ "key": "new_team_member_f",
+ "label": "Unsichere neue Mitarbeiterin",
+ "description": "Lisa Brunner, seit drei Wochen im Team. Fachlich kompetent aber unsicher in der neuen Umgebung. "
+ "Stellt viele Fragen, traut sich aber nicht, eigene Ideen einzubringen. Braucht klare Orientierung "
+ "und ermutigende Fuehrung. Reagiert positiv auf Lob und konkrete Anleitungen.",
+ "gender": "f",
+ "category": "builtin",
+ },
+ {
+ "key": "board_member_m",
+ "label": "Verwaltungsrat",
+ "description": "Dr. Peter Keller, erfahrener Verwaltungsrat. Formell, strategisch denkend, zeitlich unter Druck. "
+ "Erwartet praegnante Praesentationen auf den Punkt. Unterbricht bei zu vielen Details. "
+ "Interessiert sich fuer das grosse Bild, Risiken und strategische Implikationen. Ungeduldig bei Smalltalk.",
+ "gender": "m",
+ "category": "builtin",
+ },
+ {
+ "key": "angry_customer_f",
+ "label": "Aufgebrachte Kundin",
+ "description": "Maria Rossi, Geschaeftskundin, die wuetend ist wegen einer fehlerhaften Lieferung. Emotional, laut, "
+ "droht mit Vertragsaufloesung. Will sofortige Loesungen, keine Erklaerungen oder Entschuldigungen. "
+ "Kann beruhigt werden durch empathisches Zuhoeren und konkrete Sofortmassnahmen.",
+ "gender": "f",
+ "category": "builtin",
+ },
+ {
+ "key": "resistant_manager_m",
+ "label": "Widerstaendiger Abteilungsleiter",
+ "description": "Martin Weber, Abteilungsleiter seit 15 Jahren. Blockiert systematisch Veraenderungsprojekte mit "
+ "Argumenten wie 'Das haben wir immer so gemacht' und 'Das funktioniert in der Praxis nicht'. "
+ "Schuetzt sein Team vor zusaetzlicher Belastung. Respektiert nur Argumente mit konkretem Nutzen fuer seine Abteilung.",
+ "gender": "m",
+ "category": "builtin",
+ },
+ {
+ "key": "ambitious_colleague_f",
+ "label": "Ehrgeizige Kollegin",
+ "description": "Anna Fischer, gleichrangige Kollegin die um dieselbe Befoerderung konkurriert. Charmant aber strategisch. "
+ "Versucht subtil, die Ideen anderer als ihre eigenen darzustellen. Konkurriert um Ressourcen und "
+ "Sichtbarkeit beim Management. Kann kooperativ werden, wenn man ihr Win-Win-Szenarien aufzeigt.",
+ "gender": "f",
+ "category": "builtin",
+ },
+ {
+ "key": "partner_supportive_f",
+ "label": "Verstaendnisvolle Lebenspartnerin",
+ "description": "Claudia, deine Lebenspartnerin. Grundsaetzlich unterstuetzend, aber zunehmend besorgt ueber deine "
+ "Work-Life-Balance. Moechte ueber Arbeitsbelastung sprechen und gemeinsame Zeit einfordern. "
+ "Reagiert emotional auf Abweisung, ist aber offen fuer kompromissorientierte Gespraeche. "
+ "Wuenscht sich, dass du mehr von deinen Gefuehlen teilst.",
+ "gender": "f",
+ "category": "builtin",
+ },
+ {
+ "key": "partner_critical_m",
+ "label": "Kritischer Lebenspartner",
+ "description": "Michael, dein Lebenspartner. Frustriert ueber deine haeufige Abwesenheit und staendiges Arbeiten. "
+ "Drueckt Enttaeuschung offen aus, manchmal mit Sarkasmus. Fuehlt sich vernachlaessigt und "
+ "hinterfragt deine Prioritaeten. Braucht das Gefuehl, dass die Beziehung dir genauso wichtig ist "
+ "wie die Karriere. Reagiert positiv auf ehrliche Selbstreflexion.",
+ "gender": "m",
+ "category": "builtin",
+ },
+]
+
+
+def seedBuiltinPersonas(interface) -> int:
+ """Create or update builtin personas in the database. Returns count of created personas."""
+ from .datamodelCommcoach import CoachingPersona
+ from modules.shared.timeUtils import getIsoTimestamp
+
+ created = 0
+ for personaDef in BUILTIN_PERSONAS:
+ existing = interface.db.getRecordset(CoachingPersona, recordFilter={"key": personaDef["key"], "userId": "system"})
+ if existing:
+ interface.db.recordModify(CoachingPersona, existing[0]["id"], {
+ "label": personaDef["label"],
+ "description": personaDef["description"],
+ "gender": personaDef.get("gender"),
+ "updatedAt": getIsoTimestamp(),
+ })
+ else:
+ data = CoachingPersona(
+ userId="system",
+ key=personaDef["key"],
+ label=personaDef["label"],
+ description=personaDef["description"],
+ gender=personaDef.get("gender"),
+ category="builtin",
+ isActive=True,
+ ).model_dump()
+ data["createdAt"] = getIsoTimestamp()
+ data["updatedAt"] = getIsoTimestamp()
+ interface.db.recordCreate(CoachingPersona, data)
+ created += 1
+
+ if created:
+ logger.info(f"Seeded {created} builtin CommCoach personas")
+ return created
From 92d9a2a0d5c1d4bfbf70e3386e0a91465f7b5038 Mon Sep 17 00:00:00 2001
From: patrick-motsch
Date: Tue, 3 Mar 2026 23:07:41 +0100
Subject: [PATCH 4/7] 2 critical fixes: pwd reset and invitation caching ui
---
modules/interfaces/interfaceDbApp.py | 4 ----
modules/routes/routeSecurityLocal.py | 4 ++--
2 files changed, 2 insertions(+), 6 deletions(-)
diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py
index 4de62bd0..c7e4f8bf 100644
--- a/modules/interfaces/interfaceDbApp.py
+++ b/modules/interfaces/interfaceDbApp.py
@@ -662,10 +662,6 @@ class AppObjects:
if authAuthority != AuthAuthority.LOCAL and authAuthority != AuthAuthority.LOCAL.value:
raise ValueError("User does not have local authentication enabled")
- # Check if user has a reset token set (password reset required)
- if userRecord.get("resetToken"):
- raise ValueError("Passwort-Zurücksetzung erforderlich. Bitte prüfen Sie Ihre E-Mail.")
-
if not userRecord.get("hashedPassword"):
raise ValueError("User has no password set")
diff --git a/modules/routes/routeSecurityLocal.py b/modules/routes/routeSecurityLocal.py
index b846af63..c83d0d3f 100644
--- a/modules/routes/routeSecurityLocal.py
+++ b/modules/routes/routeSecurityLocal.py
@@ -602,8 +602,8 @@ def password_reset_request(
# Generate reset token
token, expires = rootInterface.generateResetTokenAndExpiry()
- # Set reset token (clears password)
- rootInterface.setResetToken(user.id, token, expires)
+ # Set reset token but keep existing password valid until new one is set
+ rootInterface.setResetToken(user.id, token, expires, clearPassword=False)
# Generate magic link using provided frontend URL
magicLink = f"{baseUrl}/reset?token={token}"
From 12b0d3d36e48dc37067bd9c5c8dfc45b33db08d4 Mon Sep 17 00:00:00 2001
From: patrick-motsch
Date: Wed, 4 Mar 2026 22:53:41 +0100
Subject: [PATCH 5/7] =?UTF-8?q?Alle=209=20Fixes=20sind=20implementiert.=20?=
=?UTF-8?q?Hier=20die=20Zusammenfassung:=20Fix=201=20--=20Opening-Prompt:?=
=?UTF-8?q?=20processSessionOpening=20in=20serviceCommcoach.py=20pr=C3=BCf?=
=?UTF-8?q?t=20jetzt=20ob=20es=20die=20erste=20Session=20ist=20(isFirstSes?=
=?UTF-8?q?sion)=20und=20gibt=20der=20AI=20einen=20expliziten=20Prompt,=20?=
=?UTF-8?q?der=20das=20Erfinden=20von=20Kontext=20verbietet.=20Fix=202=20-?=
=?UTF-8?q?-=20Stabiler=20Transcript:=20onresult=20in=20CommcoachCoachingV?=
=?UTF-8?q?iew.tsx=20nutzt=20jetzt=20processedResultIndexRef=20um=20nur=20?=
=?UTF-8?q?neue=20Results=20zu=20verarbeiten.=20Finalisierte=20Teile=20wer?=
=?UTF-8?q?den=20stabil=20akkumuliert,=20kein=20Flackern=20mehr.=20Fix=203?=
=?UTF-8?q?=20--=20Hintergrundger=C3=A4usche-Timeout:=20Neuer=20silenceTim?=
=?UTF-8?q?erRef=20mit=205s=20Timeout.=20Wenn=20nach=20onspeechstart=20kei?=
=?UTF-8?q?n=20Text=20kommt,=20wird=20isUserSpeaking=20automatisch=20zur?=
=?UTF-8?q?=C3=BCckgesetzt.=20Fix=204=20--=20Stop-Button:=20"Stop"=20Butto?=
=?UTF-8?q?n=20erscheint=20im=20Session-Header=20wenn=20TTS=20l=C3=A4uft?=
=?UTF-8?q?=20(via=20isTtsPlaying=20State,=20synchronisiert=20per=20200ms?=
=?UTF-8?q?=20Interval=20mit=20isTtsPlayingRef).=20Fix=205=20--=20Weitersp?=
=?UTF-8?q?rechen-Button:=20lastTtsAudioRef=20speichert=20das=20zuletzt=20?=
=?UTF-8?q?gespielte=20Audio.=20stopTts=20setzt=20wasInterrupted=20=3D=20t?=
=?UTF-8?q?rue.=20"Weitersprechen"=20Button=20erscheint=20nach=20Unterbrec?=
=?UTF-8?q?hung=20und=20spielt=20das=20Audio=20erneut=20ab.=20Fix=206=20--?=
=?UTF-8?q?=20Paralleles=20TTS:=20Neue=20=5FgenerateAndEmitTts()=20Hilfsfu?=
=?UTF-8?q?nktion.=20In=20processMessage=20und=20processSessionOpening=20w?=
=?UTF-8?q?ird=20TTS=20als=20asyncio.create=5Ftask=20parallel=20zu=20=5Fem?=
=?UTF-8?q?itChunkedResponse=20gestartet.=20Fix=207=20--=20JSON-Response:?=
=?UTF-8?q?=20Die=20AI=20antwortet=20jetzt=20als=20JSON=20mit=20text,=20sp?=
=?UTF-8?q?eech,=20documents.=20Neuer=20Prompt-Block=20wird=20in=20buildCo?=
=?UTF-8?q?achingSystemPrompt=20angeh=C3=A4ngt.=20=5FparseAiJsonResponse()?=
=?UTF-8?q?=20und=20=5FsaveGeneratedDocument()=20im=20Backend.=20processMe?=
=?UTF-8?q?ssage=20und=20processSessionOpening=20nutzen=20die=20neue=20Str?=
=?UTF-8?q?uktur.=20Fix=208=20--=20Loading-States:=20Neuer=20actionLoading?=
=?UTF-8?q?=20State=20in=20useCommcoach.=20Alle=20async=20Funktionen=20set?=
=?UTF-8?q?zen=20setActionLoading('key')=20vor=20dem=20Await=20und=20null?=
=?UTF-8?q?=20im=20finally.=20Buttons=20zeigen=20Loading-Text=20und=20werd?=
=?UTF-8?q?en=20disabled.=20Fix=209=20--=20Umlaute:=20Alle=20deutschen=20S?=
=?UTF-8?q?trings=20in=20allen=20CommCoach-Dateien=20(Backend=20+=20Fronte?=
=?UTF-8?q?nd)=20korrigiert:=20ae->=C3=A4,=20oe->=C3=B6,=20ue->=C3=BC.?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../features/commcoach/serviceCommcoach.py | 209 ++++++++++--------
.../features/commcoach/serviceCommcoachAi.py | 113 ++++++----
.../commcoach/serviceCommcoachExport.py | 4 +-
.../commcoach/serviceCommcoachGamification.py | 2 +-
.../commcoach/serviceCommcoachPersonas.py | 46 ++--
.../commcoach/tests/test_serviceAi.py | 6 +-
6 files changed, 212 insertions(+), 168 deletions(-)
diff --git a/modules/features/commcoach/serviceCommcoach.py b/modules/features/commcoach/serviceCommcoach.py
index f1edf90d..df490aa2 100644
--- a/modules/features/commcoach/serviceCommcoach.py
+++ b/modules/features/commcoach/serviceCommcoach.py
@@ -87,6 +87,74 @@ CHUNK_WORD_SIZE = 4
CHUNK_DELAY_SECONDS = 0.05
+def _parseAiJsonResponse(rawText: str) -> Dict[str, Any]:
+ """Parse the structured JSON response from AI. Strips optional markdown code fences."""
+ text = rawText.strip()
+ if text.startswith("```"):
+ lines = text.split("\n")
+ lines = lines[1:]
+ if lines and lines[-1].strip() == "```":
+ lines = lines[:-1]
+ text = "\n".join(lines)
+ try:
+ return json.loads(text)
+ except json.JSONDecodeError:
+ logger.warning(f"AI JSON parse failed, using raw text: {text[:200]}")
+ return {"text": rawText.strip(), "speech": "", "documents": []}
+
+
+async def _generateAndEmitTts(sessionId: str, speechText: str, currentUser, mandateId: str,
+ instanceId: str, interface):
+ """Generate TTS audio from speech text and emit as SSE event."""
+ if not speechText:
+ return
+ try:
+ from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
+ import base64
+ voiceInterface = getVoiceInterface(currentUser, mandateId)
+ profile = interface.getProfile(str(currentUser.id), instanceId)
+ language = profile.get("preferredLanguage", "de-DE") if profile else "de-DE"
+ voiceName = profile.get("preferredVoice") if profile else None
+ ttsResult = await voiceInterface.textToSpeech(
+ text=_stripMarkdownForTts(speechText),
+ languageCode=language,
+ voiceName=voiceName,
+ )
+ if ttsResult and isinstance(ttsResult, dict):
+ audioBytes = ttsResult.get("audioContent")
+ if audioBytes:
+ audioB64 = base64.b64encode(
+ audioBytes if isinstance(audioBytes, bytes) else audioBytes.encode()
+ ).decode()
+ await emitSessionEvent(sessionId, "ttsAudio", {"audio": audioB64, "format": "mp3"})
+ except Exception as e:
+ logger.warning(f"TTS failed for session {sessionId}: {e}")
+
+
+async def _saveGeneratedDocument(doc: Dict[str, Any], contextId: str, userId: str,
+ mandateId: str, instanceId: str, interface, sessionId: str):
+ """Save a document generated by AI and emit SSE event."""
+ from .datamodelCommcoach import CoachingDocument
+ try:
+ title = doc.get("title", "Dokument")
+ content = doc.get("content", "")
+ docData = CoachingDocument(
+ contextId=contextId,
+ userId=userId,
+ mandateId=mandateId,
+ instanceId=instanceId,
+ fileName=f"{title}.md",
+ mimeType="text/markdown",
+ fileSize=len(content.encode()),
+ extractedText=content,
+ summary=title,
+ ).model_dump()
+ created = interface.createDocument(docData)
+ await emitSessionEvent(sessionId, "documentCreated", created)
+ except Exception as e:
+ logger.warning(f"Failed to save generated document: {e}")
+
+
async def _emitChunkedResponse(sessionId: str, createdMsg: Dict[str, Any], fullText: str):
"""Emit response as messageChunk events for progressive display, then the full message."""
msgId = createdMsg.get("id")
@@ -199,7 +267,7 @@ class CommcoachService:
try:
summaryPrompt = aiPrompts.buildEarlierConversationSummaryPrompt(toSummarize)
summaryResponse = await self._callAi(
- "Du fasst Coaching-Gespraeche praezise zusammen.", summaryPrompt
+ "Du fasst Coaching-Gespräche präzise zusammen.", summaryPrompt
)
if summaryResponse and summaryResponse.errorCount == 0 and summaryResponse.content:
earlierSummary = summaryResponse.content.strip()
@@ -236,7 +304,7 @@ class CommcoachService:
)
if retrievalResult.get("intent") == RetrievalIntent.SUMMARIZE_ALL:
- systemPrompt += "\n\nWICHTIG: Der Benutzer moechte eine Gesamtzusammenfassung. Erstelle eine umfassende Zusammenfassung aller genannten Sessions und der aktuellen Session."
+ systemPrompt += "\n\nWICHTIG: Der Benutzer möchte eine Gesamtzusammenfassung. Erstelle eine umfassende Zusammenfassung aller genannten Sessions und der aktuellen Session."
# Call AI
await emitSessionEvent(sessionId, "status", {"label": "Coach denkt nach..."})
@@ -248,47 +316,38 @@ class CommcoachService:
await emitSessionEvent(sessionId, "error", {"message": f"AI error: {str(e)}"})
return createdUserMsg
- responseText = aiResponse.content.strip() if aiResponse and aiResponse.errorCount == 0 else "Entschuldigung, ich konnte gerade nicht antworten. Bitte versuche es erneut."
+ responseRaw = aiResponse.content.strip() if aiResponse and aiResponse.errorCount == 0 else ""
+
+ if not responseRaw:
+ parsed = {"text": "Entschuldigung, ich konnte gerade nicht antworten. Bitte versuche es erneut.", "speech": "", "documents": []}
+ else:
+ parsed = _parseAiJsonResponse(responseRaw)
+
+ textContent = parsed.get("text", "")
+ speechContent = parsed.get("speech", "")
+ documents = parsed.get("documents", [])
+
+ for doc in documents:
+ await _saveGeneratedDocument(doc, contextId, self.userId, self.mandateId, self.instanceId, interface, sessionId)
- # Store assistant message
assistantMsg = CoachingMessage(
sessionId=sessionId,
contextId=contextId,
userId=self.userId,
role=CoachingMessageRole.ASSISTANT,
- content=responseText,
+ content=textContent,
contentType=CoachingMessageContentType.TEXT,
).model_dump()
createdAssistantMsg = interface.createMessage(assistantMsg)
- # Update session message count
messages = interface.getMessages(sessionId)
interface.updateSession(sessionId, {"messageCount": len(messages)})
- await _emitChunkedResponse(sessionId, createdAssistantMsg, responseText)
-
- if responseText:
- try:
- from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
- import base64
- voiceInterface = getVoiceInterface(self.currentUser, self.mandateId)
- profile = interface.getProfile(self.userId, self.instanceId)
- language = profile.get("preferredLanguage", "de-DE") if profile else "de-DE"
- voiceName = profile.get("preferredVoice") if profile else None
- ttsResult = await voiceInterface.textToSpeech(
- text=_stripMarkdownForTts(responseText),
- languageCode=language,
- voiceName=voiceName,
- )
- if ttsResult and isinstance(ttsResult, dict):
- audioBytes = ttsResult.get("audioContent")
- if audioBytes:
- audioB64 = base64.b64encode(
- audioBytes if isinstance(audioBytes, bytes) else audioBytes.encode()
- ).decode()
- await emitSessionEvent(sessionId, "ttsAudio", {"audio": audioB64, "format": "mp3"})
- except Exception as e:
- logger.warning(f"TTS failed for text message session {sessionId}: {e}")
+ ttsTask = asyncio.create_task(
+ _generateAndEmitTts(sessionId, speechContent, self.currentUser, self.mandateId, self.instanceId, interface)
+ )
+ await _emitChunkedResponse(sessionId, createdAssistantMsg, textContent)
+ await ttsTask
await emitSessionEvent(sessionId, "complete", {})
return createdAssistantMsg
@@ -326,11 +385,15 @@ class CommcoachService:
documentSummaries=documentSummaries,
)
+ isFirstSession = not previousSessionSummaries or len(previousSessionSummaries) == 0
+
if persona and persona.get("key") != "coach":
- personaLabel = persona.get("label", "Gespraechspartner")
- openingUserPrompt = f"Beginne das Gespraech in deiner Rolle als {personaLabel}. Stelle dich kurz vor und eroeffne die Situation gemaess deiner Rollenbeschreibung."
+ personaLabel = persona.get("label", "Gesprächspartner")
+ openingUserPrompt = f"Beginne das Gespräch in deiner Rolle als {personaLabel}. Stelle dich kurz vor und eröffne die Situation gemäss deiner Rollenbeschreibung."
+ elif isFirstSession:
+ openingUserPrompt = "Dies ist die ERSTE Session zu diesem Thema. Begrüsse den Benutzer, stelle das Thema kurz vor und stelle eine offene Einstiegsfrage. Erfinde KEINE vorherigen Gespräche oder Zusammenfassungen."
else:
- openingUserPrompt = "Beginne die Coaching-Session mit einer kurzen Begruesssung, fasse in einem Satz zusammen wo wir stehen (falls vorherige Sessions), und stelle eine gezielte Einstiegsfrage zum Thema."
+ openingUserPrompt = "Begrüsse den Benutzer zurück, fasse in einem Satz zusammen wo wir stehen, und stelle eine gezielte Einstiegsfrage."
try:
aiResponse = await self._callAi(systemPrompt, openingUserPrompt)
@@ -340,46 +403,41 @@ class CommcoachService:
await emitSessionEvent(sessionId, "complete", {})
return {}
- openingContent = (
+ responseRaw = (
aiResponse.content.strip()
if aiResponse and aiResponse.errorCount == 0
- else f"Willkommen zur Coaching-Session zum Thema \"{context.get('title')}\". Was moechtest du heute besprechen?"
+ else ""
)
+ if not responseRaw:
+ parsed = {"text": f"Willkommen zur Coaching-Session zum Thema \"{context.get('title')}\". Was möchtest du heute besprechen?", "speech": "", "documents": []}
+ else:
+ parsed = _parseAiJsonResponse(responseRaw)
+
+ textContent = parsed.get("text", "")
+ speechContent = parsed.get("speech", "")
+ documents = parsed.get("documents", [])
+
+ for doc in documents:
+ await _saveGeneratedDocument(doc, contextId, self.userId, self.mandateId, self.instanceId, interface, sessionId)
+
assistantMsg = CoachingMessage(
sessionId=sessionId,
contextId=contextId,
userId=self.userId,
role=CoachingMessageRole.ASSISTANT,
- content=openingContent,
+ content=textContent,
contentType=CoachingMessageContentType.TEXT,
).model_dump()
createdMsg = interface.createMessage(assistantMsg)
interface.updateSession(sessionId, {"messageCount": 1})
- await _emitChunkedResponse(sessionId, createdMsg, openingContent)
- if openingContent:
- try:
- from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
- import base64
- voiceInterface = getVoiceInterface(self.currentUser, self.mandateId)
- profile = interface.getProfile(self.userId, self.instanceId)
- language = profile.get("preferredLanguage", "de-DE") if profile else "de-DE"
- voiceName = profile.get("preferredVoice") if profile else None
- ttsResult = await voiceInterface.textToSpeech(
- text=_stripMarkdownForTts(openingContent),
- languageCode=language,
- voiceName=voiceName,
- )
- if ttsResult and isinstance(ttsResult, dict):
- audioBytes = ttsResult.get("audioContent")
- if audioBytes:
- audioB64 = base64.b64encode(
- audioBytes if isinstance(audioBytes, bytes) else audioBytes.encode()
- ).decode()
- await emitSessionEvent(sessionId, "ttsAudio", {"audio": audioB64, "format": "mp3"})
- except Exception as e:
- logger.warning(f"TTS failed for opening: {e}")
+ ttsTask = asyncio.create_task(
+ _generateAndEmitTts(sessionId, speechContent, self.currentUser, self.mandateId, self.instanceId, interface)
+ )
+ await _emitChunkedResponse(sessionId, createdMsg, textContent)
+ await ttsTask
+
await emitSessionEvent(sessionId, "complete", {})
logger.info(f"CommCoach session opening completed: {sessionId}")
@@ -425,36 +483,7 @@ class CommcoachService:
await emitSessionEvent(sessionId, "error", {"message": msg, "detail": sttError})
return {}
- # Process through normal pipeline
result = await self.processMessage(sessionId, contextId, transcribedText, interface)
-
- # Generate TTS for the response
- assistantContent = result.get("content", "")
- if assistantContent:
- await emitSessionEvent(sessionId, "status", {"label": "Antwort wird gesprochen..."})
- try:
- profile = interface.getProfile(self.userId, self.instanceId)
- voiceName = profile.get("preferredVoice") if profile else None
-
- ttsResult = await voiceInterface.textToSpeech(
- text=_stripMarkdownForTts(assistantContent),
- languageCode=language,
- voiceName=voiceName,
- )
- if ttsResult and isinstance(ttsResult, dict):
- import base64
- audioBytes = ttsResult.get("audioContent")
- if audioBytes:
- audioB64 = base64.b64encode(
- audioBytes if isinstance(audioBytes, bytes) else audioBytes.encode()
- ).decode()
- await emitSessionEvent(sessionId, "ttsAudio", {
- "audio": audioB64,
- "format": "mp3",
- })
- except Exception as e:
- logger.warning(f"TTS failed for session {sessionId}: {e}")
-
return result
async def completeSession(self, sessionId: str, interface) -> Dict[str, Any]:
@@ -484,7 +513,7 @@ class CommcoachService:
# Generate summary
try:
summaryPrompt = aiPrompts.buildSummaryPrompt(messages, context.get("title", "Coaching"))
- summaryResponse = await self._callAi("Du bist ein praeziser Zusammenfasser.", summaryPrompt)
+ summaryResponse = await self._callAi("Du bist ein präziser Zusammenfasser.", summaryPrompt)
summary = summaryResponse.content.strip() if summaryResponse and summaryResponse.errorCount == 0 else None
except Exception as e:
logger.warning(f"Summary generation failed: {e}")
@@ -507,7 +536,7 @@ class CommcoachService:
# Extract tasks
try:
taskPrompt = aiPrompts.buildTaskExtractionPrompt(messages)
- taskResponse = await self._callAi("Du extrahierst Aufgaben aus Gespraechen.", taskPrompt)
+ taskResponse = await self._callAi("Du extrahierst Aufgaben aus Gesprächen.", taskPrompt)
if taskResponse and taskResponse.errorCount == 0:
extractedTasks = aiPrompts.parseJsonResponse(taskResponse.content, [])
if isinstance(extractedTasks, list):
diff --git a/modules/features/commcoach/serviceCommcoachAi.py b/modules/features/commcoach/serviceCommcoachAi.py
index 943e012a..357a65b3 100644
--- a/modules/features/commcoach/serviceCommcoachAi.py
+++ b/modules/features/commcoach/serviceCommcoachAi.py
@@ -24,16 +24,16 @@ def buildResumeGreetingPrompt(messages: List[Dict[str, Any]], contextTitle: str)
for msg in recent:
role = "Benutzer" if msg.get("role") == "user" else "Coach"
conversation += f"\n{role}: {msg.get('content', '')[:200]}"
- return f"""Der User kehrt zur laufenden Coaching-Session zum Thema "{contextTitle}" zurueck.
+ return f"""Der User kehrt zur laufenden Coaching-Session zum Thema "{contextTitle}" zurück.
Bisheriger Verlauf:
{conversation}
-Erstelle eine kurze, freundliche Begruesssung fuer den Wiedereinstieg (2-3 Saetze):
-- Begruesse den User zurueck
+Erstelle eine kurze, freundliche Begrüssung für den Wiedereinstieg (2-3 Sätze):
+- Begrüsse den User zurück
- Fasse in einem Satz zusammen, worum es zuletzt ging
- Lade ein, dort weiterzumachen oder eine neue Frage zu stellen
-Antworte NUR mit der Begruesssung, keine Erklaerungen."""
+Antworte NUR mit der Begrüssung, keine Erklärungen."""
def buildEarlierConversationSummaryPrompt(messages: List[Dict[str, Any]]) -> str:
@@ -43,12 +43,12 @@ def buildEarlierConversationSummaryPrompt(messages: List[Dict[str, Any]]) -> str
role = "Benutzer" if msg.get("role") == "user" else "Coach"
conversation += f"\n{role}: {msg.get('content', '')}"
- return f"""Fasse das folgende Coaching-Gespraech in 4-6 Saetzen zusammen.
-Behalte: Kernthemen, wichtige Erkenntnisse, erwaehnte Aufgaben, emotionale Wendepunkte, Fortschritte.
-Entferne Wiederholungen und Fuelltext.
-Antworte NUR mit der Zusammenfassung, keine Erklaerungen.
+ return f"""Fasse das folgende Coaching-Gespräch in 4-6 Sätzen zusammen.
+Behalte: Kernthemen, wichtige Erkenntnisse, erwähnte Aufgaben, emotionale Wendepunkte, Fortschritte.
+Entferne Wiederholungen und Fülltext.
+Antworte NUR mit der Zusammenfassung, keine Erklärungen.
-Gespraech:
+Gespräch:
{conversation}"""
@@ -115,46 +115,61 @@ def buildCoachingSystemPrompt(
if persona.get("systemPromptOverride"):
prompt = persona["systemPromptOverride"]
else:
- personaLabel = persona.get("label", "Gespraechspartner")
+ personaLabel = persona.get("label", "Gesprächspartner")
personaDescription = persona.get("description", "")
personaGender = persona.get("gender", "")
- genderHint = " (weiblich)" if personaGender == "f" else " (maennlich)" if personaGender == "m" else ""
+ genderHint = " (weiblich)" if personaGender == "f" else " (männlich)" if personaGender == "m" else ""
prompt = f"""Du spielst die Rolle von "{personaLabel}"{genderHint} in einem Roleplay-Szenario zum Thema: "{contextTitle}" (Kategorie: {contextCategory}).
Rollenbeschreibung: {personaDescription}
-WICHTIG fuer dein Verhalten:
+WICHTIG für dein Verhalten:
- Bleibe KONSEQUENT in deiner Rolle. Du bist NICHT der Coach, du bist {personaLabel}.
-- Reagiere authentisch und emotional gemaess deiner Rollenbeschreibung.
-- Verwende eine Sprache und Tonalitaet, die zu deiner Rolle passt.
-- Der Benutzer uebt ein Gespraech mit dir. Gib ihm realistische Reaktionen.
+- Reagiere authentisch und emotional gemäss deiner Rollenbeschreibung.
+- Verwende eine Sprache und Tonalität, die zu deiner Rolle passt.
+- Der Benutzer übt ein Gespräch mit dir. Gib ihm realistische Reaktionen.
- Wenn der Benutzer gut kommuniziert, zeige das durch angemessene positive Reaktionen.
- Wenn der Benutzer schlecht kommuniziert, eskaliere entsprechend deiner Rolle.
Kommunikationsstil:
-- Sprich natuerlich, wie die beschriebene Person sprechen wuerde.
+- Sprich natürlich, wie die beschriebene Person sprechen würde.
- Verwende keine Emojis.
- Antworte in der Sprache des Benutzers.
-- Halte Antworten realistisch kurz (wie in einem echten Gespraech, 2-4 Saetze).
-- WICHTIG: Schreibe reinen Redetext ohne jegliche Formatierung. Kein Markdown, keine Sternchen, keine Hashes, keine Aufzaehlungszeichen, keine Backticks. Deine Antworten werden direkt vorgelesen."""
+- Halte Antworten realistisch kurz (wie in einem echten Gespräch)."""
else:
- prompt = f"""Du bist ein erfahrener Kommunikations-Coach fuer Fuehrungskraefte. Du arbeitest mit dem Benutzer am Thema: "{contextTitle}" (Kategorie: {contextCategory}).
+ prompt = f"""Du bist ein erfahrener Kommunikations-Coach für Führungskräfte. Du arbeitest mit dem Benutzer am Thema: "{contextTitle}" (Kategorie: {contextCategory}).
Deine Rolle:
-- Stelle gezielte diagnostische Rueckfragen, um das Problem/Thema besser zu verstehen
-- Gib konkrete, praxisnahe Tipps und Uebungen
-- Baue auf fruehere Sessions auf (Kontext-Kontinuitaet)
+- Stelle gezielte diagnostische Rückfragen, um das Problem/Thema besser zu verstehen
+- Gib konkrete, praxisnahe Tipps und Übungen
+- Baue auf frühere Sessions auf (Kontext-Kontinuität)
- Erkenne Fortschritte und benenne sie
-- Schlage am Ende der Session konkrete naechste Schritte vor (als Tasks)
-- Kommuniziere empathisch, klar und auf Augenhoehe
+- Schlage am Ende der Session konkrete nächste Schritte vor (als Tasks)
+- Kommuniziere empathisch, klar und auf Augenhöhe
Kommunikationsstil:
- Duze den Benutzer
-- Sei direkt aber wertschaetzend
+- Sei direkt aber wertschätzend
- Verwende keine Emojis
- Antworte in der Sprache des Benutzers
-- Halte Antworten fokussiert (max 3-4 Absaetze)
-- WICHTIG: Schreibe reinen Redetext ohne jegliche Formatierung. Kein Markdown, keine Sternchen, keine Hashes, keine Aufzaehlungszeichen, keine Backticks. Deine Antworten werden direkt vorgelesen."""
+- Halte Antworten fokussiert (max 3-4 Absätze)"""
+
+ prompt += """
+
+Antwortformat:
+Du antwortest IMMER als reines JSON-Objekt mit exakt diesen Feldern:
+{"text": "...", "speech": "...", "documents": []}
+
+"text": Dein schriftlicher Chat-Text. Details, Struktur, Übungen, Beispiele. Markdown-Formatierung erlaubt.
+"speech": Dein gesprochener Kommentar. Natürlich, wie ein Gespräch. Fasse zusammen, kommentiere, motiviere, stelle Fragen. Lies NICHT den Text vor, ergänze ihn mündlich. 2-4 Sätze, reiner Redetext ohne Formatierung.
+"documents": Optionale Dokumente (Zusammenfassungen, Checklisten, Übungen). Nur wenn sinnvoll. Jedes Dokument: {"title": "...", "content": "..."}. Sonst leeres Array [].
+
+Kanalverteilung:
+- Fakten, Listen, Übungen -> text
+- Empathie, Einordnung, Nachfragen -> speech
+- Materialien zum Aufbewahren -> documents
+
+WICHTIG: Antworte NUR mit dem JSON-Objekt. Kein Text vor oder nach dem JSON."""
if contextDescription:
prompt += f"\n\nKontext-Beschreibung: {contextDescription}"
@@ -168,7 +183,7 @@ Kommunikationsstil:
prompt += f"\n\nBisherige Erkenntnisse:\n" + "\n".join(f"- {i}" for i in insightTexts)
if rollingOverview:
- prompt += f"\n\nGesamtueberblick bisheriger Sessions:\n{rollingOverview[:600]}"
+ prompt += f"\n\nGesamtüberblick bisheriger Sessions:\n{rollingOverview[:600]}"
if summaries:
prompt += "\n\nBisherige Sessions (Zusammenfassungen):"
@@ -209,7 +224,7 @@ Kommunikationsstil:
prompt += f"\n\nAbgeschlossene Aufgaben: {len(doneTasks)}"
if earlierSummary:
- prompt += f"\n\nAelterer Gespraechsverlauf (zusammengefasst):\n{earlierSummary[:800]}"
+ prompt += f"\n\nÄlterer Gesprächsverlauf (zusammengefasst):\n{earlierSummary[:800]}"
if documentSummaries:
prompt += "\n\nRelevante Dokumente zum Kontext:"
@@ -236,12 +251,12 @@ def buildSummaryPrompt(messages: List[Dict[str, Any]], contextTitle: str) -> str
return f"""Erstelle eine kompakte Zusammenfassung dieser Coaching-Session zum Thema "{contextTitle}".
Struktur:
-1. **Kernthema**: Was wurde besprochen (1-2 Saetze)
+1. **Kernthema**: Was wurde besprochen (1-2 Sätze)
2. **Erkenntnisse**: Was wurde erkannt/gelernt (Stichpunkte)
-3. **Naechste Schritte**: Konkrete Aufgaben fuer den Benutzer (Stichpunkte)
-4. **Fortschritt**: Einschaetzung des Fortschritts
+3. **Nächste Schritte**: Konkrete Aufgaben für den Benutzer (Stichpunkte)
+4. **Fortschritt**: Einschätzung des Fortschritts
-Gespraech:
+Gespräch:
{conversation}
Antworte auf Deutsch, sachlich und kompakt."""
@@ -258,21 +273,21 @@ def buildScoringPrompt(messages: List[Dict[str, Any]], contextCategory: str) ->
Kategorie: {contextCategory}
Bewerte folgende Dimensionen auf einer Skala von 0-100:
-- empathy: Einfuehlungsvermoegen
+- empathy: Einfühlungsvermögen
- clarity: Klarheit der Kommunikation
-- assertiveness: Durchsetzungsfaehigkeit
-- listening: Zuhoerfaehigkeit
+- assertiveness: Durchsetzungsfähigkeit
+- listening: Zuhörfähigkeit
- selfReflection: Selbstreflexion
Antworte AUSSCHLIESSLICH als JSON-Array:
[
- {{"dimension": "empathy", "score": 65, "trend": "improving", "evidence": "Zeigt zunehmendes Verstaendnis..."}},
+ {{"dimension": "empathy", "score": 65, "trend": "improving", "evidence": "Zeigt zunehmendes Verständnis..."}},
{{"dimension": "clarity", "score": 70, "trend": "stable", "evidence": "..."}}
]
-Trend: "improving", "stable", oder "declining" basierend auf dem Gespraechsverlauf.
+Trend: "improving", "stable", oder "declining" basierend auf dem Gesprächsverlauf.
-Gespraech:
+Gespräch:
{conversation}"""
@@ -284,7 +299,7 @@ Antworte AUSSCHLIESSLICH als JSON-Array von Strings:
Zusammenfassung: {summary[:500]}
-Nur konkrete Themen (z.B. Delegation, Feedback-Gespraech, Konflikt mit Vorgesetztem)."""
+Nur konkrete Themen (z.B. Delegation, Feedback-Gespräch, Konflikt mit Vorgesetztem)."""
def buildFullContextSummaryPrompt(
@@ -315,15 +330,15 @@ def buildFullContextSummaryPrompt(
return f"""Erstelle eine kompakte Gesamtzusammenfassung aller Coaching-Sessions zum Thema "{contextTitle}".
Struktur:
-1. **Gesamtueberblick**: Was wurde ueber alle Sessions hinweg besprochen
+1. **Gesamtüberblick**: Was wurde über alle Sessions hinweg besprochen
2. **Entwicklung**: Wie hat sich das Thema/thematische Schwerpunkte entwickelt
3. **Offene Punkte**: Was steht noch aus
-4. **Empfehlung**: Kurzer naechster Fokus
+4. **Empfehlung**: Kurzer nächster Fokus
Inhalt:
{combined[:6000]}
-Antworte auf Deutsch, sachlich, 4-6 Absaetze."""
+Antworte auf Deutsch, sachlich, 4-6 Absätze."""
def buildRollingOverviewPrompt(sessionSummaries: List[Dict[str, Any]], contextTitle: str) -> str:
@@ -336,7 +351,7 @@ def buildRollingOverviewPrompt(sessionSummaries: List[Dict[str, Any]], contextTi
parts.append(f"- {dateStr}: {summary[:300]}")
combined = "\n".join(parts)
- return f"""Fasse die folgenden Coaching-Sessions zum Thema "{contextTitle}" in 4-6 Saetzen zusammen.
+ return f"""Fasse die folgenden Coaching-Sessions zum Thema "{contextTitle}" in 4-6 Sätzen zusammen.
Behalte: Kernthemen, Fortschritte, wichtige Erkenntnisse, offene Punkte.
Entferne Wiederholungen.
@@ -356,15 +371,15 @@ def buildInsightPrompt(messages: List[Dict[str, Any]], summary: Optional[str] =
summarySection = f"\nZusammenfassung: {summary[:500]}" if summary else ""
return f"""Generiere 1-3 kurze Coaching-Insights aus dieser Session.
-Ein Insight ist eine praegende Erkenntnis oder ein Aha-Moment des Benutzers.
+Ein Insight ist eine prägende Erkenntnis oder ein Aha-Moment des Benutzers.
Antworte AUSSCHLIESSLICH als JSON-Array:
[{{"text": "Erkenntnis in einem Satz"}}]
-Nur echte Erkenntnisse, keine Banalitaeten. Wenn keine klaren Insights: leeres Array [].
+Nur echte Erkenntnisse, keine Banalitäten. Wenn keine klaren Insights: leeres Array [].
{summarySection}
-Gespraech:
+Gespräch:
{conversation}"""
@@ -376,7 +391,7 @@ def buildTaskExtractionPrompt(messages: List[Dict[str, Any]]) -> str:
role = "Benutzer" if msg.get("role") == "user" else "Coach"
conversation += f"\n{role}: {msg.get('content', '')}"
- return f"""Extrahiere konkrete Aufgaben/naechste Schritte aus diesem Coaching-Gespraech.
+ return f"""Extrahiere konkrete Aufgaben/nächste Schritte aus diesem Coaching-Gespräch.
Nur Aufgaben, die der Benutzer selbst umsetzen soll.
Antworte AUSSCHLIESSLICH als JSON-Array:
@@ -387,7 +402,7 @@ Antworte AUSSCHLIESSLICH als JSON-Array:
priority: "low", "medium", oder "high"
Maximal 3 Aufgaben. Wenn keine klar erkennbar: leeres Array [].
-Gespraech:
+Gespräch:
{conversation}"""
diff --git a/modules/features/commcoach/serviceCommcoachExport.py b/modules/features/commcoach/serviceCommcoachExport.py
index 829bb430..ddc90825 100644
--- a/modules/features/commcoach/serviceCommcoachExport.py
+++ b/modules/features/commcoach/serviceCommcoachExport.py
@@ -112,7 +112,7 @@ def buildSessionMarkdown(session: Dict[str, Any], messages: List[Dict[str, Any]]
lines += ["", "## Zusammenfassung", "", summary]
if messages:
- lines += ["", "## Gespraechsverlauf", ""]
+ lines += ["", "## Gesprächsverlauf", ""]
for msg in messages:
role = "Du" if msg.get("role") == "user" else "Coach"
content = msg.get("content", "")
@@ -228,7 +228,7 @@ def _buildPdfContent(context, sessions, tasks, scores, isDossier=True, messages=
sections.append({
"id": "chat",
"content_type": "heading",
- "elements": [{"text": "Gespraechsverlauf", "level": 2}],
+ "elements": [{"text": "Gesprächsverlauf", "level": 2}],
})
sections.append({
"id": "chat_content",
diff --git a/modules/features/commcoach/serviceCommcoachGamification.py b/modules/features/commcoach/serviceCommcoachGamification.py
index 11c2da59..5b8d5eb6 100644
--- a/modules/features/commcoach/serviceCommcoachGamification.py
+++ b/modules/features/commcoach/serviceCommcoachGamification.py
@@ -53,7 +53,7 @@ BADGE_DEFINITIONS: Dict[str, Dict[str, Any]] = {
},
"high_score": {
"label": "Bestleistung",
- "description": "Durchschnittsscore ueber 80 in einer Session",
+ "description": "Durchschnittsscore über 80 in einer Session",
"icon": "medal",
},
"multi_context": {
diff --git a/modules/features/commcoach/serviceCommcoachPersonas.py b/modules/features/commcoach/serviceCommcoachPersonas.py
index 7e47f124..db14363c 100644
--- a/modules/features/commcoach/serviceCommcoachPersonas.py
+++ b/modules/features/commcoach/serviceCommcoachPersonas.py
@@ -21,18 +21,18 @@ BUILTIN_PERSONAS: List[Dict[str, Any]] = [
{
"key": "critical_cfo_f",
"label": "Kritische CFO",
- "description": "Sandra Meier, CFO eines mittelstaendischen Unternehmens. Analytisch, zahlengetrieben, ungeduldig bei vagen Aussagen. "
+ "description": "Sandra Meier, CFO eines mittelständischen Unternehmens. Analytisch, zahlengetrieben, ungeduldig bei vagen Aussagen. "
"Hinterfragt jeden Vorschlag nach ROI und Wirtschaftlichkeit. Spricht schnell und direkt. "
- "Erwartet praezise Antworten und belastbare Daten. Wird irritiert bei Ausweichen oder Unsicherheit.",
+ "Erwartet präzise Antworten und belastbare Daten. Wird irritiert bei Ausweichen oder Unsicherheit.",
"gender": "f",
"category": "builtin",
},
{
"key": "difficult_employee_m",
"label": "Schwieriger Mitarbeiter",
- "description": "Thomas Huber, langjaeheriger Mitarbeiter der sich uebergangen fuehlt. Defensiv, emotional, nimmt Kritik persoenlich. "
- "Verweist staendig auf seine Erfahrung und fruehhere Verdienste. Reagiert mit Widerstand auf Veraenderungen. "
- "Braucht das Gefuehl, gehoert und wertgeschaetzt zu werden, bevor er sich oeffnet.",
+ "description": "Thomas Huber, langjähriger Mitarbeiter, der sich übergangen fühlt. Defensiv, emotional, nimmt Kritik persönlich. "
+ "Verweist ständig auf seine Erfahrung und frühere Verdienste. Reagiert mit Widerstand auf Veränderungen. "
+ "Braucht das Gefühl, gehört und wertgeschätzt zu werden, bevor er sich öffnet.",
"gender": "m",
"category": "builtin",
},
@@ -41,7 +41,7 @@ BUILTIN_PERSONAS: List[Dict[str, Any]] = [
"label": "Unsichere neue Mitarbeiterin",
"description": "Lisa Brunner, seit drei Wochen im Team. Fachlich kompetent aber unsicher in der neuen Umgebung. "
"Stellt viele Fragen, traut sich aber nicht, eigene Ideen einzubringen. Braucht klare Orientierung "
- "und ermutigende Fuehrung. Reagiert positiv auf Lob und konkrete Anleitungen.",
+ "und ermutigende Führung. Reagiert positiv auf Lob und konkrete Anleitungen.",
"gender": "f",
"category": "builtin",
},
@@ -49,33 +49,33 @@ BUILTIN_PERSONAS: List[Dict[str, Any]] = [
"key": "board_member_m",
"label": "Verwaltungsrat",
"description": "Dr. Peter Keller, erfahrener Verwaltungsrat. Formell, strategisch denkend, zeitlich unter Druck. "
- "Erwartet praegnante Praesentationen auf den Punkt. Unterbricht bei zu vielen Details. "
- "Interessiert sich fuer das grosse Bild, Risiken und strategische Implikationen. Ungeduldig bei Smalltalk.",
+ "Erwartet prägnante Präsentationen auf den Punkt. Unterbricht bei zu vielen Details. "
+ "Interessiert sich für das grosse Bild, Risiken und strategische Implikationen. Ungeduldig bei Smalltalk.",
"gender": "m",
"category": "builtin",
},
{
"key": "angry_customer_f",
"label": "Aufgebrachte Kundin",
- "description": "Maria Rossi, Geschaeftskunde die wuetend ist wegen einer fehlerhaften Lieferung. Emotional, laut, "
- "droht mit Vertragsaufloesung. Will sofortige Loesungen, keine Erklaerungen oder Entschuldigungen. "
- "Kann beruhigt werden durch empathisches Zuhoeren und konkrete Sofortmassnahmen.",
+ "description": "Maria Rossi, Geschäftskundin, die wütend ist wegen einer fehlerhaften Lieferung. Emotional, laut, "
+ "droht mit Vertragsauflösung. Will sofortige Lösungen, keine Erklärungen oder Entschuldigungen. "
+ "Kann beruhigt werden durch empathisches Zuhören und konkrete Sofortmassnahmen.",
"gender": "f",
"category": "builtin",
},
{
"key": "resistant_manager_m",
- "label": "Widerstaendiger Abteilungsleiter",
- "description": "Martin Weber, Abteilungsleiter seit 15 Jahren. Blockiert systematisch Veraenderungsprojekte mit "
+ "label": "Widerständiger Abteilungsleiter",
+ "description": "Martin Weber, Abteilungsleiter seit 15 Jahren. Blockiert systematisch Veränderungsprojekte mit "
"Argumenten wie 'Das haben wir immer so gemacht' und 'Das funktioniert in der Praxis nicht'. "
- "Schuetzt sein Team vor zusaetzlicher Belastung. Respektiert nur Argumente mit konkretem Nutzen fuer seine Abteilung.",
+ "Schützt sein Team vor zusätzlicher Belastung. Respektiert nur Argumente mit konkretem Nutzen für seine Abteilung.",
"gender": "m",
"category": "builtin",
},
{
"key": "ambitious_colleague_f",
"label": "Ehrgeizige Kollegin",
- "description": "Anna Fischer, gleichrangige Kollegin die um dieselbe Befoerderung konkurriert. Charmant aber strategisch. "
+ "description": "Anna Fischer, gleichrangige Kollegin, die um dieselbe Beförderung konkurriert. Charmant aber strategisch. "
"Versucht subtil, die Ideen anderer als ihre eigenen darzustellen. Konkurriert um Ressourcen und "
"Sichtbarkeit beim Management. Kann kooperativ werden, wenn man ihr Win-Win-Szenarien aufzeigt.",
"gender": "f",
@@ -83,20 +83,20 @@ BUILTIN_PERSONAS: List[Dict[str, Any]] = [
},
{
"key": "partner_supportive_f",
- "label": "Verstaendnisvolle Lebenspartnerin",
- "description": "Claudia, deine Lebenspartnerin. Grundsaetzlich unterstuetzend, aber zunehmend besorgt ueber deine "
- "Work-Life-Balance. Moechte ueber Arbeitsbelastung sprechen und gemeinsame Zeit einfordern. "
- "Reagiert emotional auf Abweisung, ist aber offen fuer kompromissorientierte Gespraeche. "
- "Wuenscht sich, dass du mehr von deinen Gefuehlen teilst.",
+ "label": "Verständnisvolle Lebenspartnerin",
+ "description": "Claudia, deine Lebenspartnerin. Grundsätzlich unterstützend, aber zunehmend besorgt über deine "
+ "Work-Life-Balance. Möchte über Arbeitsbelastung sprechen und gemeinsame Zeit einfordern. "
+ "Reagiert emotional auf Abweisung, ist aber offen für kompromissorientierte Gespräche. "
+ "Wünscht sich, dass du mehr von deinen Gefühlen teilst.",
"gender": "f",
"category": "builtin",
},
{
"key": "partner_critical_m",
"label": "Kritischer Lebenspartner",
- "description": "Michael, dein Lebenspartner. Frustriert ueber deine haeufige Abwesenheit und staendiges Arbeiten. "
- "Drueckt Enttaeuschung offen aus, manchmal mit Sarkasmus. Fuehlt sich vernachlaessigt und "
- "hinterfragt deine Prioritaeten. Braucht das Gefuehl, dass die Beziehung dir genauso wichtig ist "
+ "description": "Michael, dein Lebenspartner. Frustriert über deine häufige Abwesenheit und ständiges Arbeiten. "
+ "Drückt Enttäuschung offen aus, manchmal mit Sarkasmus. Fühlt sich vernachlässigt und "
+ "hinterfragt deine Prioritäten. Braucht das Gefühl, dass die Beziehung dir genauso wichtig ist "
"wie die Karriere. Reagiert positiv auf ehrliche Selbstreflexion.",
"gender": "m",
"category": "builtin",
diff --git a/modules/features/commcoach/tests/test_serviceAi.py b/modules/features/commcoach/tests/test_serviceAi.py
index b4410ee8..bc8647b9 100644
--- a/modules/features/commcoach/tests/test_serviceAi.py
+++ b/modules/features/commcoach/tests/test_serviceAi.py
@@ -65,14 +65,14 @@ class TestBuildCoachingSystemPrompt:
def test_promptLanguageIsGerman(self):
context = {"title": "Test", "category": "custom"}
prompt = buildCoachingSystemPrompt(context, [], [])
- assert "Fuehrungskraefte" in prompt or "Coach" in prompt
+ assert "Führungskräfte" in prompt or "Coach" in prompt
def test_withEarlierSummary(self):
context = {"title": "Test", "category": "custom"}
messages = [{"role": "user", "content": "Recent question"}]
earlierSummary = "User discussed delegation. Coach suggested practice."
prompt = buildCoachingSystemPrompt(context, messages, [], earlierSummary=earlierSummary)
- assert "Aelterer Gespraechsverlauf" in prompt
+ assert "Älterer Gesprächsverlauf" in prompt
assert "delegation" in prompt.lower()
assert "Recent question" in prompt
@@ -81,7 +81,7 @@ class TestBuildCoachingSystemPrompt:
prompt = buildCoachingSystemPrompt(
context, [], [], rollingOverview="User arbeitet an Delegation. Fortschritt sichtbar."
)
- assert "Gesamtueberblick" in prompt
+ assert "Gesamtüberblick" in prompt
assert "Delegation" in prompt
def test_withRetrievedSession(self):
From d7ba24f61ac218f5cd9b3e6c510f769a94ccde70 Mon Sep 17 00:00:00 2001
From: patrick-motsch
Date: Thu, 5 Mar 2026 23:41:37 +0100
Subject: [PATCH 6/7] fixed stt procedure
---
modules/features/commcoach/mainCommcoach.py | 7 +----
.../commcoach/routeFeatureCommcoach.py | 25 ++++++++++++++--
.../features/commcoach/serviceCommcoach.py | 29 +++++++++++++++----
.../features/commcoach/serviceCommcoachAi.py | 11 +++++--
4 files changed, 55 insertions(+), 17 deletions(-)
diff --git a/modules/features/commcoach/mainCommcoach.py b/modules/features/commcoach/mainCommcoach.py
index 2147a867..6da38087 100644
--- a/modules/features/commcoach/mainCommcoach.py
+++ b/modules/features/commcoach/mainCommcoach.py
@@ -22,14 +22,9 @@ UI_OBJECTS = [
},
{
"objectKey": "ui.feature.commcoach.coaching",
- "label": {"en": "Coaching", "de": "Coaching", "fr": "Coaching"},
+ "label": {"en": "Coaching & Dossier", "de": "Coaching & Dossier", "fr": "Coaching & Dossier"},
"meta": {"area": "coaching"}
},
- {
- "objectKey": "ui.feature.commcoach.dossier",
- "label": {"en": "Dossier", "de": "Dossier", "fr": "Dossier"},
- "meta": {"area": "dossier"}
- },
{
"objectKey": "ui.feature.commcoach.settings",
"label": {"en": "Settings", "de": "Einstellungen", "fr": "Parametres"},
diff --git a/modules/features/commcoach/routeFeatureCommcoach.py b/modules/features/commcoach/routeFeatureCommcoach.py
index 685a0f4e..1efa6556 100644
--- a/modules/features/commcoach/routeFeatureCommcoach.py
+++ b/modules/features/commcoach/routeFeatureCommcoach.py
@@ -1093,8 +1093,7 @@ async def uploadDocument(
contextId: str,
context: RequestContext = Depends(getRequestContext),
):
- """Upload a document and bind it to a context."""
- from fastapi import UploadFile
+ """Upload a document and bind it to a context. Stores file in Management DB."""
mandateId = _validateInstanceAccess(instanceId, context)
interface = _getInterface(context, instanceId)
userId = str(context.user.id)
@@ -1114,6 +1113,14 @@ async def uploadDocument(
mimeType = getattr(file, "content_type", "application/octet-stream")
fileSize = len(content)
+ if not content:
+ raise HTTPException(status_code=400, detail="Leere Datei hochgeladen")
+
+ import modules.interfaces.interfaceDbManagement as interfaceDbManagement
+ mgmtInterface = interfaceDbManagement.getInterface(currentUser=context.user)
+ fileItem, _dupType = mgmtInterface.saveUploadedFile(content, fileName)
+ fileRef = fileItem.id
+
extractedText = _extractText(content, mimeType, fileName)
summary = None
if extractedText and len(extractedText.strip()) > 50:
@@ -1139,6 +1146,7 @@ async def uploadDocument(
fileSize=fileSize,
extractedText=extractedText[:10000] if extractedText else None,
summary=summary,
+ fileRef=fileRef,
).model_dump()
created = interface.createDocument(docData)
return {"document": created}
@@ -1152,7 +1160,7 @@ async def deleteDocumentRoute(
documentId: str,
context: RequestContext = Depends(getRequestContext),
):
- _validateInstanceAccess(instanceId, context)
+ mandateId = _validateInstanceAccess(instanceId, context)
interface = _getInterface(context, instanceId)
doc = interface.getDocument(documentId)
@@ -1160,6 +1168,17 @@ async def deleteDocumentRoute(
raise HTTPException(status_code=404, detail="Document not found")
_validateOwnership(doc, context)
+ fileRef = doc.get("fileRef")
+ if fileRef:
+ try:
+ import modules.interfaces.interfaceDbManagement as interfaceDbManagement
+ mgmtInterface = interfaceDbManagement.getInterface(
+ currentUser=context.user, mandateId=mandateId, featureInstanceId=instanceId
+ )
+ mgmtInterface.deleteFile(fileRef)
+ except Exception as e:
+ logger.warning(f"Failed to delete file {fileRef}: {e}")
+
interface.deleteDocument(documentId)
return {"deleted": True}
diff --git a/modules/features/commcoach/serviceCommcoach.py b/modules/features/commcoach/serviceCommcoach.py
index df490aa2..5d9b6f29 100644
--- a/modules/features/commcoach/serviceCommcoach.py
+++ b/modules/features/commcoach/serviceCommcoach.py
@@ -132,22 +132,39 @@ async def _generateAndEmitTts(sessionId: str, speechText: str, currentUser, mand
async def _saveGeneratedDocument(doc: Dict[str, Any], contextId: str, userId: str,
- mandateId: str, instanceId: str, interface, sessionId: str):
- """Save a document generated by AI and emit SSE event."""
+ mandateId: str, instanceId: str, interface, sessionId: str,
+ user=None):
+ """Save a document generated by AI. Stores file in Management DB."""
from .datamodelCommcoach import CoachingDocument
try:
title = doc.get("title", "Dokument")
content = doc.get("content", "")
+ contentBytes = content.encode("utf-8")
+ fileName = f"{title}.md"
+
+ fileRef = None
+ try:
+ import modules.interfaces.interfaceDbManagement as interfaceDbManagement
+ mgmtInterface = interfaceDbManagement.getInterface(
+ currentUser=user, mandateId=mandateId, featureInstanceId=instanceId
+ )
+ fileItem = mgmtInterface.createFile(name=fileName, mimeType="text/markdown", content=contentBytes)
+ mgmtInterface.createFileData(fileItem.id, contentBytes)
+ fileRef = fileItem.id
+ except Exception as e:
+ logger.warning(f"Failed to store generated document in file DB: {e}")
+
docData = CoachingDocument(
contextId=contextId,
userId=userId,
mandateId=mandateId,
instanceId=instanceId,
- fileName=f"{title}.md",
+ fileName=fileName,
mimeType="text/markdown",
- fileSize=len(content.encode()),
+ fileSize=len(contentBytes),
extractedText=content,
summary=title,
+ fileRef=fileRef,
).model_dump()
created = interface.createDocument(docData)
await emitSessionEvent(sessionId, "documentCreated", created)
@@ -328,7 +345,7 @@ class CommcoachService:
documents = parsed.get("documents", [])
for doc in documents:
- await _saveGeneratedDocument(doc, contextId, self.userId, self.mandateId, self.instanceId, interface, sessionId)
+ await _saveGeneratedDocument(doc, contextId, self.userId, self.mandateId, self.instanceId, interface, sessionId, user=self.currentUser)
assistantMsg = CoachingMessage(
sessionId=sessionId,
@@ -419,7 +436,7 @@ class CommcoachService:
documents = parsed.get("documents", [])
for doc in documents:
- await _saveGeneratedDocument(doc, contextId, self.userId, self.mandateId, self.instanceId, interface, sessionId)
+ await _saveGeneratedDocument(doc, contextId, self.userId, self.mandateId, self.instanceId, interface, sessionId, user=self.currentUser)
assistantMsg = CoachingMessage(
sessionId=sessionId,
diff --git a/modules/features/commcoach/serviceCommcoachAi.py b/modules/features/commcoach/serviceCommcoachAi.py
index 357a65b3..7b67406f 100644
--- a/modules/features/commcoach/serviceCommcoachAi.py
+++ b/modules/features/commcoach/serviceCommcoachAi.py
@@ -124,7 +124,8 @@ def buildCoachingSystemPrompt(
Rollenbeschreibung: {personaDescription}
WICHTIG für dein Verhalten:
-- Bleibe KONSEQUENT in deiner Rolle. Du bist NICHT der Coach, du bist {personaLabel}.
+- Du BIST {personaLabel}. Du bist NICHT der Coach. Sprich IMMER direkt als diese Person.
+- Beschreibe KEINE Szenarien. Beginne SOFORT mit dem Dialog in deiner Rolle.
- Reagiere authentisch und emotional gemäss deiner Rollenbeschreibung.
- Verwende eine Sprache und Tonalität, die zu deiner Rolle passt.
- Der Benutzer übt ein Gespräch mit dir. Gib ihm realistische Reaktionen.
@@ -147,6 +148,12 @@ Deine Rolle:
- Schlage am Ende der Session konkrete nächste Schritte vor (als Tasks)
- Kommuniziere empathisch, klar und auf Augenhöhe
+Roleplay:
+- Wenn der Benutzer dich bittet, eine bestimmte Person zu spielen (z.B. einen kritischen Kunden, einen Vorgesetzten, einen Mitarbeiter), dann wechsle SOFORT in diese Rolle.
+- Beschreibe KEIN Szenario. Sprich direkt ALS diese Person. Beginne sofort mit dem Dialog in der Rolle.
+- Bleibe in der Rolle, bis der Benutzer explizit sagt, dass das Roleplay beendet ist oder Feedback möchte.
+- Reagiere authentisch, emotional und realistisch wie die beschriebene Person.
+
Kommunikationsstil:
- Duze den Benutzer
- Sei direkt aber wertschätzend
@@ -162,7 +169,7 @@ Du antwortest IMMER als reines JSON-Objekt mit exakt diesen Feldern:
"text": Dein schriftlicher Chat-Text. Details, Struktur, Übungen, Beispiele. Markdown-Formatierung erlaubt.
"speech": Dein gesprochener Kommentar. Natürlich, wie ein Gespräch. Fasse zusammen, kommentiere, motiviere, stelle Fragen. Lies NICHT den Text vor, ergänze ihn mündlich. 2-4 Sätze, reiner Redetext ohne Formatierung.
-"documents": Optionale Dokumente (Zusammenfassungen, Checklisten, Übungen). Nur wenn sinnvoll. Jedes Dokument: {"title": "...", "content": "..."}. Sonst leeres Array [].
+"documents": Dokumente (Zusammenfassungen, Checklisten, Übungen, Protokolle). Erstelle ein Dokument wenn: der Benutzer explizit darum bittet, du strukturierte Inhalte (Listen, Pläne, Checklisten) lieferst, oder Material zum Aufbewahren sinnvoll ist. Jedes Dokument: {"title": "...", "content": "Markdown-Inhalt"}. Wenn keine: leeres Array [].
Kanalverteilung:
- Fakten, Listen, Übungen -> text
From c76f2c44f22e8f52585c6af2e787cf9acd098753 Mon Sep 17 00:00:00 2001
From: patrick-motsch
Date: Fri, 6 Mar 2026 12:40:06 +0100
Subject: [PATCH 7/7] teamsbot: Bot-Chat im Transcript, keine AI-Trigger auf
eigene Chat-Nachrichten
Made-with: Cursor
---
modules/features/teamsbot/service.py | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/modules/features/teamsbot/service.py b/modules/features/teamsbot/service.py
index 6a9db449..42065c78 100644
--- a/modules/features/teamsbot/service.py
+++ b/modules/features/teamsbot/service.py
@@ -684,11 +684,11 @@ class TeamsbotService:
logger.debug(f"Session {sessionId}: Chat history stored (no AI trigger): [{speaker}] {text[:60]}")
return
- # Filter out the bot's own speech entirely — captions of the bot's
- # own voice come back as garbled text (e.g. German TTS → English caption)
- # which pollutes the context buffer and confuses AI analysis.
+ # Filter out the bot's own speech (caption/audioCapture) — garbled text
+ # pollutes context. Chat from the bot is clean text and must appear in
+ # the transcript for all participants.
isBotSpeaker = self._isBotSpeaker(speaker)
- if isBotSpeaker:
+ if isBotSpeaker and source != "chat":
logger.debug(f"Session {sessionId}: Ignoring own bot caption from: [{speaker}] {text[:80]}...")
return
@@ -778,6 +778,10 @@ class TeamsbotService:
if self.config.responseMode == TeamsbotResponseMode.TRANSCRIBE_ONLY:
return
+ # Bot's own chat: stored for display only, never trigger AI
+ if source == "chat" and isBotSpeaker:
+ return
+
# Stop phrases: trigger immediately without debounce (root cause: 3s debounce delayed stop)
if self._isStopPhrase(text):
logger.info(f"Session {sessionId}: Stop phrase detected, triggering analysis immediately")