1569 lines
67 KiB
Python
1569 lines
67 KiB
Python
# Copyright (c) 2025 Patrick Motsch
|
||
# All rights reserved.
|
||
"""
|
||
CommCoach Service - Coaching Orchestration.
|
||
Manages the coaching pipeline: message processing, AI calls, scoring, task extraction.
|
||
"""
|
||
|
||
import re
|
||
import html
|
||
import logging
|
||
import json
|
||
import asyncio
|
||
from typing import Optional, Dict, Any, List
|
||
|
||
from modules.datamodels.datamodelUam import User
|
||
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum
|
||
from modules.shared.timeUtils import getIsoTimestamp, getUtcTimestamp
|
||
|
||
from .datamodelCommcoach import (
|
||
CoachingMessage, CoachingMessageRole, CoachingMessageContentType,
|
||
CoachingSessionStatus, CoachingTask, CoachingTaskPriority,
|
||
CoachingScore, CoachingScoreTrend,
|
||
)
|
||
from . import serviceCommcoachAi as aiPrompts
|
||
from .serviceCommcoachAi import (
|
||
COMPRESSION_MESSAGE_THRESHOLD,
|
||
COMPRESSION_RECENT_COUNT,
|
||
COMPRESSION_MAX_MESSAGES_FETCH,
|
||
buildResumeGreetingPrompt,
|
||
)
|
||
from .serviceCommcoachContextRetrieval import (
|
||
detectIntent,
|
||
RetrievalIntent,
|
||
buildSessionSummariesForPrompt,
|
||
findSessionByDate,
|
||
searchSessionsByTopic,
|
||
searchSessionsByTopicRag,
|
||
_parseDateFromMessage,
|
||
PREVIOUS_SESSION_SUMMARIES_COUNT,
|
||
ROLLING_OVERVIEW_SESSION_THRESHOLD,
|
||
ROLLING_OVERVIEW_EVERY_N_SESSIONS,
|
||
)
|
||
|
||
logger = logging.getLogger(__name__)
|
||
|
||
|
||
def _selectConfiguredVoice(
|
||
language: str,
|
||
voiceMap: Any,
|
||
legacyVoice: Optional[str] = None,
|
||
legacyLanguage: Optional[str] = None,
|
||
) -> Optional[str]:
|
||
"""Resolve the configured TTS voice for a language from ttsVoiceMap, then legacy ttsVoice."""
|
||
normalizedLanguage = str(language or "").strip()
|
||
normalizedLower = normalizedLanguage.lower()
|
||
baseLanguage = normalizedLower.split("-", 1)[0] if normalizedLower else ""
|
||
|
||
if isinstance(voiceMap, dict) and voiceMap:
|
||
direct = voiceMap.get(normalizedLanguage)
|
||
if isinstance(direct, str) and direct.strip():
|
||
return direct.strip()
|
||
|
||
directBase = voiceMap.get(baseLanguage)
|
||
if isinstance(directBase, str) and directBase.strip():
|
||
return directBase.strip()
|
||
|
||
for mapKey, mapValue in voiceMap.items():
|
||
if not isinstance(mapValue, str) or not mapValue.strip():
|
||
continue
|
||
keyNorm = str(mapKey or "").strip().lower()
|
||
if keyNorm == normalizedLower or keyNorm == baseLanguage or (baseLanguage and keyNorm.startswith(baseLanguage + "-")):
|
||
return mapValue.strip()
|
||
|
||
if legacyVoice and str(legacyVoice).strip():
|
||
legacyLangNorm = str(legacyLanguage or "").strip().lower()
|
||
if not legacyLangNorm or legacyLangNorm == normalizedLower:
|
||
return str(legacyVoice).strip()
|
||
|
||
return None
|
||
|
||
|
||
def buildTtsConfigErrorMessage(language: str, voiceName: Optional[str], rawError: str = "") -> str:
    """Build the user-facing (German) message for a TTS configuration problem.

    ``rawError`` is accepted for interface compatibility but not embedded in
    the message; callers report the raw detail separately.
    """
    if not voiceName:
        # No voice configured at all for this language.
        return (
            f'Für die Sprache {language} ist keine gültige TTS-Stimme konfiguriert. '
            'Bitte prüfe die Einstellungen unter Stimme & Sprache.'
        )
    # A voice is configured but was rejected by the TTS backend.
    return (
        f'Die konfigurierte Stimme "{voiceName}" für {language} ist ungültig oder nicht verfügbar. '
        'Bitte passe sie unter Einstellungen > Stimme & Sprache an.'
    )
|
||
|
||
|
||
def getUserVoicePrefs(userId: str, mandateId: Optional[str] = None) -> tuple:
    """Load voice language and voiceName from central UserVoicePreferences.

    Voice resolution order:
      1. mandate-scoped ``ttsVoiceMap`` entry,
      2. global (mandate-less) ``ttsVoiceMap`` / legacy ``ttsVoice``,
      3. mandate-scoped legacy ``ttsVoice``,
      4. first preference record of any scope as last resort.

    Returns:
        ``(language, voiceName)`` tuple; ``("de-DE", None)`` when no records
        exist or anything fails.
    """
    try:
        # Imported lazily so module import does not require the DB layer.
        from modules.datamodels.datamodelUam import UserVoicePreferences
        from modules.interfaces.interfaceDbApp import getRootInterface
        rootIf = getRootInterface()
        prefs = rootIf.db.getRecordset(
            UserVoicePreferences,
            recordFilter={"userId": userId}
        )
        if prefs:
            # Normalize records to plain dicts (DB layer may return models).
            allPrefs = [
                pref if isinstance(pref, dict) else pref.model_dump()
                for pref in prefs
            ]
            # Record scoped to the requested mandate (empty matches empty).
            scopedPref = next(
                (
                    pref for pref in allPrefs
                    if str(pref.get("mandateId") or "").strip() == str(mandateId or "").strip()
                ),
                None,
            )
            # Global record: no mandate binding at all.
            globalPref = next(
                (
                    pref for pref in allPrefs
                    if not str(pref.get("mandateId") or "").strip()
                ),
                None,
            )

            # NOTE(review): language prefers the GLOBAL record over the scoped
            # one — the opposite of the voice resolution below. Confirm intended.
            language = (
                (globalPref or {}).get("ttsLanguage")
                or (globalPref or {}).get("sttLanguage")
                or (scopedPref or {}).get("ttsLanguage")
                or (scopedPref or {}).get("sttLanguage")
                or "de-DE"
            )

            # 1. Mandate-scoped voice map.
            scopedVoiceFromMap = _selectConfiguredVoice(
                language=language,
                voiceMap=(scopedPref or {}).get("ttsVoiceMap"),
            )
            # 2. Global voice map, with legacy single-voice fallback.
            globalVoice = _selectConfiguredVoice(
                language=language,
                voiceMap=(globalPref or {}).get("ttsVoiceMap"),
                legacyVoice=(globalPref or {}).get("ttsVoice"),
                legacyLanguage=(globalPref or {}).get("ttsLanguage"),
            )
            # 3. Mandate-scoped legacy single voice (no map consulted).
            scopedLegacyVoice = _selectConfiguredVoice(
                language=language,
                voiceMap=None,
                legacyVoice=(scopedPref or {}).get("ttsVoice"),
                legacyLanguage=(scopedPref or {}).get("ttsLanguage"),
            )
            # 4. Last resort: first record of any scope (prefs is non-empty here).
            anyPref = allPrefs[0]
            fallbackVoice = _selectConfiguredVoice(
                language=language,
                voiceMap=(anyPref or {}).get("ttsVoiceMap"),
                legacyVoice=(anyPref or {}).get("ttsVoice"),
                legacyLanguage=(anyPref or {}).get("ttsLanguage"),
            )
            voiceName = scopedVoiceFromMap or globalVoice or scopedLegacyVoice or fallbackVoice
            return (language, voiceName)
    except Exception as e:
        logger.warning(f"Failed to load UserVoicePreferences for user={userId}: {e}")
    # No records found or lookup failed: safe default, no explicit voice.
    return ("de-DE", None)
|
||
|
||
|
||
def stripMarkdownForTts(text: str) -> str:
    """Remove markdown syntax so the TTS engine reads plain, natural speech."""
    # Emphasis markers: keep the wrapped text, drop the markers.
    emphasisRules = [
        (r'\*\*(.+?)\*\*', r'\1'),
        (r'\*(.+?)\*', r'\1'),
        (r'__(.+?)__', r'\1'),
        (r'_(.+?)_', r'\1'),
    ]
    cleaned = text
    for pattern, replacement in emphasisRules:
        cleaned = re.sub(pattern, replacement, cleaned)
    # Inline code: keep the content, strip the backticks.
    cleaned = re.sub(r'`[^`]+`', lambda m: m.group(0)[1:-1], cleaned)
    # Heading hashes, bullet markers and ordered-list numbers at line starts.
    cleaned = re.sub(r'^#{1,6}\s*', '', cleaned, flags=re.MULTILINE)
    cleaned = re.sub(r'^\s*[-*+]\s+', '', cleaned, flags=re.MULTILINE)
    cleaned = re.sub(r'^\s*\d+\.\s+', '', cleaned, flags=re.MULTILINE)
    # Links: keep the label, drop the target.
    cleaned = re.sub(r'\[(.+?)\]\(.+?\)', r'\1', cleaned)
    # Collapse runs of blank lines to a single blank line.
    cleaned = re.sub(r'\n{3,}', '\n\n', cleaned)
    return cleaned.strip()
|
||
|
||
|
||
# Per-session asyncio queues backing the SSE event streams.
_sessionEvents: Dict[str, asyncio.Queue] = {}


async def emitSessionEvent(sessionId: str, eventType: str, data: Any):
    """Push an event onto the session's SSE queue (creating the queue lazily)."""
    queue = _sessionEvents.setdefault(sessionId, asyncio.Queue())
    await queue.put({
        "type": eventType,
        "data": data,
        "timestamp": getIsoTimestamp(),
    })


def getSessionEventQueue(sessionId: str) -> asyncio.Queue:
    """Return the session's event queue, creating it on first access."""
    return _sessionEvents.setdefault(sessionId, asyncio.Queue())


def cleanupSessionEvents(sessionId: str):
    """Drop the session's event queue; no-op when the session is unknown."""
    _sessionEvents.pop(sessionId, None)
|
||
|
||
|
||
# Number of words emitted per messageChunk SSE event.
CHUNK_WORD_SIZE = 4
# Pause between chunk emissions (seconds) so clients render a progressive stream.
CHUNK_DELAY_SECONDS = 0.05
|
||
|
||
|
||
def _normalizeEmailBulletList(values: Any, maxItems: int = 4) -> List[str]:
|
||
items: List[str] = []
|
||
if not isinstance(values, list):
|
||
return items
|
||
for value in values:
|
||
text = str(value or "").strip()
|
||
if text:
|
||
items.append(text)
|
||
if len(items) >= maxItems:
|
||
break
|
||
return items
|
||
|
||
|
||
def _buildSummaryEmailBlock(
    emailData: Optional[Dict[str, Any]],
    summary: str,
    contextTitle: str,
) -> str:
    """Render a stable, mail-client-friendly CommCoach summary block.

    Args:
        emailData: Optional structured payload (headline, intro, coreTopic,
            insights, nextSteps, progress); may be None or partial.
        summary: Plain-text session summary used as fallback body.
        contextTitle: Coaching context title; also the headline fallback.

    Returns:
        An HTML fragment built from tables and inline styles (both needed for
        broad mail-client compatibility). All dynamic text is HTML-escaped.
    """
    payload = emailData or {}
    headline = str(payload.get("headline") or contextTitle or "Coaching-Session").strip()
    intro = str(payload.get("intro") or "").strip()
    coreTopic = str(payload.get("coreTopic") or "").strip()
    insights = _normalizeEmailBulletList(payload.get("insights"))
    nextSteps = _normalizeEmailBulletList(payload.get("nextSteps"))
    progress = _normalizeEmailBulletList(payload.get("progress"))

    # No structured content at all -> simple card with the plain summary.
    if not (intro or coreTopic or insights or nextSteps or progress):
        escapedSummary = html.escape(summary or "").replace("\n", "<br>")
        return (
            '<div style="border:1px solid #e5e7eb;border-radius:10px;padding:20px 22px;'
            'background-color:#ffffff;">'
            f'<h3 style="margin:0 0 12px 0;font-size:18px;line-height:1.3;color:#1f2937;">{html.escape(headline)}</h3>'
            f'<div style="font-size:15px;line-height:1.7;color:#374151;">{escapedSummary}</div>'
            '</div>'
        )

    def _renderSection(title: str, bodyHtml: str) -> str:
        # One labelled section row; empty bodies collapse to nothing.
        if not bodyHtml:
            return ""
        return (
            '<tr><td style="padding:0 0 18px 0;">'
            f'<div style="font-size:12px;font-weight:700;letter-spacing:0.06em;text-transform:uppercase;'
            f'color:#1d4ed8;margin:0 0 8px 0;">{html.escape(title)}</div>'
            f'<div style="font-size:15px;line-height:1.7;color:#374151;">{bodyHtml}</div>'
            '</td></tr>'
        )

    def _renderList(values: List[str]) -> str:
        # Bullets as a table: <ul> renders inconsistently across mail clients.
        if not values:
            return ""
        rows = "".join(
            '<tr>'
            '<td valign="top" style="padding:0 10px 8px 0;font-size:15px;line-height:1.6;color:#2563eb;">•</td>'
            f'<td style="padding:0 0 8px 0;font-size:15px;line-height:1.6;color:#374151;">{html.escape(item)}</td>'
            '</tr>'
            for item in values
        )
        return f'<table role="presentation" cellpadding="0" cellspacing="0" style="border-collapse:collapse;">{rows}</table>'

    introHtml = f'<p style="margin:0;">{html.escape(intro)}</p>' if intro else ""
    coreTopicHtml = f'<p style="margin:0;">{html.escape(coreTopic)}</p>' if coreTopic else ""

    # Section labels are intentionally German (user-facing copy).
    sectionsHtml = "".join([
        _renderSection("Kernbotschaft", introHtml),
        _renderSection("Kernthema", coreTopicHtml),
        _renderSection("Erkenntnisse", _renderList(insights)),
        _renderSection("Nächste Schritte", _renderList(nextSteps)),
        _renderSection("Fortschritt", _renderList(progress)),
    ])

    return (
        '<table role="presentation" width="100%" cellpadding="0" cellspacing="0" '
        'style="border-collapse:separate;border-spacing:0;background-color:#ffffff;'
        'border:1px solid #e5e7eb;border-radius:12px;">'
        '<tr><td style="padding:22px 22px 4px 22px;">'
        f'<h3 style="margin:0 0 6px 0;font-size:20px;line-height:1.3;color:#111827;">{html.escape(headline)}</h3>'
        f'<p style="margin:0 0 18px 0;font-size:13px;line-height:1.5;color:#6b7280;">Thema: {html.escape(contextTitle)}</p>'
        '<table role="presentation" width="100%" cellpadding="0" cellspacing="0" style="border-collapse:collapse;">'
        f'{sectionsHtml}'
        '</table>'
        '</td></tr>'
        '</table>'
    )
|
||
|
||
# Maximum number of documents loaded per document-intent resolution.
DOC_INTENT_MAX_DOCS = 3
# Per-document cap (characters) on extracted text handed to the AI.
DOC_CONTENT_MAX_CHARS = 3000
|
||
|
||
|
||
def _buildCombinedUserPrompt(messages: List[Dict[str, Any]]) -> str:
|
||
"""Collect all user messages after the last assistant message into one combined prompt."""
|
||
pending = []
|
||
for msg in reversed(messages):
|
||
if msg.get("role") == "assistant":
|
||
break
|
||
if msg.get("role") == "user":
|
||
pending.insert(0, msg.get("content", ""))
|
||
return " ".join(pending).strip()
|
||
|
||
|
||
def _stripPendingUserMessages(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||
"""Return messages up to and including the last assistant message (remove trailing user-only tail)."""
|
||
lastAssistantIdx = -1
|
||
for i in range(len(messages) - 1, -1, -1):
|
||
if messages[i].get("role") == "assistant":
|
||
lastAssistantIdx = i
|
||
break
|
||
if lastAssistantIdx < 0:
|
||
return []
|
||
return messages[:lastAssistantIdx + 1]
|
||
|
||
|
||
def _parseAiJsonResponse(rawText: str) -> Dict[str, Any]:
|
||
"""Parse optional structured AI output; otherwise treat free text as normal response."""
|
||
text = rawText.strip()
|
||
if text.startswith("```"):
|
||
lines = text.split("\n")
|
||
lines = lines[1:]
|
||
if lines and lines[-1].strip() == "```":
|
||
lines = lines[:-1]
|
||
text = "\n".join(lines)
|
||
try:
|
||
parsed = json.loads(text)
|
||
if isinstance(parsed, dict):
|
||
if parsed.get("text") and not parsed.get("speech"):
|
||
parsed["speech"] = parsed.get("text")
|
||
return parsed
|
||
return {"text": rawText.strip(), "speech": rawText.strip(), "documents": []}
|
||
except json.JSONDecodeError:
|
||
return {"text": rawText.strip(), "speech": rawText.strip(), "documents": []}
|
||
|
||
|
||
async def _generateAndEmitTts(sessionId: str, speechText: str, currentUser, mandateId: str,
                              instanceId: str, interface):
    """Generate TTS audio from speech text and emit it as an SSE event.

    Emits ``ttsAudio`` (base64 MP3) on success, otherwise an ``error`` event
    with a user-facing configuration message. Silently returns when there is
    no speech text. ``interface`` is unused here but kept for signature
    compatibility with callers.
    """
    if not speechText:
        return
    try:
        from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
        import base64

        voiceInterface = getVoiceInterface(currentUser, mandateId)
        language, voiceName = getUserVoicePrefs(str(currentUser.id), mandateId)
        ttsResult = await voiceInterface.textToSpeech(
            text=stripMarkdownForTts(speechText),
            languageCode=language,
            voiceName=voiceName,
        )
        if ttsResult and isinstance(ttsResult, dict):
            audioBytes = ttsResult.get("audioContent")
            if audioBytes:
                # audioContent may arrive as bytes or str; normalize to bytes.
                audioB64 = base64.b64encode(
                    audioBytes if isinstance(audioBytes, bytes) else audioBytes.encode()
                ).decode()
                await emitSessionEvent(sessionId, "ttsAudio", {"audio": audioB64, "format": "mp3"})
                return
        # Fix: previously a None / non-dict TTS result produced NO error event
        # (the error path dereferenced ttsResult.get). Now every no-audio
        # outcome reports a config error to the client.
        if isinstance(ttsResult, dict):
            errorDetail = ttsResult.get("error", "Text-to-Speech failed")
        else:
            errorDetail = "Text-to-Speech failed"
        await emitSessionEvent(sessionId, "error", {
            "message": buildTtsConfigErrorMessage(language, voiceName, errorDetail),
            "detail": errorDetail,
            "ttsLanguage": language,
            "ttsVoice": voiceName,
        })
    except Exception as e:
        logger.warning(f"TTS failed for session {sessionId}: {e}")
        await emitSessionEvent(sessionId, "error", {
            "message": buildTtsConfigErrorMessage("de-DE", None, str(e)),
            "detail": str(e),
        })
|
||
|
||
|
||
def _resolveFileNameAndMime(title: str) -> tuple:
|
||
"""Derive fileName and mimeType from a document title. Only appends .md if no known extension present."""
|
||
import os
|
||
knownExtensions = {
|
||
".md": "text/markdown", ".txt": "text/plain", ".html": "text/html",
|
||
".htm": "text/html", ".pdf": "application/pdf", ".json": "application/json",
|
||
".csv": "text/csv", ".xml": "application/xml", ".doc": "application/msword",
|
||
".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
|
||
}
|
||
_, ext = os.path.splitext(title)
|
||
if ext.lower() in knownExtensions:
|
||
return title, knownExtensions[ext.lower()]
|
||
return f"{title}.md", "text/markdown"
|
||
|
||
|
||
async def _saveOrUpdateDocument(doc: Dict[str, Any], contextId: str, userId: str,
                                mandateId: str, instanceId: str, interface, sessionId: str,
                                user=None):
    """Save a document as a platform FileItem (no CoachingDocument).

    Creates the file record plus its data blob, re-scopes it to the feature
    instance, and emits a ``documentCreated`` SSE event. Best-effort: any
    failure is logged, never raised. ``contextId``, ``userId`` and
    ``interface`` are currently unused but kept for signature stability.
    """
    try:
        title = doc.get("title", "Dokument")
        content = doc.get("content", "")
        contentBytes = content.encode("utf-8")
        fileName, mimeType = _resolveFileNameAndMime(title)

        import modules.interfaces.interfaceDbManagement as interfaceDbManagement
        mgmtInterface = interfaceDbManagement.getInterface(
            currentUser=user, mandateId=mandateId, featureInstanceId=instanceId
        )
        # Create the file record, then attach the binary payload.
        fileItem = mgmtInterface.createFile(name=fileName, mimeType=mimeType, content=contentBytes)
        mgmtInterface.createFileData(fileItem.id, contentBytes)

        # Re-scope the file to this feature instance so it is visible there.
        from modules.datamodels.datamodelFiles import FileItem as FileItemModel
        mgmtInterface.db.recordModify(FileItemModel, fileItem.id, {
            "scope": "featureInstance",
            "featureInstanceId": instanceId,
            "mandateId": mandateId,
        })

        await emitSessionEvent(sessionId, "documentCreated", {
            "id": fileItem.id, "fileName": fileName, "fileSize": len(contentBytes),
        })
        logger.info(f"Document saved as platform FileItem: {fileItem.id} ({title})")

    except Exception as e:
        logger.warning(f"Failed to save document as FileItem: {e}")
|
||
|
||
|
||
|
||
|
||
async def _resolveDocumentIntent(combinedUserPrompt: str, docs: List[Dict[str, Any]], callAiFn) -> Dict[str, Any]:
    """Pre-AI-call: identify which documents the user references and what action is needed.

    Sends a compact document catalog to a classifier prompt via ``callAiFn``.

    Returns:
        Dict with ``read`` / ``update`` / ``create`` id lists plus
        ``noDocumentAction``; the no-action default when there are no docs,
        the AI call fails, or the response is unusable.
    """
    if not docs:
        return {"read": [], "update": [], "create": [], "noDocumentAction": True}
    from . import serviceCommcoachAi as aiPrompts
    # Summaries are truncated so the intent prompt stays small.
    # NOTE(review): "title" falls back summary -> fileName, which looks
    # inverted (a title built from the summary) — confirm intended.
    docCatalog = [{"id": d.get("id", ""), "title": d.get("summary") or d.get("fileName", ""), "summary": (d.get("summary") or "")[:100]} for d in docs]
    prompt = aiPrompts.buildDocumentIntentPrompt(combinedUserPrompt, docCatalog)
    try:
        response = await callAiFn("Du analysierst Dokumentreferenzen in Benutzeranfragen. Antworte NUR als JSON.", prompt)
        if response and response.errorCount == 0 and response.content:
            parsed = aiPrompts._parseAiJsonSafe(response.content.strip(), {"read": [], "update": [], "create": [], "noDocumentAction": True})
            return parsed
    except Exception as e:
        logger.warning(f"Document intent detection failed: {e}")
    return {"read": [], "update": [], "create": [], "noDocumentAction": True}
|
||
|
||
|
||
def _getPlatformFileList(mandateId: str = None, instanceId: str = None) -> List[Dict[str, Any]]:
    """Get the list of platform FileItems for this feature instance (for doc intent detection).

    Returns:
        Lightweight dicts (``id``, ``fileName``, ``summary``); an empty list
        when no ``instanceId`` is given or on any error.
    """
    try:
        import modules.interfaces.interfaceDbManagement as interfaceDbManagement
        from modules.datamodels.datamodelFiles import FileItem
        mgmtIf = interfaceDbManagement.getInterface(
            currentUser=None, mandateId=mandateId, featureInstanceId=instanceId
        )
        records = mgmtIf.db.getRecordset(
            FileItem, recordFilter={"featureInstanceId": instanceId}
        ) if instanceId else []
        result = []
        for r in records:
            # DB layer may return plain dicts or pydantic models.
            d = r if isinstance(r, dict) else r.model_dump() if hasattr(r, "model_dump") else {}
            result.append({
                "id": d.get("id", ""),
                "fileName": d.get("fileName") or d.get("name") or "Dokument",
                # No real summary is stored here; the file name stands in.
                "summary": d.get("fileName") or "",
            })
        return result
    except Exception as e:
        logger.warning(f"Failed to load platform file list: {e}")
        return []
|
||
|
||
|
||
def _loadDocumentContents(docIds: List[str], interface, mandateId: str = None, instanceId: str = None) -> List[Dict[str, Any]]:
    """Load file content for the given IDs from the platform FileItem store.

    At most DOC_INTENT_MAX_DOCS files are loaded; each extracted text is
    capped at DOC_CONTENT_MAX_CHARS characters. ``interface`` is unused but
    kept for signature stability. Errors yield whatever was collected so far.
    """
    results = []
    try:
        import modules.interfaces.interfaceDbManagement as interfaceDbManagement
        from modules.datamodels.datamodelFiles import FileItem
        mgmtIf = interfaceDbManagement.getInterface(
            currentUser=None, mandateId=mandateId, featureInstanceId=instanceId
        )
        for fId in docIds[:DOC_INTENT_MAX_DOCS]:
            fileRecords = mgmtIf.db.getRecordset(FileItem, recordFilter={"id": fId})
            if fileRecords:
                # DB layer may return plain dicts or pydantic models.
                f = fileRecords[0] if isinstance(fileRecords[0], dict) else fileRecords[0].model_dump()
                content = ""
                # Extracted text lives in the knowledge index, not the file record.
                try:
                    from modules.datamodels.datamodelKnowledge import FileContentIndex
                    idxRecords = mgmtIf.db.getRecordset(FileContentIndex, recordFilter={"fileId": fId})
                    if idxRecords:
                        idx = idxRecords[0] if isinstance(idxRecords[0], dict) else idxRecords[0].model_dump()
                        content = (idx.get("extractedText") or "")[:DOC_CONTENT_MAX_CHARS]
                except Exception:
                    # Missing index: return the document without content.
                    pass
                results.append({
                    "id": fId,
                    "title": f.get("fileName") or f.get("name") or "Dokument",
                    "content": content,
                })
    except Exception as e:
        logger.warning(f"Failed to load document contents from platform: {e}")
    return results
|
||
|
||
|
||
async def _emitChunkedResponse(sessionId: str, createdMsg: Dict[str, Any], fullText: str):
    """Stream the response as messageChunk events, then emit the full message."""
    msgId = createdMsg.get("id")
    words = fullText.split()
    accumulated = ""
    for start in range(0, len(words), CHUNK_WORD_SIZE):
        chunk = " ".join(words[start:start + CHUNK_WORD_SIZE])
        accumulated = f"{accumulated} {chunk}".strip() if accumulated else chunk
        await emitSessionEvent(sessionId, "messageChunk", {
            "id": msgId,
            "role": "assistant",
            "chunk": chunk,
            "accumulated": accumulated,
        })
        # Small pause so the client renders a progressive "typing" stream.
        await asyncio.sleep(CHUNK_DELAY_SECONDS)
    # Final event carries the complete message for clients that skip chunks.
    await emitSessionEvent(sessionId, "message", {
        "id": msgId,
        "role": "assistant",
        "content": fullText,
        "createdAt": createdMsg.get("createdAt"),
    })
|
||
|
||
|
||
def _resolvePersona(session: Optional[Dict[str, Any]], interface) -> Optional[Dict[str, Any]]:
|
||
"""Resolve persona data from session's personaId."""
|
||
if not session:
|
||
return None
|
||
personaId = session.get("personaId")
|
||
if not personaId:
|
||
return None
|
||
try:
|
||
return interface.getPersona(personaId)
|
||
except Exception:
|
||
return None
|
||
|
||
|
||
def _getDocumentSummaries(contextId: str, userId: str, interface,
                          mandateId: str = None, instanceId: str = None) -> Optional[List[str]]:
    """Get document summaries from platform FileItems (UDL) for the coaching instance.

    Each entry is ``[fileName] <first 200 chars of extracted text>...`` or just
    ``[fileName]`` when no knowledge index exists. At most 10 files are listed.
    ``contextId``, ``userId`` and ``interface`` are unused but kept for
    signature stability.

    Returns:
        List of summary strings, or None when there are no files / on error.
    """
    try:
        import modules.interfaces.interfaceDbManagement as interfaceDbManagement
        from modules.datamodels.datamodelFiles import FileItem
        mgmtIf = interfaceDbManagement.getInterface(
            currentUser=None, mandateId=mandateId, featureInstanceId=instanceId
        )
        files = mgmtIf.db.getRecordset(
            FileItem, recordFilter={"featureInstanceId": instanceId}
        ) if instanceId else []
        summaries = []
        for f in files[:10]:
            # DB layer may return plain dicts or pydantic models.
            fData = f if isinstance(f, dict) else f.model_dump() if hasattr(f, "model_dump") else {}
            name = fData.get("fileName") or fData.get("name") or "Dokument"
            fId = fData.get("id")
            snippet = None
            if fId:
                # Text snippet comes from the knowledge index (best-effort).
                try:
                    from modules.datamodels.datamodelKnowledge import FileContentIndex
                    idxRecords = mgmtIf.db.getRecordset(
                        FileContentIndex, recordFilter={"fileId": fId}
                    )
                    if idxRecords:
                        idx = idxRecords[0] if isinstance(idxRecords[0], dict) else idxRecords[0].model_dump()
                        snippet = (idx.get("extractedText") or "")[:200]
                except Exception:
                    pass
            if snippet:
                summaries.append(f"[{name}] {snippet}...")
            else:
                summaries.append(f"[{name}]")
        return summaries if summaries else None
    except Exception as e:
        logger.warning(f"Failed to load platform file summaries for instance {instanceId}: {e}")
        return None
|
||
|
||
|
||
def _createCommcoachRagFn(
    userId: str,
    featureInstanceId: str,
    mandateId: str,
    context: Dict[str, Any],
    tasks: List[Dict[str, Any]],
    currentUser=None,
):
    """Create a CommCoach-specific RAG function combining KnowledgeService RAG with live coaching DB context.

    The returned coroutine function matches the AgentService
    ``buildRagContextFn`` signature and closes over the coaching context,
    tasks and user identity captured here.
    """

    async def _buildRagContext(
        currentPrompt: str, workflowId: str, userId: str,
        featureInstanceId: str, mandateId: str, **kwargs
    ) -> str:
        # Collected context fragments; joined with blank lines at the end.
        parts = []

        # 1. Standard KnowledgeService RAG (finds indexed session chunks + files)
        try:
            from modules.serviceCenter import getService
            from modules.serviceCenter.context import ServiceCenterContext
            serviceContext = ServiceCenterContext(
                user=currentUser,
                mandate_id=mandateId,
                feature_instance_id=featureInstanceId,
            )
            knowledgeService = getService("knowledge", serviceContext)
            ragContext = await knowledgeService.buildAgentContext(
                currentPrompt=currentPrompt,
                workflowId=workflowId,
                userId=userId,
                featureInstanceId=featureInstanceId,
                mandateId=mandateId,
            )
            if ragContext:
                parts.append(ragContext)
        except Exception as e:
            # RAG is optional; coaching continues on live context alone.
            logger.debug(f"CommCoach RAG knowledge context failed: {e}")

        # 2. Live coaching DB context (current goals, tasks, rolling overview)
        liveContext = []
        goals = _parseJsonField(context.get("goals")) if context else None
        if goals:
            # Goals may be stored as plain strings or {"text": ...} dicts.
            goalTexts = [g.get("text", g) if isinstance(g, dict) else str(g) for g in goals if g]
            if goalTexts:
                liveContext.append("Aktuelle Ziele:\n" + "\n".join(f"- {g}" for g in goalTexts))

        # Only open/in-progress tasks matter; capped at five lines.
        openTasks = [t for t in (tasks or []) if t.get("status") in ("open", "inProgress")]
        if openTasks:
            taskLines = [f"- {t.get('title', '')}" for t in openTasks[:5]]
            liveContext.append("Offene Aufgaben:\n" + "\n".join(taskLines))

        rollingOverview = context.get("rollingOverview") if context else None
        if rollingOverview:
            # Truncated so the rolling overview cannot dominate the prompt.
            liveContext.append(f"Gesamtüberblick bisheriger Sessions:\n{rollingOverview[:500]}")

        insights = _parseJsonField(context.get("insights")) if context else None
        if insights:
            # Only the five most recent insights are surfaced.
            insightTexts = [i.get("text", i) if isinstance(i, dict) else str(i) for i in insights[-5:] if i]
            if insightTexts:
                liveContext.append("Bisherige Erkenntnisse:\n" + "\n".join(f"- {t}" for t in insightTexts))

        if liveContext:
            parts.append("--- Coaching-Kontext (Live) ---\n" + "\n\n".join(liveContext))

        return "\n\n".join(parts) if parts else ""

    return _buildRagContext
|
||
|
||
|
||
def _parseJsonField(value, fallback=None):
|
||
if not value:
|
||
return fallback
|
||
if isinstance(value, (list, dict)):
|
||
return value
|
||
try:
|
||
return json.loads(value)
|
||
except (json.JSONDecodeError, TypeError):
|
||
return fallback
|
||
|
||
|
||
_RESEARCH_KEYWORDS = re.compile(
|
||
r"\b(such|recherchier|schau nach|im web|finde heraus|google|online|nachschlagen|"
|
||
r"search|look up|find out|browse)\b",
|
||
re.IGNORECASE,
|
||
)
|
||
|
||
|
||
def _shouldActivateTools(
|
||
fileIds: Optional[List[str]],
|
||
dataSourceIds: Optional[List[str]],
|
||
featureDataSourceIds: Optional[List[str]],
|
||
userMessage: str,
|
||
) -> bool:
|
||
"""Decide whether the agent should have tools activated for this turn."""
|
||
if fileIds:
|
||
return True
|
||
if dataSourceIds:
|
||
return True
|
||
if featureDataSourceIds:
|
||
return True
|
||
if _RESEARCH_KEYWORDS.search(userMessage or ""):
|
||
return True
|
||
return False
|
||
|
||
|
||
def _buildConversationHistory(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||
"""Convert coaching messages to OpenAI-style conversation history for the agent."""
|
||
history = []
|
||
for msg in messages:
|
||
role = msg.get("role", "user")
|
||
content = msg.get("content", "")
|
||
if role in ("user", "assistant") and content:
|
||
history.append({"role": role, "content": content})
|
||
return history
|
||
|
||
|
||
# Responses longer than this many words get an AI-generated speech summary.
_TTS_WORD_LIMIT = 200


async def _prepareSpeechText(fullText: str, callAiFn) -> str:
    """Build TTS-ready text: short responses verbatim, long ones AI-summarized.

    Falls back to a hard 1500-char truncation when summarization fails.
    """
    spoken = stripMarkdownForTts(fullText)
    if len(spoken.split()) <= _TTS_WORD_LIMIT:
        return spoken
    try:
        summaryPrompt = f"""Fasse den folgenden Text in 3-4 natürlichen, gesprochenen Sätzen zusammen.
Der Text soll vorgelesen werden – schreibe daher natürlich und flüssig, keine Aufzählungen.
Behalte die wichtigsten Punkte und den Ton bei.

Text:
{spoken[:3000]}

Antworte NUR mit der gekürzten Sprachversion."""
        aiResponse = await callAiFn(
            "Du kürzt Texte für Sprachausgabe. Antworte kurz und natürlich.",
            summaryPrompt,
        )
        if aiResponse and aiResponse.errorCount == 0 and aiResponse.content:
            return aiResponse.content.strip()
    except Exception as e:
        logger.warning(f"Speech summary generation failed: {e}")
    return spoken[:1500]
|
||
|
||
|
||
class CommcoachService:
|
||
"""Coaching orchestrator: processes messages, calls AI, extracts tasks and scores."""
|
||
|
||
    def __init__(self, currentUser: User, mandateId: str, instanceId: str):
        """Bind the service to a user, mandate and feature instance.

        Args:
            currentUser: Authenticated platform user the coaching runs for.
            mandateId: Mandate (tenant) scope for persistence and AI calls.
            instanceId: CommCoach feature-instance id.
        """
        self.currentUser = currentUser
        self.mandateId = mandateId
        self.instanceId = instanceId
        # Stringified user id, used in DB filters and message records.
        self.userId = str(currentUser.id)
|
||
|
||
    async def processMessage(
        self, sessionId: str, contextId: str, userContent: str, interface,
        fileIds: Optional[List[str]] = None,
        dataSourceIds: Optional[List[str]] = None,
        featureDataSourceIds: Optional[List[str]] = None,
        allowedProviders: Optional[List[str]] = None,
    ) -> Dict[str, Any]:
        """
        Process a user message through the agent-based coaching pipeline:
        1. Store user message
        2. Build coaching system prompt + session history
        3. Run AgentService with CommCoach RAG and optional tools
        4. Map agent events to CommCoach SSE events
        5. Post-processing: store message, TTS, tasks, scores

        Args:
            sessionId: Active coaching session id (also keys the SSE queue).
            contextId: Coaching context the session belongs to.
            userContent: Raw user message text.
            interface: CommCoach DB interface (messages, sessions, tasks, ...).
            fileIds: Optional attached file ids (activates tools).
            dataSourceIds: Optional data source ids (activates tools).
            featureDataSourceIds: Optional feature data source ids (activates tools).
            allowedProviders: Optional AI provider restriction, forwarded to
                the agent call.

        Returns:
            The created assistant message record, or the user message record
            when processing aborts (missing context, cancellation, AI error).
        """
        # NOTE(review): imported but not referenced in this method — possibly
        # kept for import side effects; confirm before removing.
        from . import interfaceFeatureCommcoach as interfaceDb

        # Store user message
        userMsg = CoachingMessage(
            sessionId=sessionId,
            contextId=contextId,
            userId=self.userId,
            role=CoachingMessageRole.USER,
            content=userContent,
            contentType=CoachingMessageContentType.TEXT,
        ).model_dump()
        createdUserMsg = interface.createMessage(userMsg)

        # Echo the stored user message to the SSE stream immediately.
        await emitSessionEvent(sessionId, "message", {
            "id": createdUserMsg.get("id"),
            "role": "user",
            "content": userContent,
            "createdAt": createdUserMsg.get("createdAt"),
        })

        # Build context
        context = interface.getContext(contextId)
        if not context:
            logger.error(f"Context {contextId} not found")
            return createdUserMsg

        messages = interface.getRecentMessages(sessionId, count=COMPRESSION_MAX_MESSAGES_FETCH)
        session = interface.getSession(sessionId)
        compressedSummary = session.get("compressedHistorySummary") if session else None
        compressedUpTo = session.get("compressedHistoryUpToMessageCount") if session else None

        # Split history into an optional compressed summary + recent messages.
        earlierSummary, previousMessages = aiPrompts.prepareMessagesForPrompt(
            messages, compressedSummary, compressedUpTo
        )

        # Lazy history compression: once the session exceeds the threshold,
        # summarize the older messages and persist the summary on the session.
        if earlierSummary is None and len(messages) > COMPRESSION_MESSAGE_THRESHOLD:
            toSummarizeCount = len(messages) - COMPRESSION_RECENT_COUNT
            if toSummarizeCount > 0:
                toSummarize = messages[:toSummarizeCount]
                try:
                    summaryPrompt = aiPrompts.buildEarlierConversationSummaryPrompt(toSummarize)
                    summaryResponse = await self._callAi(
                        "Du fasst Coaching-Gespräche präzise zusammen.", summaryPrompt
                    )
                    if summaryResponse and summaryResponse.errorCount == 0 and summaryResponse.content:
                        earlierSummary = summaryResponse.content.strip()
                        interface.updateSession(sessionId, {
                            "compressedHistorySummary": earlierSummary,
                            "compressedHistoryUpToMessageCount": toSummarizeCount,
                        })
                        previousMessages = messages[-COMPRESSION_RECENT_COUNT:]
                        logger.info(f"Session {sessionId}: Compressed history ({toSummarizeCount} msgs -> {len(earlierSummary)} chars)")
                except Exception as e:
                    # Compression is best-effort; fall back to a short window.
                    logger.warning(f"History compression failed for session {sessionId}: {e}")
                    previousMessages = messages[-20:]

        # All user messages since the last assistant reply form this turn's prompt.
        combinedUserPrompt = _buildCombinedUserPrompt(previousMessages)
        if not combinedUserPrompt:
            combinedUserPrompt = userContent

        # History without the pending user tail (the tail IS the prompt).
        contextMessages = _stripPendingUserMessages(previousMessages)
        tasks = interface.getTasks(contextId, self.userId)

        await emitSessionEvent(sessionId, "status", {"label": "Kontext wird geladen..."})

        persona = _resolvePersona(session, interface)

        systemPrompt = aiPrompts.buildCoachingSystemPrompt(
            context,
            contextMessages,
            tasks,
            earlierSummary=earlierSummary,
            persona=persona,
        )

        # Build conversation history for the agent
        conversationHistory = _buildConversationHistory(contextMessages)

        # Dynamic tool activation
        useTools = _shouldActivateTools(fileIds, dataSourceIds, featureDataSourceIds, combinedUserPrompt)

        await emitSessionEvent(sessionId, "status", {"label": "Coach formuliert Antwort..."})

        try:
            agentResponse = await self._runAgent(
                sessionId=sessionId,
                prompt=combinedUserPrompt,
                systemPrompt=systemPrompt,
                conversationHistory=conversationHistory,
                context=context,
                tasks=tasks,
                fileIds=fileIds,
                useTools=useTools,
                allowedProviders=allowedProviders,
            )
        except asyncio.CancelledError:
            # A newer message superseded this turn; stop quietly.
            logger.info(f"processMessage cancelled for session {sessionId} (new message arrived)")
            return createdUserMsg
        except Exception as e:
            logger.error(f"Agent call failed for session {sessionId}: {e}")
            await emitSessionEvent(sessionId, "error", {"message": f"AI error: {str(e)}"})
            return createdUserMsg

        textContent = agentResponse or ""

        if not textContent:
            # User-facing (German) fallback when the agent produced nothing.
            textContent = "Entschuldigung, ich konnte gerade nicht antworten. Bitte versuche es erneut."

        # Re-check cancellation before persisting the assistant reply.
        if asyncio.current_task() and asyncio.current_task().cancelled():
            logger.info(f"processMessage cancelled before storing response for session {sessionId}")
            return createdUserMsg

        assistantMsg = CoachingMessage(
            sessionId=sessionId,
            contextId=contextId,
            userId=self.userId,
            role=CoachingMessageRole.ASSISTANT,
            content=textContent,
            contentType=CoachingMessageContentType.TEXT,
        ).model_dump()
        createdAssistantMsg = interface.createMessage(assistantMsg)

        # Keep the session's message counter in sync with the store.
        messages = interface.getMessages(sessionId)
        interface.updateSession(sessionId, {"messageCount": len(messages)})

        await emitSessionEvent(sessionId, "status", {"label": "Antwort wird verarbeitet..."})

        # TTS: use free-text directly; for long responses, generate speech summary
        speechText = await _prepareSpeechText(textContent, self._callAi)

        # Run TTS concurrently with the chunked text emission, then join.
        ttsTask = asyncio.create_task(
            _generateAndEmitTts(sessionId, speechText, self.currentUser, self.mandateId, self.instanceId, interface)
        )
        await _emitChunkedResponse(sessionId, createdAssistantMsg, textContent)
        await ttsTask

        await emitSessionEvent(sessionId, "complete", {})
        return createdAssistantMsg
|
||
|
||
async def _runAgent(
    self,
    sessionId: str,
    prompt: str,
    systemPrompt: str,
    conversationHistory: List[Dict[str, Any]],
    context: Dict[str, Any],
    tasks: List[Dict[str, Any]],
    fileIds: Optional[List[str]] = None,
    useTools: bool = False,
    allowedProviders: Optional[List[str]] = None,
) -> str:
    """Run the AgentService for a coaching message. Returns the final text response."""
    from modules.serviceCenter import getService
    from modules.serviceCenter.context import ServiceCenterContext
    from modules.serviceCenter.services.serviceAgent.datamodelAgent import AgentConfig, AgentEventTypeEnum

    svcContext = ServiceCenterContext(
        user=self.currentUser,
        mandate_id=self.mandateId,
        feature_instance_id=self.instanceId,
    )
    agent = getService("agent", svcContext)

    agentConfig = AgentConfig(
        toolSet="commcoach" if useTools else "none",
        maxRounds=3 if useTools else 1,
        temperature=0.4,
    )

    ragFn = _createCommcoachRagFn(
        userId=self.userId,
        featureInstanceId=self.instanceId,
        mandateId=self.mandateId,
        context=context,
        tasks=tasks,
        currentUser=self.currentUser,
    )

    # Agent events that are forwarded verbatim to the session event queue.
    forwarded = {
        AgentEventTypeEnum.TOOL_CALL: "toolCall",
        AgentEventTypeEnum.TOOL_RESULT: "toolResult",
        AgentEventTypeEnum.AGENT_PROGRESS: "agentProgress",
    }

    collected = ""
    async for event in agent.runAgent(
        prompt=prompt,
        fileIds=fileIds,
        config=agentConfig,
        toolSet=agentConfig.toolSet,
        workflowId=f"commcoach:{sessionId}",
        conversationHistory=conversationHistory,
        buildRagContextFn=ragFn,
        systemPromptOverride=systemPrompt,
    ):
        eventType = event.type
        if eventType in (AgentEventTypeEnum.CHUNK, AgentEventTypeEnum.MESSAGE):
            # Streamed text fragments are concatenated in arrival order.
            collected += event.content or ""
        elif eventType == AgentEventTypeEnum.FINAL:
            # FINAL only supplies the text when nothing was streamed before it.
            if not collected:
                collected = event.content or ""
        elif eventType in forwarded:
            await emitSessionEvent(sessionId, forwarded[eventType], event.data or {})
        elif eventType == AgentEventTypeEnum.ERROR:
            await emitSessionEvent(sessionId, "error", {"message": event.content or "Agent error"})

    return collected.strip()
async def processSessionOpening(self, sessionId: str, contextId: str, interface) -> Dict[str, Any]:
    """
    Generate and stream the opening greeting for a new session.
    Emits status, message, and complete events to the session queue.

    Returns the created assistant message dict, or {} when the context is
    missing or the AI call fails (an error event is emitted in those cases).
    """
    await emitSessionEvent(sessionId, "status", {"label": "Coach bereitet sich vor..."})

    context = interface.getContext(contextId)
    if not context:
        # Without a coaching context there is nothing to open; surface the
        # error and close the event stream so the client does not hang.
        logger.error(f"Context {contextId} not found")
        await emitSessionEvent(sessionId, "error", {"message": "Context not found"})
        await emitSessionEvent(sessionId, "complete", {})
        return {}

    tasks = interface.getTasks(contextId, self.userId)
    # New session: no in-session history yet for the system prompt.
    previousMessages = []

    # Summaries of earlier sessions in this context feed long-term continuity.
    allSessions = interface.getSessions(contextId, self.userId)
    previousSessionSummaries = buildSessionSummariesForPrompt(
        allSessions, excludeSessionId=sessionId, limit=PREVIOUS_SESSION_SUMMARIES_COUNT
    )

    session = interface.getSession(sessionId)
    persona = _resolvePersona(session, interface)
    documentSummaries = _getDocumentSummaries(
        contextId, self.userId, interface, mandateId=self.mandateId, instanceId=self.instanceId
    )

    systemPrompt = aiPrompts.buildCoachingSystemPrompt(
        context, previousMessages, tasks,
        previousSessionSummaries=previousSessionSummaries,
        persona=persona,
        documentSummaries=documentSummaries,
    )

    # First session for this context iff there are no earlier summaries.
    isFirstSession = not previousSessionSummaries or len(previousSessionSummaries) == 0

    # Pick the opening instruction: role-play persona > first session > returning user.
    if persona and persona.get("key") != "coach":
        personaLabel = persona.get("label", "Gesprächspartner")
        openingUserPrompt = f"Beginne das Gespräch in deiner Rolle als {personaLabel}. Stelle dich kurz vor und eröffne die Situation gemäss deiner Rollenbeschreibung."
    elif isFirstSession:
        openingUserPrompt = "Dies ist die ERSTE Session zu diesem Thema. Begrüsse den Benutzer, stelle das Thema kurz vor und stelle eine offene Einstiegsfrage. Erfinde KEINE vorherigen Gespräche oder Zusammenfassungen."
    else:
        openingUserPrompt = "Begrüsse den Benutzer zurück, fasse in einem Satz zusammen wo wir stehen, und stelle eine gezielte Einstiegsfrage."

    try:
        aiResponse = await self._callAi(systemPrompt, openingUserPrompt)
    except Exception as e:
        logger.error(f"AI opening failed for session {sessionId}: {e}")
        await emitSessionEvent(sessionId, "error", {"message": f"AI error: {str(e)}"})
        await emitSessionEvent(sessionId, "complete", {})
        return {}

    responseRaw = (
        aiResponse.content.strip()
        if aiResponse and aiResponse.errorCount == 0
        else ""
    )

    if not responseRaw:
        # AI returned nothing usable: fall back to a static greeting.
        parsed = {"text": f"Willkommen zur Coaching-Session zum Thema \"{context.get('title')}\". Was möchtest du heute besprechen?", "speech": "", "documents": []}
    else:
        # Response is expected to be JSON with text / speech / documents keys.
        parsed = _parseAiJsonResponse(responseRaw)

    textContent = parsed.get("text", "")
    speechContent = parsed.get("speech", "")
    documents = parsed.get("documents", [])

    # Persist any documents the AI produced alongside the greeting.
    for doc in documents:
        await _saveOrUpdateDocument(doc, contextId, self.userId, self.mandateId, self.instanceId, interface, sessionId, user=self.currentUser)

    assistantMsg = CoachingMessage(
        sessionId=sessionId,
        contextId=contextId,
        userId=self.userId,
        role=CoachingMessageRole.ASSISTANT,
        content=textContent,
        contentType=CoachingMessageContentType.TEXT,
    ).model_dump()
    createdMsg = interface.createMessage(assistantMsg)
    interface.updateSession(sessionId, {"messageCount": 1})

    # Generate TTS concurrently while the text is streamed out in chunks.
    ttsTask = asyncio.create_task(
        _generateAndEmitTts(sessionId, speechContent, self.currentUser, self.mandateId, self.instanceId, interface)
    )
    await _emitChunkedResponse(sessionId, createdMsg, textContent)
    await ttsTask

    await emitSessionEvent(sessionId, "complete", {})

    logger.info(f"CommCoach session opening completed: {sessionId}")
    return createdMsg
async def generateResumeGreeting(self, sessionId: str, contextId: str, messages: list, interface) -> str:
    """Generate a follow-up greeting when user returns to an active session."""
    coachingContext = interface.getContext(contextId)
    if not coachingContext:
        raise ValueError(f"Context {contextId} not found for resume greeting")

    # Build the greeting prompt from the recent transcript and the topic title.
    topicTitle = coachingContext.get("title", "Coaching")
    greetingPrompt = buildResumeGreetingPrompt(messages, topicTitle)

    response = await self._callAi(
        "Du bist ein freundlicher Coach. Antworte kurz und einladend.",
        greetingPrompt,
    )

    # Any error or empty payload is a hard failure for the caller to handle.
    hasContent = bool(response and response.errorCount == 0 and response.content)
    if not hasContent:
        raise RuntimeError(f"AI resume greeting failed: {getattr(response, 'errorMessage', 'no content')}")
    return response.content.strip()
async def processAudioMessage(self, sessionId: str, contextId: str, audioContent: bytes, language: str, interface) -> Dict[str, Any]:
    """Process an audio message: STT -> coaching pipeline -> TTS response.

    Transcribes the audio, then hands the transcript to the regular text
    pipeline (processMessage). Returns its result, or {} when speech
    recognition produced no usable text (an error event is emitted).
    """
    from modules.interfaces.interfaceVoiceObjects import getVoiceInterface

    await emitSessionEvent(sessionId, "status", {"label": "Sprache wird erkannt..."})

    voiceInterface = getVoiceInterface(self.currentUser, self.mandateId)
    sttResult = await voiceInterface.speechToText(
        audioContent=audioContent,
        language=language,
        skipFallbacks=True,
    )

    # STT backends may return either a dict ({"text": ..., "error": ...})
    # or a plain string; normalize to a string transcript.
    transcribedText = ""
    if isinstance(sttResult, dict):
        transcribedText = sttResult.get("text", "")
    elif isinstance(sttResult, str):
        transcribedText = sttResult

    if not transcribedText.strip():
        # Surface the backend's error detail when available.
        sttError = sttResult.get("error", "Unbekannter Fehler") if isinstance(sttResult, dict) else "Unbekannter Fehler"
        msg = f"Sprache konnte nicht erkannt werden. ({sttError})"
        await emitSessionEvent(sessionId, "error", {"message": msg, "detail": sttError})
        return {}

    # Continue in the normal text pipeline (AI call, scoring, TTS).
    return await self.processMessage(sessionId, contextId, transcribedText, interface)
async def completeSession(self, sessionId: str, interface) -> Dict[str, Any]:
    """
    Complete a session:
    1. Generate summary
    2. Extract tasks
    3. Generate scores
    4. Update context stats
    5. Send email summary

    All AI-driven steps are best-effort: each failure is logged and the
    session is still marked completed. Returns the updated session dict,
    or {} when the session does not exist, or the raw session when it is
    too short to summarize.
    """
    session = interface.getSession(sessionId)
    if not session:
        return {}

    contextId = session.get("contextId")
    context = interface.getContext(contextId) if contextId else None
    messages = interface.getMessages(sessionId)
    # context may be None (missing contextId or deleted context); resolve the
    # title once with a guard so no step calls .get() on None.
    contextTitle = context.get("title", "Coaching") if context else "Coaching"

    # Too short to summarize/score: mark completed and clear compressed
    # history so it never leaks into a new session.
    if len(messages) < 2:
        interface.updateSession(sessionId, {
            "status": CoachingSessionStatus.COMPLETED.value,
            "endedAt": getUtcTimestamp(),
            "compressedHistorySummary": None,
            "compressedHistoryUpToMessageCount": None,
        })
        return session

    # Generate summary (AI returns JSON with summary + structured email payload)
    summary = None
    emailData = None
    try:
        summaryPrompt = aiPrompts.buildSummaryPrompt(messages, contextTitle)
        summaryResponse = await self._callAi("Du bist ein präziser Zusammenfasser. Antworte NUR als JSON.", summaryPrompt)
        if summaryResponse and summaryResponse.errorCount == 0 and summaryResponse.content:
            parsed = aiPrompts.parseJsonResponse(summaryResponse.content.strip(), None)
            if isinstance(parsed, dict):
                summary = parsed.get("summary") or parsed.get("text")
                # Email payload may arrive under either key.
                if isinstance(parsed.get("email"), dict):
                    emailData = parsed.get("email")
                elif isinstance(parsed.get("emailData"), dict):
                    emailData = parsed.get("emailData")
            else:
                # Non-JSON response: use the raw text as the summary.
                summary = summaryResponse.content.strip()
    except Exception as e:
        logger.warning(f"Summary generation failed: {e}")

    # Extract up to 5 key topics (stored as a JSON string) from the summary.
    keyTopics = None
    if summary:
        try:
            keyTopicsPrompt = aiPrompts.buildKeyTopicsExtractionPrompt(summary, messages)
            keyTopicsResponse = await self._callAi(
                "Du extrahierst Kernthemen aus Zusammenfassungen.", keyTopicsPrompt
            )
            if keyTopicsResponse and keyTopicsResponse.errorCount == 0 and keyTopicsResponse.content:
                parsed = aiPrompts.parseJsonResponse(keyTopicsResponse.content, [])
                if isinstance(parsed, list) and parsed:
                    keyTopics = json.dumps([str(t) for t in parsed[:5]])
        except Exception as e:
            logger.warning(f"Key topics extraction failed: {e}")

    # Extract tasks (at most 3 per session)
    try:
        taskPrompt = aiPrompts.buildTaskExtractionPrompt(messages)
        taskResponse = await self._callAi("Du extrahierst Aufgaben aus Gesprächen.", taskPrompt)
        if taskResponse and taskResponse.errorCount == 0:
            extractedTasks = aiPrompts.parseJsonResponse(taskResponse.content, [])
            if isinstance(extractedTasks, list):
                for taskData in extractedTasks[:3]:
                    if isinstance(taskData, dict) and taskData.get("title"):
                        newTask = CoachingTask(
                            contextId=contextId,
                            sessionId=sessionId,
                            userId=self.userId,
                            mandateId=self.mandateId,
                            title=taskData["title"],
                            description=taskData.get("description"),
                            priority=taskData.get("priority", "medium"),
                        ).model_dump()
                        created = interface.createTask(newTask)
                        await emitSessionEvent(sessionId, "taskCreated", created)
    except Exception as e:
        logger.warning(f"Task extraction failed: {e}")

    # Generate scores; competenceScore is the mean of all dimension scores.
    competenceScore = None
    try:
        scorePrompt = aiPrompts.buildScoringPrompt(messages, context.get("category", "custom") if context else "custom")
        scoreResponse = await self._callAi("Du bewertest Kommunikationskompetenz.", scorePrompt)
        if scoreResponse and scoreResponse.errorCount == 0:
            scores = aiPrompts.parseJsonResponse(scoreResponse.content, [])
            if isinstance(scores, list):
                scoreValues = []
                for scoreData in scores:
                    if isinstance(scoreData, dict) and "dimension" in scoreData and "score" in scoreData:
                        newScore = CoachingScore(
                            contextId=contextId,
                            sessionId=sessionId,
                            userId=self.userId,
                            mandateId=self.mandateId,
                            dimension=scoreData["dimension"],
                            score=float(scoreData["score"]),
                            trend=scoreData.get("trend", "stable"),
                            evidence=scoreData.get("evidence"),
                        ).model_dump()
                        interface.createScore(newScore)
                        scoreValues.append(float(scoreData["score"]))
                        await emitSessionEvent(sessionId, "scoreUpdate", scoreData)
                if scoreValues:
                    competenceScore = sum(scoreValues) / len(scoreValues)
    except Exception as e:
        logger.warning(f"Scoring failed: {e}")
        competenceScore = None

    # Generate insights; keep only the 10 most recent on the context.
    try:
        insightPrompt = aiPrompts.buildInsightPrompt(messages, summary)
        insightResponse = await self._callAi("Du generierst kurze Coaching-Insights.", insightPrompt)
        if insightResponse and insightResponse.errorCount == 0 and insightResponse.content:
            insights = aiPrompts.parseJsonResponse(insightResponse.content, [])
            if isinstance(insights, list):
                existingInsights = aiPrompts._parseJsonField(context.get("insights") if context else None, [])
                for ins in insights[:3]:
                    insightText = ins.get("text", ins) if isinstance(ins, dict) else str(ins)
                    if insightText:
                        existingInsights.append({"text": insightText, "sessionId": sessionId, "createdAt": getIsoTimestamp()})
                        await emitSessionEvent(sessionId, "insightGenerated", {"text": insightText, "sessionId": sessionId})
                if contextId and existingInsights:
                    interface.updateContext(contextId, {"insights": json.dumps(existingInsights[-10:])})
    except Exception as e:
        logger.warning(f"Insight generation failed: {e}")

    # Index session data for RAG-based long-term memory (non-blocking).
    try:
        from .serviceCommcoachIndexer import indexSessionData
        from modules.serviceCenter import getService
        from modules.serviceCenter.context import ServiceCenterContext

        serviceContext = ServiceCenterContext(
            user=self.currentUser,
            mandate_id=self.mandateId,
            feature_instance_id=self.instanceId,
        )
        knowledgeService = getService("knowledge", serviceContext)
        parsedGoals = aiPrompts._parseJsonField(context.get("goals") if context else None, [])
        parsedInsights = aiPrompts._parseJsonField(context.get("insights") if context else None, [])
        allTasks = interface.getTasks(contextId, self.userId)

        await indexSessionData(
            sessionId=sessionId,
            contextId=contextId,
            userId=self.userId,
            featureInstanceId=self.instanceId,
            mandateId=self.mandateId,
            messages=messages,
            summary=summary,
            keyTopics=keyTopics,
            goals=parsedGoals,
            insights=parsedInsights,
            tasks=allTasks,
            contextTitle=contextTitle,
            knowledgeService=knowledgeService,
        )
    except Exception as e:
        logger.warning(f"Coaching session indexing failed (non-blocking): {e}")

    # Calculate duration from the session start timestamp (UTC seconds).
    startedAt = session.get("startedAt")
    durationSeconds = 0
    if startedAt:
        from datetime import datetime, timezone
        start = datetime.fromtimestamp(startedAt, tz=timezone.utc)
        end = datetime.now(timezone.utc)
        durationSeconds = int((end - start).total_seconds())

    # Update session - clear compressed history so it never leaks into new sessions
    sessionUpdates = {
        "status": CoachingSessionStatus.COMPLETED.value,
        "endedAt": getUtcTimestamp(),
        "summary": summary,
        "durationSeconds": durationSeconds,
        "messageCount": len(messages),
        "compressedHistorySummary": None,
        "compressedHistoryUpToMessageCount": None,
    }
    if competenceScore is not None:
        sessionUpdates["competenceScore"] = round(competenceScore, 1)
    if keyTopics is not None:
        sessionUpdates["keyTopics"] = keyTopics
    interface.updateSession(sessionId, sessionUpdates)

    # Update context stats
    if contextId:
        allSessions = interface.getSessions(contextId, self.userId)
        completedCount = len([s for s in allSessions if s.get("status") == CoachingSessionStatus.COMPLETED.value])
        interface.updateContext(contextId, {
            "sessionCount": completedCount,
            "lastSessionAt": getUtcTimestamp(),
        })

    # Update user profile streak
    self._updateStreak(interface)

    # Check and award badges
    try:
        from .serviceCommcoachGamification import checkAndAwardBadges
        updatedSession = interface.getSession(sessionId)
        newBadges = await checkAndAwardBadges(
            interface, self.userId, self.mandateId, self.instanceId, session=updatedSession
        )
        for badge in newBadges:
            await emitSessionEvent(sessionId, "badgeAwarded", badge)
    except Exception as e:
        logger.warning(f"Badge check failed: {e}")

    # Send email summary
    if summary:
        await self._sendSessionEmail(session, summary, emailData, contextTitle, interface)

    await emitSessionEvent(sessionId, "sessionState", {
        "status": "completed",
        "summary": summary,
        "competenceScore": competenceScore,
    })
    await emitSessionEvent(sessionId, "complete", {})

    return interface.getSession(sessionId)
def _updateStreak(self, interface):
    """Update the user's streak in their profile.

    streakDays counts consecutive calendar days with at least one completed
    session. Fix: a second session on the same day previously incremented
    the streak again (diff == 0 was treated like diff == 1); now same-day
    sessions keep the streak unchanged, next-day sessions extend it, and
    any larger gap resets it to 1. Best-effort: failures are only logged.
    """
    try:
        profile = interface.getProfile(self.userId, self.instanceId)
        if not profile:
            profile = interface.getOrCreateProfile(self.userId, self.mandateId, self.instanceId)

        from datetime import datetime, timezone

        lastSessionAt = profile.get("lastSessionAt")
        currentStreak = profile.get("streakDays", 0)
        longestStreak = profile.get("longestStreak", 0)
        totalSessions = profile.get("totalSessions", 0)

        today = datetime.now(timezone.utc).date()

        newStreak = 1
        if lastSessionAt:
            lastDate = datetime.fromtimestamp(lastSessionAt, tz=timezone.utc).date()
            diff = (today - lastDate).days
            if diff == 0:
                # Same-day session: streak unchanged (but at least 1).
                newStreak = max(currentStreak, 1)
            elif diff == 1:
                # Consecutive day: extend the streak.
                newStreak = currentStreak + 1
            # Any larger gap (or clock skew) resets to 1.

        newLongest = max(longestStreak, newStreak)

        interface.updateProfile(profile.get("id"), {
            "streakDays": newStreak,
            "longestStreak": newLongest,
            "totalSessions": totalSessions + 1,
            "lastSessionAt": getUtcTimestamp(),
        })
    except Exception as e:
        logger.warning(f"Failed to update streak: {e}")
async def _sendSessionEmail(
    self,
    session: Dict[str, Any],
    summary: str,
    emailData: Optional[Dict[str, Any]],
    contextTitle: str,
    interface,
):
    """Send session summary via email with the standard PowerOn layout."""
    try:
        # Respect the user's opt-out; summaries are enabled by default.
        profile = interface.getProfile(self.userId, self.instanceId)
        if profile and not profile.get("emailSummaryEnabled", True):
            return

        from modules.interfaces.interfaceMessaging import getInterface as getMessagingInterface
        from modules.interfaces.interfaceDbApp import getRootInterface
        from modules.shared.notifyMandateAdmins import renderHtmlEmail, resolveMandateName

        # Resolve the recipient; silently skip when no address is known.
        recipient = getRootInterface().getUser(self.userId)
        if not recipient or not recipient.email:
            return

        mandateName = resolveMandateName(self.mandateId)
        bodyBlock = _buildSummaryEmailBlock(emailData, summary, contextTitle)
        emailHtml = renderHtmlEmail(
            "Coaching-Session Zusammenfassung",
            [
                f'Thema: {contextTitle}',
                "Hier ist die kompakte Zusammenfassung deiner abgeschlossenen Session.",
            ],
            mandateName,
            footerNote="Diese Zusammenfassung wurde automatisch aus deiner Coaching-Session erstellt.",
            rawHtmlBlock=bodyBlock,
        )

        getMessagingInterface().send(
            "email",
            recipient.email,
            f"Coaching-Session Zusammenfassung: {contextTitle}",
            emailHtml,
        )
        interface.updateSession(session.get("id"), {"emailSent": True})
        logger.info(f"Session summary email sent to {recipient.email}")
    except Exception as e:
        logger.warning(f"Failed to send session email: {e}")
async def _buildRetrievalContext(
    self,
    contextId: str,
    sessionId: str,
    userContent: str,
    context: Dict[str, Any],
    interface,
) -> Dict[str, Any]:
    """
    Build retrieval context based on user intent.
    Returns: previousSessionSummaries, rollingOverview, retrievedSession, retrievedByTopic, intent, sessionSummaries.

    Intent is detected from the user message and selects one of four
    retrieval strategies: summarize all sessions, recall one session by
    date, recall sessions by topic (keyword search + optional RAG), or the
    default path which also maintains the cached rolling overview.
    """
    intent = detectIntent(userContent)
    allSessions = interface.getSessions(contextId, self.userId)
    completedSessions = [s for s in allSessions if s.get("status") == CoachingSessionStatus.COMPLETED.value]

    # Annotate each completed session with a display date (dd.mm.yyyy, UTC)
    # used by the date-based recall below.
    for s in completedSessions:
        startedAt = s.get("startedAt")
        if startedAt:
            from datetime import datetime, timezone
            dt = datetime.fromtimestamp(startedAt, tz=timezone.utc)
            s["date"] = dt.strftime("%d.%m.%Y")
        else:
            s["date"] = ""

    result = {
        "intent": intent,
        "previousSessionSummaries": [],
        "rollingOverview": None,
        "retrievedSession": None,
        "retrievedByTopic": None,
        "sessionSummaries": [],
    }

    # Cached rolling overview lives on the context record, together with the
    # session count it was built from (used for staleness check below).
    ctx = interface.getContext(contextId)
    rollingOverview = ctx.get("rollingOverview") if ctx else None
    rollingUpTo = ctx.get("rollingOverviewUpToSessionCount") if ctx else None

    if intent == RetrievalIntent.SUMMARIZE_ALL:
        # Broad recap: pull up to 20 session summaries.
        result["previousSessionSummaries"] = buildSessionSummariesForPrompt(
            allSessions, excludeSessionId=sessionId, limit=20
        )
        result["sessionSummaries"] = result["previousSessionSummaries"]
        if len(completedSessions) >= ROLLING_OVERVIEW_SESSION_THRESHOLD and rollingOverview:
            result["rollingOverview"] = rollingOverview

    elif intent == RetrievalIntent.RECALL_SESSION:
        # Recall a specific session by the date mentioned in the message.
        targetDate = _parseDateFromMessage(userContent)
        retrieved = findSessionByDate(completedSessions, targetDate)
        if retrieved:
            result["retrievedSession"] = retrieved
            logger.info(f"Session recall: found session {retrieved.get('id')} for date {targetDate}")
        result["previousSessionSummaries"] = buildSessionSummariesForPrompt(
            allSessions, excludeSessionId=sessionId, limit=PREVIOUS_SESSION_SUMMARIES_COUNT
        )
        if rollingOverview:
            result["rollingOverview"] = rollingOverview

    elif intent == RetrievalIntent.RECALL_TOPIC:
        # Keyword match over completed sessions, then (when an embedding is
        # available) augment with mandate-wide RAG hits.
        retrieved = list(searchSessionsByTopic(completedSessions, userContent))
        queryVector = await self._embedUserQuery(userContent)
        if queryVector:
            ragHits = searchSessionsByTopicRag(
                userContent,
                self.userId,
                self.instanceId,
                mandateId=self.mandateId,
                queryVector=queryVector,
            )
            for hit in ragHits:
                content = (hit.get("content") or "").strip()
                if not content:
                    continue
                # RAG hits are normalized to the same shape as session hits.
                retrieved.append({
                    "summary": content[:450],
                    "date": "",
                    "source": "rag",
                    "ragSourceLabel": hit.get("fileName") or "Mandantenwissen",
                })
        result["retrievedByTopic"] = retrieved
        if retrieved:
            logger.info(f"Topic recall: {len(retrieved)} item(s) (sessions + optional RAG)")
        result["previousSessionSummaries"] = buildSessionSummariesForPrompt(
            allSessions, excludeSessionId=sessionId, limit=PREVIOUS_SESSION_SUMMARIES_COUNT
        )
        if rollingOverview:
            result["rollingOverview"] = rollingOverview

    else:
        # Default path: recent summaries, plus rolling-overview maintenance
        # once enough sessions exist.
        result["previousSessionSummaries"] = buildSessionSummariesForPrompt(
            allSessions, excludeSessionId=sessionId, limit=PREVIOUS_SESSION_SUMMARIES_COUNT
        )
        if len(completedSessions) >= ROLLING_OVERVIEW_SESSION_THRESHOLD:
            # Cached overview is considered fresh while it lags the current
            # session count by at most 3 sessions.
            if rollingOverview and rollingUpTo is not None and rollingUpTo >= len(completedSessions) - 3:
                result["rollingOverview"] = rollingOverview
            else:
                try:
                    # NOTE(review): slice appears to skip the most recent
                    # N sessions and summarize the next 2*N — assumes
                    # completedSessions is newest-first; confirm ordering
                    # against interface.getSessions.
                    toSummarize = completedSessions[ROLLING_OVERVIEW_EVERY_N_SESSIONS:]
                    toSummarize = toSummarize[:ROLLING_OVERVIEW_EVERY_N_SESSIONS * 2]
                    if len(toSummarize) >= ROLLING_OVERVIEW_EVERY_N_SESSIONS:
                        summariesForOverview = buildSessionSummariesForPrompt(
                            toSummarize, limit=len(toSummarize),
                        )
                        overviewPrompt = aiPrompts.buildRollingOverviewPrompt(
                            summariesForOverview, context.get("title", "Coaching")
                        )
                        overviewResponse = await self._callAi(
                            "Du fasst Coaching-Sessions kompakt zusammen.", overviewPrompt
                        )
                        if overviewResponse and overviewResponse.errorCount == 0 and overviewResponse.content:
                            newOverview = overviewResponse.content.strip()
                            # Persist the refreshed overview with its watermark.
                            interface.updateContext(contextId, {
                                "rollingOverview": newOverview,
                                "rollingOverviewUpToSessionCount": len(completedSessions),
                            })
                            result["rollingOverview"] = newOverview
                            logger.info(f"Context {contextId}: Rolling overview updated ({len(toSummarize)} sessions)")
                except Exception as e:
                    logger.warning(f"Rolling overview failed for context {contextId}: {e}")

    return result
async def _callAi(self, systemPrompt: str, userPrompt: str):
    """Call the AI service with the given prompts."""
    from modules.serviceCenter import getService
    from modules.serviceCenter.context import ServiceCenterContext

    svcContext = ServiceCenterContext(
        user=self.currentUser,
        mandate_id=self.mandateId,
        feature_instance_id=self.instanceId,
    )
    ai = getService("ai", svcContext)
    await ai.ensureAiObjectsInitialized()

    # System prompt travels as request context; user prompt as the prompt.
    request = AiCallRequest(
        prompt=userPrompt,
        context=systemPrompt,
        options=AiCallOptions(
            operationType=OperationTypeEnum.DATA_ANALYSE,
            priority=PriorityEnum.QUALITY,
        )
    )
    return await ai.callAi(request)
||
async def _embedUserQuery(self, text: str) -> Optional[List[float]]:
    """Embedding for mandate-wide RAG (same ServiceCenter AI service as coaching calls)."""
    # Cap the query at 2000 characters; nothing to embed for blank input.
    query = (text or "").strip()[:2000]
    if not query:
        return None

    from modules.serviceCenter import getService
    from modules.serviceCenter.context import ServiceCenterContext

    svcContext = ServiceCenterContext(
        user=self.currentUser,
        mandate_id=self.mandateId,
        feature_instance_id=self.instanceId,
    )
    ai = getService("ai", svcContext)
    await ai.ensureAiObjectsInitialized()

    try:
        result = await ai.callEmbedding([query])
    except Exception as e:
        # Embedding is optional; callers treat None as "no RAG".
        logger.warning(f"CommCoach RAG embedding failed: {e}")
        return None

    if not result or result.errorCount > 0:
        return None

    vectors = (result.metadata or {}).get("embeddings") or []
    first = vectors[0] if vectors else None
    # Only a non-empty list counts as a usable vector.
    return first if isinstance(first, list) and first else None