Merge pull request #102 from valueonag/feat/commcoach

Feat/commcoach
This commit is contained in:
Patrick Motsch 2026-03-06 12:49:24 +01:00 committed by GitHub
commit 42e79a724a
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
49 changed files with 2055 additions and 1037 deletions

View file

@ -103,6 +103,7 @@ class CoachingSession(BaseModel):
mandateId: str = Field(description="Mandate ID")
instanceId: str = Field(description="Feature instance ID")
status: CoachingSessionStatus = Field(default=CoachingSessionStatus.ACTIVE)
personaId: Optional[str] = Field(default=None, description="FK to CoachingPersona (Iteration 2)")
summary: Optional[str] = Field(default=None, description="AI-generated session summary")
coachNotes: Optional[str] = Field(default=None, description="JSON: AI internal notes for continuity")
compressedHistorySummary: Optional[str] = Field(default=None, description="AI summary of older messages for long sessions")
@ -183,6 +184,62 @@ class CoachingUserProfile(BaseModel):
updatedAt: Optional[str] = Field(default=None)
# ============================================================================
# Iteration 2: Personas
# ============================================================================
class CoachingPersona(BaseModel):
    """A roleplay persona for coaching sessions."""
    # Primary key, generated client-side so the record is addressable before insert.
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    userId: str = Field(description="Owner user ID ('system' for builtins)")
    # Builtin (system) personas have no mandate/instance scoping; custom ones do.
    mandateId: Optional[str] = Field(default=None)
    instanceId: Optional[str] = Field(default=None)
    key: str = Field(description="Unique key, e.g. 'critical_cfo_f'")
    label: str = Field(description="Display label, e.g. 'Kritische CFO'")
    description: str = Field(description="Detailed role description for the AI")
    systemPromptOverride: Optional[str] = Field(default=None, description="Full system prompt override for this persona")
    gender: Optional[str] = Field(default=None, description="m or f")
    category: str = Field(default="builtin", description="'builtin' or 'custom'")
    # Inactive personas are filtered out of persona listings.
    isActive: bool = Field(default=True)
    # ISO timestamps; set by the persistence layer, not at model creation.
    createdAt: Optional[str] = Field(default=None)
    updatedAt: Optional[str] = Field(default=None)
# ============================================================================
# Iteration 2: Documents
# ============================================================================
class CoachingDocument(BaseModel):
    """A document attached to a coaching context."""
    # Primary key, generated at construction time.
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    contextId: str = Field(description="FK to CoachingContext")
    userId: str = Field(description="Owner user ID")
    mandateId: str = Field(description="Mandate ID")
    instanceId: Optional[str] = Field(default=None)
    fileName: str = Field(description="Original file name")
    mimeType: str = Field(default="application/octet-stream")
    # Size in bytes of the uploaded/generated file.
    fileSize: int = Field(default=0)
    extractedText: Optional[str] = Field(default=None, description="Text content extracted from file")
    summary: Optional[str] = Field(default=None, description="AI-generated summary")
    fileRef: Optional[str] = Field(default=None, description="Reference to file in storage")
    # ISO timestamp; set by the persistence layer, not at model creation.
    createdAt: Optional[str] = Field(default=None)
# ============================================================================
# Iteration 2: Badges / Gamification
# ============================================================================
class CoachingBadge(BaseModel):
    """An achievement badge awarded to a user."""
    # Primary key, generated at construction time.
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    userId: str = Field(description="Owner user ID")
    mandateId: str = Field(description="Mandate ID")
    instanceId: str = Field(description="Feature instance ID")
    badgeKey: str = Field(description="Badge identifier, e.g. 'streak_7'")
    # Both timestamps are stamped by the persistence layer at award time.
    awardedAt: Optional[str] = Field(default=None)
    createdAt: Optional[str] = Field(default=None)
# ============================================================================
# API Request/Response Models
# ============================================================================
@ -232,6 +289,25 @@ class UpdateProfileRequest(BaseModel):
emailSummaryEnabled: Optional[bool] = None
class StartSessionRequest(BaseModel):
    """Request body for starting a coaching session."""
    # Optional persona to roleplay; None selects the default coach behavior.
    personaId: Optional[str] = None
class CreatePersonaRequest(BaseModel):
    """Request body for creating a custom roleplay persona."""
    label: str
    description: str
    # "m" or "f" (matches CoachingPersona.gender) -- validation upstream not visible here.
    gender: Optional[str] = None
    systemPromptOverride: Optional[str] = None
class UpdatePersonaRequest(BaseModel):
    """Partial update for a custom persona; fields left as None are not applied."""
    label: Optional[str] = None
    description: Optional[str] = None
    gender: Optional[str] = None
    systemPromptOverride: Optional[str] = None
    isActive: Optional[bool] = None
class DashboardData(BaseModel):
"""Aggregated dashboard data for the user."""
totalContexts: int = 0

View file

@ -5,6 +5,7 @@ Interface to CommCoach database.
Uses the PostgreSQL connector for data access with strict user ownership.
"""
import json
import logging
from typing import Dict, Any, List, Optional
@ -237,6 +238,98 @@ class CommcoachObjects:
count += 1
return count
# =========================================================================
# Personas
# =========================================================================
def getPersonas(self, userId: str, instanceId: str) -> List[Dict[str, Any]]:
    """Return all active personas visible to the user.

    Combines builtin personas (owned by the 'system' user) with the user's
    own custom personas for this feature instance. Personas whose isActive
    flag is False are excluded; records missing the flag count as active.
    """
    from .datamodelCommcoach import CoachingPersona
    builtins = self.db.getRecordset(CoachingPersona, recordFilter={"userId": "system"})
    custom = self.db.getRecordset(CoachingPersona, recordFilter={"userId": userId, "instanceId": instanceId})
    # Renamed from 'all' -- the original shadowed the builtin all().
    combined = builtins + custom
    return [p for p in combined if p.get("isActive", True)]
def getPersona(self, personaId: str) -> Optional[Dict[str, Any]]:
    """Fetch a single persona by primary key; None when no record matches."""
    from .datamodelCommcoach import CoachingPersona
    matches = self.db.getRecordset(CoachingPersona, recordFilter={"id": personaId})
    if not matches:
        return None
    return matches[0]
def createPersona(self, data: Dict[str, Any]) -> Dict[str, Any]:
    """Persist a new persona record, stamping creation/update times.

    Uses a single timestamp so createdAt and updatedAt are guaranteed equal
    on a fresh record (the original called getIsoTimestamp() twice, which
    could produce values differing by microseconds).
    NOTE: mutates the caller's data dict in place.
    """
    from .datamodelCommcoach import CoachingPersona
    now = getIsoTimestamp()
    data["createdAt"] = now
    data["updatedAt"] = now
    return self.db.recordCreate(CoachingPersona, data)
def updatePersona(self, personaId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]:
    """Apply field updates to a persona, refreshing its updatedAt timestamp.

    NOTE: mutates the caller's updates dict in place (adds 'updatedAt').
    """
    from .datamodelCommcoach import CoachingPersona
    updates["updatedAt"] = getIsoTimestamp()
    return self.db.recordModify(CoachingPersona, personaId, updates)
def deletePersona(self, personaId: str) -> bool:
    """Delete a persona record; returns the connector's success flag."""
    from .datamodelCommcoach import CoachingPersona
    return self.db.recordDelete(CoachingPersona, personaId)
# =========================================================================
# Documents
# =========================================================================
def getDocuments(self, contextId: str, userId: str) -> List[Dict[str, Any]]:
    """Return the user's documents for a context, newest first.

    Records without a createdAt value sort last (empty-string key).
    """
    from .datamodelCommcoach import CoachingDocument
    matches = self.db.getRecordset(CoachingDocument, recordFilter={"contextId": contextId, "userId": userId})
    return sorted(matches, key=lambda rec: rec.get("createdAt") or "", reverse=True)
def getDocument(self, documentId: str) -> Optional[Dict[str, Any]]:
    """Fetch a single document by primary key, or None if it does not exist."""
    from .datamodelCommcoach import CoachingDocument
    records = self.db.getRecordset(CoachingDocument, recordFilter={"id": documentId})
    return records[0] if records else None
def createDocument(self, data: Dict[str, Any]) -> Dict[str, Any]:
    """Persist a new document record, stamping its creation time.

    NOTE: mutates the caller's data dict in place (adds 'createdAt').
    """
    from .datamodelCommcoach import CoachingDocument
    data["createdAt"] = getIsoTimestamp()
    return self.db.recordCreate(CoachingDocument, data)
def deleteDocument(self, documentId: str) -> bool:
    """Delete a document record; returns the connector's success flag."""
    from .datamodelCommcoach import CoachingDocument
    return self.db.recordDelete(CoachingDocument, documentId)
# =========================================================================
# Badges
# =========================================================================
def getBadges(self, userId: str, instanceId: str) -> List[Dict[str, Any]]:
    """Return the user's badges for this instance, most recently awarded first."""
    from .datamodelCommcoach import CoachingBadge
    badges = self.db.getRecordset(CoachingBadge, recordFilter={"userId": userId, "instanceId": instanceId})
    return sorted(badges, key=lambda b: b.get("awardedAt") or "", reverse=True)
def hasBadge(self, userId: str, instanceId: str, badgeKey: str) -> bool:
    """True if the user already holds the given badge for this instance."""
    from .datamodelCommcoach import CoachingBadge
    matches = self.db.getRecordset(
        CoachingBadge,
        recordFilter={"userId": userId, "instanceId": instanceId, "badgeKey": badgeKey},
    )
    return bool(matches)
def awardBadge(self, data: Dict[str, Any]) -> Dict[str, Any]:
    """Persist a new badge record, stamping award/creation times.

    Uses a single timestamp so awardedAt and createdAt are guaranteed equal
    (the original called getIsoTimestamp() twice, which could produce values
    differing by microseconds). NOTE: mutates the caller's data dict.
    """
    from .datamodelCommcoach import CoachingBadge
    now = getIsoTimestamp()
    data["awardedAt"] = now
    data["createdAt"] = now
    return self.db.recordCreate(CoachingBadge, data)
# =========================================================================
# Score History
# =========================================================================
def getScoreHistory(self, contextId: str, userId: str) -> Dict[str, List[Dict[str, Any]]]:
    """Group score records by dimension, each list sorted by createdAt ascending.

    Records missing a dimension go under "unknown"; records without a
    createdAt value sort first (empty-string key).
    """
    history: Dict[str, List[Dict[str, Any]]] = {}
    for record in self.getScores(contextId, userId):
        entry = {
            "score": record.get("score"),
            "trend": record.get("trend"),
            "evidence": record.get("evidence"),
            "createdAt": record.get("createdAt"),
            "sessionId": record.get("sessionId"),
        }
        history.setdefault(record.get("dimension", "unknown"), []).append(entry)
    for entries in history.values():
        entries.sort(key=lambda item: item.get("createdAt") or "")
    return history
# =========================================================================
# User Profile
# =========================================================================
@ -292,14 +385,23 @@ class CommcoachObjects:
contextSummaries = []
for ctx in activeContexts:
goalProgress = _calcGoalProgress(ctx.get("goals"))
contextSummaries.append({
"id": ctx.get("id"),
"title": ctx.get("title"),
"category": ctx.get("category"),
"sessionCount": ctx.get("sessionCount", 0),
"lastSessionAt": ctx.get("lastSessionAt"),
"goalProgress": goalProgress,
})
allGoalProgress = []
for ctx in activeContexts:
gp = _calcGoalProgress(ctx.get("goals"))
if gp is not None:
allGoalProgress.append(gp)
overallGoalProgress = round(sum(allGoalProgress) / len(allGoalProgress)) if allGoalProgress else None
return {
"totalContexts": len(contexts),
"activeContexts": len(activeContexts),
@ -312,4 +414,31 @@ class CommcoachObjects:
"openTasks": self.getOpenTaskCount(userId, instanceId),
"completedTasks": self.getCompletedTaskCount(userId, instanceId),
"contexts": contextSummaries,
"goalProgress": overallGoalProgress,
"badges": self.getBadges(userId, instanceId),
"level": _calcLevel(profile.get("totalSessions", 0) if profile else 0),
}
def _calcGoalProgress(goalsRaw) -> Optional[int]:
"""Calculate goal completion percentage from a context's goals JSON field."""
if not goalsRaw:
return None
goals = goalsRaw
if isinstance(goalsRaw, str):
try:
goals = json.loads(goalsRaw)
except (json.JSONDecodeError, TypeError):
return None
if not isinstance(goals, list) or len(goals) == 0:
return None
done = sum(1 for g in goals if isinstance(g, dict) and g.get("status") in ("done", "completed"))
return round(done / len(goals) * 100)
def _calcLevel(totalSessions: int) -> Dict[str, Any]:
levels = [(50, 5, "Meister"), (25, 4, "Experte"), (10, 3, "Fortgeschritten"), (3, 2, "Engagiert")]
for threshold, number, label in levels:
if totalSessions >= threshold:
return {"number": number, "label": label, "totalSessions": totalSessions}
return {"number": 1, "label": "Einsteiger", "totalSessions": totalSessions}

View file

@ -22,14 +22,9 @@ UI_OBJECTS = [
},
{
"objectKey": "ui.feature.commcoach.coaching",
"label": {"en": "Coaching", "de": "Coaching", "fr": "Coaching"},
"label": {"en": "Coaching & Dossier", "de": "Coaching & Dossier", "fr": "Coaching & Dossier"},
"meta": {"area": "coaching"}
},
{
"objectKey": "ui.feature.commcoach.dossier",
"label": {"en": "Dossier", "de": "Dossier", "fr": "Dossier"},
"meta": {"area": "dossier"}
},
{
"objectKey": "ui.feature.commcoach.settings",
"label": {"en": "Settings", "de": "Einstellungen", "fr": "Parametres"},
@ -68,6 +63,21 @@ DATA_OBJECTS = [
"label": {"en": "User Profile", "de": "Benutzerprofil", "fr": "Profil utilisateur"},
"meta": {"table": "CoachingUserProfile", "fields": ["id", "userId", "preferredLanguage"]}
},
{
"objectKey": "data.feature.commcoach.CoachingPersona",
"label": {"en": "Coaching Persona", "de": "Coaching-Persona", "fr": "Persona coaching"},
"meta": {"table": "CoachingPersona", "fields": ["id", "key", "label", "gender"]}
},
{
"objectKey": "data.feature.commcoach.CoachingDocument",
"label": {"en": "Coaching Document", "de": "Coaching-Dokument", "fr": "Document coaching"},
"meta": {"table": "CoachingDocument", "fields": ["id", "contextId", "fileName"]}
},
{
"objectKey": "data.feature.commcoach.CoachingBadge",
"label": {"en": "Coaching Badge", "de": "Coaching-Auszeichnung", "fr": "Badge coaching"},
"meta": {"table": "CoachingBadge", "fields": ["id", "badgeKey", "awardedAt"]}
},
{
"objectKey": "data.feature.commcoach.*",
"label": {"en": "All CommCoach Data", "de": "Alle CommCoach-Daten", "fr": "Toutes les donnees CommCoach"},
@ -184,6 +194,8 @@ def registerFeature(catalogService) -> bool:
)
_syncTemplateRolesToDb()
_seedBuiltinPersonas()
_registerScheduler()
logger.info(f"Feature '{FEATURE_CODE}' registered {len(UI_OBJECTS)} UI, {len(RESOURCE_OBJECTS)} resource, {len(DATA_OBJECTS)} data objects")
return True
@ -193,6 +205,29 @@ def registerFeature(catalogService) -> bool:
return False
def _seedBuiltinPersonas():
    """Seed builtin roleplay personas into the database (best-effort)."""
    try:
        from .serviceCommcoachPersonas import seedBuiltinPersonas
        from .interfaceFeatureCommcoach import CommcoachInterface
        from modules.interfaces.interfaceDbManagement import getInterface as _getDb
        seedBuiltinPersonas(CommcoachInterface(_getDb()))
    except Exception as e:
        logger.warning(f"CommCoach persona seeding failed (non-fatal): {e}")
def _registerScheduler():
    """Register CommCoach scheduled jobs (daily reminders); failures are non-fatal."""
    try:
        from modules.shared.eventManagement import eventManager
        from .serviceCommcoachScheduler import registerScheduledJobs
        registerScheduledJobs(eventManager)
    except Exception as exc:
        logger.warning(f"CommCoach scheduler registration failed (non-fatal): {exc}")
def _syncTemplateRolesToDb() -> int:
try:
from modules.interfaces.interfaceDbApp import getRootInterface

View file

@ -9,9 +9,10 @@ import logging
import json
import asyncio
import base64
import uuid
from typing import Optional
from fastapi import APIRouter, HTTPException, Depends, Request
from fastapi.responses import StreamingResponse
from fastapi.responses import StreamingResponse, Response
from modules.auth import limiter, getRequestContext, RequestContext
from modules.shared.timeUtils import getIsoTimestamp
@ -23,14 +24,33 @@ from .datamodelCommcoach import (
CoachingContext, CoachingContextStatus, CoachingSession, CoachingSessionStatus,
CoachingMessage, CoachingMessageRole, CoachingMessageContentType,
CoachingTask, CoachingTaskStatus,
CoachingPersona, CoachingDocument, CoachingBadge,
CreateContextRequest, UpdateContextRequest,
SendMessageRequest, CreateTaskRequest, UpdateTaskRequest, UpdateTaskStatusRequest,
UpdateProfileRequest,
StartSessionRequest, CreatePersonaRequest, UpdatePersonaRequest,
)
from .serviceCommcoach import CommcoachService, emitSessionEvent, getSessionEventQueue, cleanupSessionEvents
logger = logging.getLogger(__name__)
def _audit(context: RequestContext, action: str, resourceType: str = None, resourceId: str = None, details: str = ""):
    """Log an audit event for CommCoach. Non-blocking, best-effort.

    Any failure (missing audit module, bad context, logger error) is
    swallowed so auditing can never break a request handler.
    """
    try:
        from modules.shared.auditLogger import audit_logger
        mandate = str(context.mandateId) if context.mandateId else None
        audit_logger.logEvent(
            userId=str(context.user.id),
            mandateId=mandate,
            category="commcoach",
            action=action,
            resourceType=resourceType,
            resourceId=resourceId,
            details=details,
        )
    except Exception:
        pass
router = APIRouter(
prefix="/api/commcoach",
tags=["CommCoach"],
@ -116,6 +136,7 @@ async def createContext(
created = interface.createContext(contextData)
logger.info(f"CommCoach context created: {created.get('id')} for user {userId}")
_audit(context, "commcoach.context.created", "CoachingContext", created.get("id"), f"Title: {body.title}")
return {"context": created}
@ -208,6 +229,7 @@ async def archiveContext(
_validateOwnership(ctx, context)
updated = interface.updateContext(contextId, {"status": CoachingContextStatus.ARCHIVED.value})
_audit(context, "commcoach.context.archived", "CoachingContext", contextId)
return {"context": updated}
@ -262,6 +284,7 @@ async def startSession(
request: Request,
instanceId: str,
contextId: str,
personaId: Optional[str] = None,
context: RequestContext = Depends(getRequestContext),
):
"""Start a new coaching session or resume active one. Returns SSE stream with sessionState, messages, and complete."""
@ -339,6 +362,7 @@ async def startSession(
userId=userId,
mandateId=mandateId,
instanceId=instanceId,
personaId=personaId,
).model_dump()
created = interface.createSession(sessionData)
sessionId = created.get("id")
@ -369,6 +393,7 @@ async def startSession(
pass
logger.info(f"CommCoach session started (streaming): {sessionId} for context {contextId}")
_audit(context, "commcoach.session.started", "CoachingSession", sessionId, f"Context: {contextId}")
return StreamingResponse(
_newSessionEventGenerator(),
media_type="text/event-stream",
@ -419,6 +444,7 @@ async def completeSession(
service = CommcoachService(context.user, mandateId, instanceId)
result = await service.completeSession(sessionId, interface)
_audit(context, "commcoach.session.completed", "CoachingSession", sessionId)
return {"session": result}
@ -866,3 +892,349 @@ async def testVoice(
except Exception as e:
logger.error(f"Voice test failed: {e}")
raise HTTPException(status_code=500, detail=f"TTS test failed: {str(e)}")
# =========================================================================
# Export Endpoints (Iteration 2)
# =========================================================================
@router.get("/{instanceId}/contexts/{contextId}/export")
@limiter.limit("10/minute")
async def exportDossier(
    request: Request,
    instanceId: str,
    contextId: str,
    format: str = "md",
    context: RequestContext = Depends(getRequestContext),
):
    """Export a dossier as Markdown or PDF.

    Query param 'format' selects "pdf" or "md" (default). When PDF rendering
    returns no bytes, the endpoint silently falls back to Markdown.
    Raises 404 when the context does not exist; ownership is enforced via
    _validateOwnership.
    """
    _validateInstanceAccess(instanceId, context)
    interface = _getInterface(context, instanceId)
    userId = str(context.user.id)
    ctx = interface.getContext(contextId)
    if not ctx:
        raise HTTPException(status_code=404, detail="Context not found")
    _validateOwnership(ctx, context)
    tasks = interface.getTasks(contextId, userId)
    scores = interface.getScores(contextId, userId)
    sessions = interface.getSessions(contextId, userId)
    # Local import keeps export dependencies out of the router's import path.
    from .serviceCommcoachExport import buildDossierMarkdown, renderDossierPdf
    # Audit happens before rendering so failed renders are still recorded.
    _audit(context, "commcoach.export.requested", "CoachingContext", contextId, f"format={format}")
    if format == "pdf":
        pdfBytes = await renderDossierPdf(ctx, sessions, tasks, scores)
        if pdfBytes:
            return Response(content=pdfBytes, media_type="application/pdf",
                            headers={"Content-Disposition": f'attachment; filename="dossier_{contextId[:8]}.pdf"'})
        # PDF rendering unavailable/failed -- fall back to Markdown.
        format = "md"
    md = buildDossierMarkdown(ctx, sessions, tasks, scores)
    return Response(content=md, media_type="text/markdown",
                    headers={"Content-Disposition": f'attachment; filename="dossier_{contextId[:8]}.md"'})
@router.get("/{instanceId}/sessions/{sessionId}/export")
@limiter.limit("10/minute")
async def exportSession(
    request: Request,
    instanceId: str,
    sessionId: str,
    format: str = "md",
    context: RequestContext = Depends(getRequestContext),
):
    """Export a session as Markdown or PDF.

    Query param 'format' selects "pdf" or "md" (default). When PDF rendering
    returns no bytes, the response falls back to Markdown. Raises 404 when
    the session does not exist; ownership is enforced via _validateOwnership.
    """
    _validateInstanceAccess(instanceId, context)
    interface = _getInterface(context, instanceId)
    session = interface.getSession(sessionId)
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")
    _validateOwnership(session, context)
    contextId = session.get("contextId")
    userId = str(context.user.id)
    messages = interface.getMessages(sessionId)
    # Tasks/scores are context-scoped; a session without a context exports none.
    tasks = interface.getTasks(contextId, userId) if contextId else []
    scores = interface.getScores(contextId, userId) if contextId else []
    from .serviceCommcoachExport import buildSessionMarkdown, renderSessionPdf
    # Audit happens before rendering so failed renders are still recorded.
    _audit(context, "commcoach.export.requested", "CoachingSession", sessionId, f"format={format}")
    if format == "pdf":
        pdfBytes = await renderSessionPdf(session, messages, tasks, scores)
        if pdfBytes:
            return Response(content=pdfBytes, media_type="application/pdf",
                            headers={"Content-Disposition": f'attachment; filename="session_{sessionId[:8]}.pdf"'})
        # PDF rendering unavailable/failed -- fall back to Markdown.
        format = "md"
    md = buildSessionMarkdown(session, messages, tasks, scores)
    return Response(content=md, media_type="text/markdown",
                    headers={"Content-Disposition": f'attachment; filename="session_{sessionId[:8]}.md"'})
# =========================================================================
# Persona Endpoints (Iteration 2)
# =========================================================================
@router.get("/{instanceId}/personas")
@limiter.limit("60/minute")
async def listPersonas(
    request: Request,
    instanceId: str,
    context: RequestContext = Depends(getRequestContext),
):
    """List all personas visible to the current user (builtins + own custom)."""
    _validateInstanceAccess(instanceId, context)
    interface = _getInterface(context, instanceId)
    visible = interface.getPersonas(str(context.user.id), instanceId)
    return {"personas": visible}
@router.post("/{instanceId}/personas")
@limiter.limit("10/minute")
async def createPersona(
    request: Request,
    instanceId: str,
    body: CreatePersonaRequest,
    context: RequestContext = Depends(getRequestContext),
):
    """Create a custom persona owned by the current user."""
    mandateId = _validateInstanceAccess(instanceId, context)
    interface = _getInterface(context, instanceId)
    # Custom personas get a random key; builtin keys are fixed identifiers.
    persona = CoachingPersona(
        userId=str(context.user.id),
        mandateId=mandateId,
        instanceId=instanceId,
        key=f"custom_{str(uuid.uuid4())[:8]}",
        label=body.label,
        description=body.description,
        gender=body.gender,
        systemPromptOverride=body.systemPromptOverride,
        category="custom",
    )
    created = interface.createPersona(persona.model_dump())
    return {"persona": created}
@router.put("/{instanceId}/personas/{personaId}")
@limiter.limit("10/minute")
async def updatePersonaRoute(
    request: Request,
    instanceId: str,
    personaId: str,
    body: UpdatePersonaRequest,
    context: RequestContext = Depends(getRequestContext),
):
    """Update a custom persona. Builtin personas are read-only (403)."""
    _validateInstanceAccess(instanceId, context)
    interface = _getInterface(context, instanceId)
    target = interface.getPersona(personaId)
    if not target:
        raise HTTPException(status_code=404, detail="Persona not found")
    if target.get("category") == "builtin":
        raise HTTPException(status_code=403, detail="Builtin personas cannot be edited")
    _validateOwnership(target, context)
    # Only fields explicitly provided in the request body are applied.
    changes = body.model_dump(exclude_none=True)
    return {"persona": interface.updatePersona(personaId, changes)}
@router.delete("/{instanceId}/personas/{personaId}")
@limiter.limit("10/minute")
async def deletePersonaRoute(
    request: Request,
    instanceId: str,
    personaId: str,
    context: RequestContext = Depends(getRequestContext),
):
    """Delete a custom persona. Builtin personas cannot be removed (403)."""
    _validateInstanceAccess(instanceId, context)
    interface = _getInterface(context, instanceId)
    target = interface.getPersona(personaId)
    if not target:
        raise HTTPException(status_code=404, detail="Persona not found")
    if target.get("category") == "builtin":
        raise HTTPException(status_code=403, detail="Builtin personas cannot be deleted")
    _validateOwnership(target, context)
    interface.deletePersona(personaId)
    return {"deleted": True}
# =========================================================================
# Document Endpoints (Iteration 2)
# =========================================================================
@router.get("/{instanceId}/contexts/{contextId}/documents")
@limiter.limit("60/minute")
async def listDocuments(
    request: Request,
    instanceId: str,
    contextId: str,
    context: RequestContext = Depends(getRequestContext),
):
    """List the current user's documents attached to a context."""
    _validateInstanceAccess(instanceId, context)
    interface = _getInterface(context, instanceId)
    documents = interface.getDocuments(contextId, str(context.user.id))
    return {"documents": documents}
@router.post("/{instanceId}/contexts/{contextId}/documents")
@limiter.limit("10/minute")
async def uploadDocument(
    request: Request,
    instanceId: str,
    contextId: str,
    context: RequestContext = Depends(getRequestContext),
):
    """Upload a document and bind it to a context. Stores file in Management DB.

    Expects a multipart form with a 'file' field. The raw file is stored in
    the management DB, text is extracted where possible, and for extracted
    text longer than 50 chars an AI summary is generated best-effort (upload
    still succeeds if summarization fails).
    """
    mandateId = _validateInstanceAccess(instanceId, context)
    interface = _getInterface(context, instanceId)
    userId = str(context.user.id)
    ctx = interface.getContext(contextId)
    if not ctx:
        raise HTTPException(status_code=404, detail="Context not found")
    _validateOwnership(ctx, context)
    form = await request.form()
    file = form.get("file")
    if not file or not hasattr(file, "read"):
        raise HTTPException(status_code=400, detail="No file uploaded")
    content = await file.read()
    fileName = getattr(file, "filename", "document")
    mimeType = getattr(file, "content_type", "application/octet-stream")
    fileSize = len(content)
    # NOTE(review): this error detail is German while other endpoint errors
    # are English -- confirm whether that is intentional.
    if not content:
        raise HTTPException(status_code=400, detail="Leere Datei hochgeladen")
    import modules.interfaces.interfaceDbManagement as interfaceDbManagement
    mgmtInterface = interfaceDbManagement.getInterface(currentUser=context.user)
    # _dupType presumably flags duplicate uploads -- unused here (TODO confirm).
    fileItem, _dupType = mgmtInterface.saveUploadedFile(content, fileName)
    fileRef = fileItem.id
    extractedText = _extractText(content, mimeType, fileName)
    summary = None
    if extractedText and len(extractedText.strip()) > 50:
        try:
            from .serviceCommcoach import CommcoachService
            service = CommcoachService(context.user, mandateId, instanceId)
            # Prompt caps the document excerpt at 3000 chars.
            aiResp = await service._callAi(
                "Du fasst Dokumente in 2-3 Saetzen zusammen.",
                f"Fasse folgendes Dokument zusammen:\n\n{extractedText[:3000]}"
            )
            if aiResp and aiResp.errorCount == 0 and aiResp.content:
                summary = aiResp.content.strip()
        except Exception as e:
            logger.warning(f"Document summary failed: {e}")
    docData = CoachingDocument(
        contextId=contextId,
        userId=userId,
        mandateId=mandateId,
        instanceId=instanceId,
        fileName=fileName,
        mimeType=mimeType,
        fileSize=fileSize,
        # Stored text is capped at 10k chars to bound record size.
        extractedText=extractedText[:10000] if extractedText else None,
        summary=summary,
        fileRef=fileRef,
    ).model_dump()
    created = interface.createDocument(docData)
    return {"document": created}
@router.delete("/{instanceId}/documents/{documentId}")
@limiter.limit("10/minute")
async def deleteDocumentRoute(
    request: Request,
    instanceId: str,
    documentId: str,
    context: RequestContext = Depends(getRequestContext),
):
    """Delete a document record and, best-effort, its stored file."""
    mandateId = _validateInstanceAccess(instanceId, context)
    interface = _getInterface(context, instanceId)
    doc = interface.getDocument(documentId)
    if not doc:
        raise HTTPException(status_code=404, detail="Document not found")
    _validateOwnership(doc, context)
    fileRef = doc.get("fileRef")
    if fileRef:
        # File deletion is best-effort; the DB record is removed regardless.
        try:
            import modules.interfaces.interfaceDbManagement as interfaceDbManagement
            mgmt = interfaceDbManagement.getInterface(
                currentUser=context.user, mandateId=mandateId, featureInstanceId=instanceId
            )
            mgmt.deleteFile(fileRef)
        except Exception as e:
            logger.warning(f"Failed to delete file {fileRef}: {e}")
    interface.deleteDocument(documentId)
    return {"deleted": True}
def _extractText(content: bytes, mimeType: str, fileName: str) -> Optional[str]:
"""Extract text from uploaded file content."""
try:
if mimeType == "text/plain" or fileName.endswith(".txt"):
return content.decode("utf-8", errors="replace")
if mimeType == "text/markdown" or fileName.endswith(".md"):
return content.decode("utf-8", errors="replace")
if "pdf" in mimeType or fileName.endswith(".pdf"):
try:
import io
from PyPDF2 import PdfReader
reader = PdfReader(io.BytesIO(content))
text = ""
for page in reader.pages:
text += page.extract_text() or ""
return text
except ImportError:
logger.warning("PyPDF2 not installed, cannot extract PDF text")
return None
except Exception as e:
logger.warning(f"Text extraction failed for {fileName}: {e}")
return None
# =========================================================================
# Badge + Score History Endpoints (Iteration 2)
# =========================================================================
@router.get("/{instanceId}/badges")
@limiter.limit("60/minute")
async def listBadges(
    request: Request,
    instanceId: str,
    context: RequestContext = Depends(getRequestContext),
):
    """List the current user's earned badges for this feature instance."""
    _validateInstanceAccess(instanceId, context)
    interface = _getInterface(context, instanceId)
    earned = interface.getBadges(str(context.user.id), instanceId)
    return {"badges": earned}
@router.get("/{instanceId}/contexts/{contextId}/scores/history")
@limiter.limit("60/minute")
async def getScoreHistory(
    request: Request,
    instanceId: str,
    contextId: str,
    context: RequestContext = Depends(getRequestContext),
):
    """Return per-dimension score history for a context."""
    _validateInstanceAccess(instanceId, context)
    interface = _getInterface(context, instanceId)
    data = interface.getScoreHistory(contextId, str(context.user.id))
    return {"history": data}

View file

@ -83,6 +83,147 @@ def cleanupSessionEvents(sessionId: str):
_sessionEvents.pop(sessionId, None)
# Number of words emitted per SSE "messageChunk" event (used by _emitChunkedResponse).
CHUNK_WORD_SIZE = 4
# Delay between chunk emissions, in seconds, to simulate progressive typing.
CHUNK_DELAY_SECONDS = 0.05
def _parseAiJsonResponse(rawText: str) -> Dict[str, Any]:
"""Parse the structured JSON response from AI. Strips optional markdown code fences."""
text = rawText.strip()
if text.startswith("```"):
lines = text.split("\n")
lines = lines[1:]
if lines and lines[-1].strip() == "```":
lines = lines[:-1]
text = "\n".join(lines)
try:
return json.loads(text)
except json.JSONDecodeError:
logger.warning(f"AI JSON parse failed, using raw text: {text[:200]}")
return {"text": rawText.strip(), "speech": "", "documents": []}
async def _generateAndEmitTts(sessionId: str, speechText: str, currentUser, mandateId: str,
instanceId: str, interface):
    """Generate TTS audio from speech text and emit as SSE event.

    Best-effort: any failure is logged and swallowed so the chat flow is
    never blocked by TTS problems. On success, emits a 'ttsAudio' SSE event
    carrying base64-encoded audio (format label "mp3" -- assumed from the
    voice backend; TODO confirm).
    """
    if not speechText:
        return
    try:
        from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
        import base64
        voiceInterface = getVoiceInterface(currentUser, mandateId)
        # Voice and language come from the user's profile, defaulting to German.
        profile = interface.getProfile(str(currentUser.id), instanceId)
        language = profile.get("preferredLanguage", "de-DE") if profile else "de-DE"
        voiceName = profile.get("preferredVoice") if profile else None
        ttsResult = await voiceInterface.textToSpeech(
            text=_stripMarkdownForTts(speechText),
            languageCode=language,
            voiceName=voiceName,
        )
        if ttsResult and isinstance(ttsResult, dict):
            audioBytes = ttsResult.get("audioContent")
            if audioBytes:
                # audioContent may be bytes or str depending on the backend; normalize to bytes.
                audioB64 = base64.b64encode(
                    audioBytes if isinstance(audioBytes, bytes) else audioBytes.encode()
                ).decode()
                await emitSessionEvent(sessionId, "ttsAudio", {"audio": audioB64, "format": "mp3"})
    except Exception as e:
        logger.warning(f"TTS failed for session {sessionId}: {e}")
async def _saveGeneratedDocument(doc: Dict[str, Any], contextId: str, userId: str,
mandateId: str, instanceId: str, interface, sessionId: str,
user=None):
    """Save a document generated by AI. Stores file in Management DB.

    Best-effort end to end: file-storage failure still records the document
    (with fileRef=None), and any other failure is logged and swallowed.
    Emits a 'documentCreated' SSE event on success.
    """
    from .datamodelCommcoach import CoachingDocument
    try:
        title = doc.get("title", "Dokument")
        content = doc.get("content", "")
        contentBytes = content.encode("utf-8")
        fileName = f"{title}.md"
        fileRef = None
        try:
            import modules.interfaces.interfaceDbManagement as interfaceDbManagement
            mgmtInterface = interfaceDbManagement.getInterface(
                currentUser=user, mandateId=mandateId, featureInstanceId=instanceId
            )
            # NOTE(review): createFile receives the content and createFileData
            # then writes the same bytes -- confirm this is not a double write.
            fileItem = mgmtInterface.createFile(name=fileName, mimeType="text/markdown", content=contentBytes)
            mgmtInterface.createFileData(fileItem.id, contentBytes)
            fileRef = fileItem.id
        except Exception as e:
            logger.warning(f"Failed to store generated document in file DB: {e}")
        docData = CoachingDocument(
            contextId=contextId,
            userId=userId,
            mandateId=mandateId,
            instanceId=instanceId,
            fileName=fileName,
            mimeType="text/markdown",
            fileSize=len(contentBytes),
            # Full generated text is kept inline; the title doubles as the summary.
            extractedText=content,
            summary=title,
            fileRef=fileRef,
        ).model_dump()
        created = interface.createDocument(docData)
        await emitSessionEvent(sessionId, "documentCreated", created)
    except Exception as e:
        logger.warning(f"Failed to save generated document: {e}")
async def _emitChunkedResponse(sessionId: str, createdMsg: Dict[str, Any], fullText: str):
    """Stream the reply as 'messageChunk' events, then emit the final 'message' event."""
    msgId = createdMsg.get("id")
    tokens = fullText.split()
    accumulatedParts = []
    start = 0
    while start < len(tokens):
        piece = " ".join(tokens[start:start + CHUNK_WORD_SIZE])
        accumulatedParts.append(piece)
        await emitSessionEvent(sessionId, "messageChunk", {
            "id": msgId,
            "role": "assistant",
            "chunk": piece,
            # Progressive text shown so far; whitespace is normalized to single spaces.
            "accumulated": " ".join(accumulatedParts),
        })
        # Small pause between chunks gives the UI a typing effect.
        await asyncio.sleep(CHUNK_DELAY_SECONDS)
        start += CHUNK_WORD_SIZE
    # Final event carries the untouched full text (original formatting preserved).
    await emitSessionEvent(sessionId, "message", {
        "id": msgId,
        "role": "assistant",
        "content": fullText,
        "createdAt": createdMsg.get("createdAt"),
    })
def _resolvePersona(session: Optional[Dict[str, Any]], interface) -> Optional[Dict[str, Any]]:
"""Resolve persona data from session's personaId."""
if not session:
return None
personaId = session.get("personaId")
if not personaId:
return None
try:
return interface.getPersona(personaId)
except Exception:
return None
def _getDocumentSummaries(contextId: str, userId: str, interface) -> Optional[List[str]]:
"""Get document summaries for context to include in the AI prompt."""
try:
docs = interface.getDocuments(contextId, userId)
summaries = []
for doc in docs[:5]:
summary = doc.get("summary")
if summary:
summaries.append(f"[{doc.get('fileName', 'Dokument')}] {summary}")
elif doc.get("extractedText"):
summaries.append(f"[{doc.get('fileName', 'Dokument')}] {doc['extractedText'][:200]}...")
return summaries if summaries else None
except Exception:
return None
class CommcoachService:
"""Coaching orchestrator: processes messages, calls AI, extracts tasks and scores."""
@ -143,7 +284,7 @@ class CommcoachService:
try:
summaryPrompt = aiPrompts.buildEarlierConversationSummaryPrompt(toSummarize)
summaryResponse = await self._callAi(
"Du fasst Coaching-Gespraeche praezise zusammen.", summaryPrompt
"Du fasst Coaching-Gespräche präzise zusammen.", summaryPrompt
)
if summaryResponse and summaryResponse.errorCount == 0 and summaryResponse.content:
earlierSummary = summaryResponse.content.strip()
@ -163,6 +304,9 @@ class CommcoachService:
contextId, sessionId, userContent, context, interface
)
persona = _resolvePersona(session, interface)
documentSummaries = _getDocumentSummaries(contextId, self.userId, interface)
systemPrompt = aiPrompts.buildCoachingSystemPrompt(
context,
previousMessages,
@ -172,10 +316,12 @@ class CommcoachService:
rollingOverview=retrievalResult.get("rollingOverview"),
retrievedSession=retrievalResult.get("retrievedSession"),
retrievedByTopic=retrievalResult.get("retrievedByTopic"),
persona=persona,
documentSummaries=documentSummaries,
)
if retrievalResult.get("intent") == RetrievalIntent.SUMMARIZE_ALL:
systemPrompt += "\n\nWICHTIG: Der Benutzer moechte eine Gesamtzusammenfassung. Erstelle eine umfassende Zusammenfassung aller genannten Sessions und der aktuellen Session."
systemPrompt += "\n\nWICHTIG: Der Benutzer möchte eine Gesamtzusammenfassung. Erstelle eine umfassende Zusammenfassung aller genannten Sessions und der aktuellen Session."
# Call AI
await emitSessionEvent(sessionId, "status", {"label": "Coach denkt nach..."})
@ -187,52 +333,38 @@ class CommcoachService:
await emitSessionEvent(sessionId, "error", {"message": f"AI error: {str(e)}"})
return createdUserMsg
responseText = aiResponse.content.strip() if aiResponse and aiResponse.errorCount == 0 else "Entschuldigung, ich konnte gerade nicht antworten. Bitte versuche es erneut."
responseRaw = aiResponse.content.strip() if aiResponse and aiResponse.errorCount == 0 else ""
if not responseRaw:
parsed = {"text": "Entschuldigung, ich konnte gerade nicht antworten. Bitte versuche es erneut.", "speech": "", "documents": []}
else:
parsed = _parseAiJsonResponse(responseRaw)
textContent = parsed.get("text", "")
speechContent = parsed.get("speech", "")
documents = parsed.get("documents", [])
for doc in documents:
await _saveGeneratedDocument(doc, contextId, self.userId, self.mandateId, self.instanceId, interface, sessionId, user=self.currentUser)
# Store assistant message
assistantMsg = CoachingMessage(
sessionId=sessionId,
contextId=contextId,
userId=self.userId,
role=CoachingMessageRole.ASSISTANT,
content=responseText,
content=textContent,
contentType=CoachingMessageContentType.TEXT,
).model_dump()
createdAssistantMsg = interface.createMessage(assistantMsg)
# Update session message count
messages = interface.getMessages(sessionId)
interface.updateSession(sessionId, {"messageCount": len(messages)})
await emitSessionEvent(sessionId, "message", {
"id": createdAssistantMsg.get("id"),
"role": "assistant",
"content": responseText,
"createdAt": createdAssistantMsg.get("createdAt"),
})
if responseText:
try:
from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
import base64
voiceInterface = getVoiceInterface(self.currentUser, self.mandateId)
profile = interface.getProfile(self.userId, self.instanceId)
language = profile.get("preferredLanguage", "de-DE") if profile else "de-DE"
voiceName = profile.get("preferredVoice") if profile else None
ttsResult = await voiceInterface.textToSpeech(
text=_stripMarkdownForTts(responseText),
languageCode=language,
voiceName=voiceName,
)
if ttsResult and isinstance(ttsResult, dict):
audioBytes = ttsResult.get("audioContent")
if audioBytes:
audioB64 = base64.b64encode(
audioBytes if isinstance(audioBytes, bytes) else audioBytes.encode()
).decode()
await emitSessionEvent(sessionId, "ttsAudio", {"audio": audioB64, "format": "mp3"})
except Exception as e:
logger.warning(f"TTS failed for text message session {sessionId}: {e}")
ttsTask = asyncio.create_task(
_generateAndEmitTts(sessionId, speechContent, self.currentUser, self.mandateId, self.instanceId, interface)
)
await _emitChunkedResponse(sessionId, createdAssistantMsg, textContent)
await ttsTask
await emitSessionEvent(sessionId, "complete", {})
return createdAssistantMsg
@ -259,10 +391,26 @@ class CommcoachService:
allSessions, excludeSessionId=sessionId, limit=PREVIOUS_SESSION_SUMMARIES_COUNT
)
session = interface.getSession(sessionId)
persona = _resolvePersona(session, interface)
documentSummaries = _getDocumentSummaries(contextId, self.userId, interface)
systemPrompt = aiPrompts.buildCoachingSystemPrompt(
context, previousMessages, tasks, previousSessionSummaries=previousSessionSummaries
context, previousMessages, tasks,
previousSessionSummaries=previousSessionSummaries,
persona=persona,
documentSummaries=documentSummaries,
)
openingUserPrompt = "Beginne die Coaching-Session mit einer kurzen Begruesssung, fasse in einem Satz zusammen wo wir stehen (falls vorherige Sessions), und stelle eine gezielte Einstiegsfrage zum Thema."
isFirstSession = not previousSessionSummaries or len(previousSessionSummaries) == 0
if persona and persona.get("key") != "coach":
personaLabel = persona.get("label", "Gesprächspartner")
openingUserPrompt = f"Beginne das Gespräch in deiner Rolle als {personaLabel}. Stelle dich kurz vor und eröffne die Situation gemäss deiner Rollenbeschreibung."
elif isFirstSession:
openingUserPrompt = "Dies ist die ERSTE Session zu diesem Thema. Begrüsse den Benutzer, stelle das Thema kurz vor und stelle eine offene Einstiegsfrage. Erfinde KEINE vorherigen Gespräche oder Zusammenfassungen."
else:
openingUserPrompt = "Begrüsse den Benutzer zurück, fasse in einem Satz zusammen wo wir stehen, und stelle eine gezielte Einstiegsfrage."
try:
aiResponse = await self._callAi(systemPrompt, openingUserPrompt)
@ -272,54 +420,41 @@ class CommcoachService:
await emitSessionEvent(sessionId, "complete", {})
return {}
openingContent = (
responseRaw = (
aiResponse.content.strip()
if aiResponse and aiResponse.errorCount == 0
else f"Willkommen zur Coaching-Session zum Thema \"{context.get('title')}\". Was moechtest du heute besprechen?"
else ""
)
if not responseRaw:
parsed = {"text": f"Willkommen zur Coaching-Session zum Thema \"{context.get('title')}\". Was möchtest du heute besprechen?", "speech": "", "documents": []}
else:
parsed = _parseAiJsonResponse(responseRaw)
textContent = parsed.get("text", "")
speechContent = parsed.get("speech", "")
documents = parsed.get("documents", [])
for doc in documents:
await _saveGeneratedDocument(doc, contextId, self.userId, self.mandateId, self.instanceId, interface, sessionId, user=self.currentUser)
assistantMsg = CoachingMessage(
sessionId=sessionId,
contextId=contextId,
userId=self.userId,
role=CoachingMessageRole.ASSISTANT,
content=openingContent,
content=textContent,
contentType=CoachingMessageContentType.TEXT,
).model_dump()
createdMsg = interface.createMessage(assistantMsg)
interface.updateSession(sessionId, {"messageCount": 1})
await emitSessionEvent(sessionId, "message", {
"id": createdMsg.get("id"),
"sessionId": sessionId,
"contextId": contextId,
"role": "assistant",
"content": openingContent,
"contentType": "text",
"createdAt": createdMsg.get("createdAt"),
})
if openingContent:
try:
from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
import base64
voiceInterface = getVoiceInterface(self.currentUser, self.mandateId)
profile = interface.getProfile(self.userId, self.instanceId)
language = profile.get("preferredLanguage", "de-DE") if profile else "de-DE"
voiceName = profile.get("preferredVoice") if profile else None
ttsResult = await voiceInterface.textToSpeech(
text=_stripMarkdownForTts(openingContent),
languageCode=language,
voiceName=voiceName,
)
if ttsResult and isinstance(ttsResult, dict):
audioBytes = ttsResult.get("audioContent")
if audioBytes:
audioB64 = base64.b64encode(
audioBytes if isinstance(audioBytes, bytes) else audioBytes.encode()
).decode()
await emitSessionEvent(sessionId, "ttsAudio", {"audio": audioB64, "format": "mp3"})
except Exception as e:
logger.warning(f"TTS failed for opening: {e}")
ttsTask = asyncio.create_task(
_generateAndEmitTts(sessionId, speechContent, self.currentUser, self.mandateId, self.instanceId, interface)
)
await _emitChunkedResponse(sessionId, createdMsg, textContent)
await ttsTask
await emitSessionEvent(sessionId, "complete", {})
logger.info(f"CommCoach session opening completed: {sessionId}")
@ -365,36 +500,7 @@ class CommcoachService:
await emitSessionEvent(sessionId, "error", {"message": msg, "detail": sttError})
return {}
# Process through normal pipeline
result = await self.processMessage(sessionId, contextId, transcribedText, interface)
# Generate TTS for the response
assistantContent = result.get("content", "")
if assistantContent:
await emitSessionEvent(sessionId, "status", {"label": "Antwort wird gesprochen..."})
try:
profile = interface.getProfile(self.userId, self.instanceId)
voiceName = profile.get("preferredVoice") if profile else None
ttsResult = await voiceInterface.textToSpeech(
text=_stripMarkdownForTts(assistantContent),
languageCode=language,
voiceName=voiceName,
)
if ttsResult and isinstance(ttsResult, dict):
import base64
audioBytes = ttsResult.get("audioContent")
if audioBytes:
audioB64 = base64.b64encode(
audioBytes if isinstance(audioBytes, bytes) else audioBytes.encode()
).decode()
await emitSessionEvent(sessionId, "ttsAudio", {
"audio": audioB64,
"format": "mp3",
})
except Exception as e:
logger.warning(f"TTS failed for session {sessionId}: {e}")
return result
async def completeSession(self, sessionId: str, interface) -> Dict[str, Any]:
@ -424,7 +530,7 @@ class CommcoachService:
# Generate summary
try:
summaryPrompt = aiPrompts.buildSummaryPrompt(messages, context.get("title", "Coaching"))
summaryResponse = await self._callAi("Du bist ein praeziser Zusammenfasser.", summaryPrompt)
summaryResponse = await self._callAi("Du bist ein präziser Zusammenfasser.", summaryPrompt)
summary = summaryResponse.content.strip() if summaryResponse and summaryResponse.errorCount == 0 else None
except Exception as e:
logger.warning(f"Summary generation failed: {e}")
@ -447,7 +553,7 @@ class CommcoachService:
# Extract tasks
try:
taskPrompt = aiPrompts.buildTaskExtractionPrompt(messages)
taskResponse = await self._callAi("Du extrahierst Aufgaben aus Gespraechen.", taskPrompt)
taskResponse = await self._callAi("Du extrahierst Aufgaben aus Gesprächen.", taskPrompt)
if taskResponse and taskResponse.errorCount == 0:
extractedTasks = aiPrompts.parseJsonResponse(taskResponse.content, [])
if isinstance(extractedTasks, list):
@ -497,6 +603,24 @@ class CommcoachService:
logger.warning(f"Scoring failed: {e}")
competenceScore = None
# Generate insights
try:
insightPrompt = aiPrompts.buildInsightPrompt(messages, summary)
insightResponse = await self._callAi("Du generierst kurze Coaching-Insights.", insightPrompt)
if insightResponse and insightResponse.errorCount == 0 and insightResponse.content:
insights = aiPrompts.parseJsonResponse(insightResponse.content, [])
if isinstance(insights, list):
existingInsights = aiPrompts._parseJsonField(context.get("insights") if context else None, [])
for ins in insights[:3]:
insightText = ins.get("text", ins) if isinstance(ins, dict) else str(ins)
if insightText:
existingInsights.append({"text": insightText, "sessionId": sessionId, "createdAt": getIsoTimestamp()})
await emitSessionEvent(sessionId, "insightGenerated", {"text": insightText, "sessionId": sessionId})
if contextId and existingInsights:
interface.updateContext(contextId, {"insights": json.dumps(existingInsights[-10:])})
except Exception as e:
logger.warning(f"Insight generation failed: {e}")
# Calculate duration
startedAt = session.get("startedAt", "")
durationSeconds = 0
@ -535,6 +659,18 @@ class CommcoachService:
# Update user profile streak
self._updateStreak(interface)
# Check and award badges
try:
from .serviceCommcoachGamification import checkAndAwardBadges
updatedSession = interface.getSession(sessionId)
newBadges = await checkAndAwardBadges(
interface, self.userId, self.mandateId, self.instanceId, session=updatedSession
)
for badge in newBadges:
await emitSessionEvent(sessionId, "badgeAwarded", badge)
except Exception as e:
logger.warning(f"Badge check failed: {e}")
# Send email summary
if summary:
await self._sendSessionEmail(session, summary, interface)

View file

@ -24,16 +24,16 @@ def buildResumeGreetingPrompt(messages: List[Dict[str, Any]], contextTitle: str)
for msg in recent:
role = "Benutzer" if msg.get("role") == "user" else "Coach"
conversation += f"\n{role}: {msg.get('content', '')[:200]}"
return f"""Der User kehrt zur laufenden Coaching-Session zum Thema "{contextTitle}" zurueck.
return f"""Der User kehrt zur laufenden Coaching-Session zum Thema "{contextTitle}" zurück.
Bisheriger Verlauf:
{conversation}
Erstelle eine kurze, freundliche Begruesssung fuer den Wiedereinstieg (2-3 Saetze):
- Begruesse den User zurueck
Erstelle eine kurze, freundliche Begrüssung für den Wiedereinstieg (2-3 tze):
- Begrüsse den User zurück
- Fasse in einem Satz zusammen, worum es zuletzt ging
- Lade ein, dort weiterzumachen oder eine neue Frage zu stellen
Antworte NUR mit der Begruesssung, keine Erklaerungen."""
Antworte NUR mit der Begrüssung, keine Erklärungen."""
def buildEarlierConversationSummaryPrompt(messages: List[Dict[str, Any]]) -> str:
@ -43,12 +43,12 @@ def buildEarlierConversationSummaryPrompt(messages: List[Dict[str, Any]]) -> str
role = "Benutzer" if msg.get("role") == "user" else "Coach"
conversation += f"\n{role}: {msg.get('content', '')}"
return f"""Fasse das folgende Coaching-Gespraech in 4-6 Saetzen zusammen.
Behalte: Kernthemen, wichtige Erkenntnisse, erwaehnte Aufgaben, emotionale Wendepunkte, Fortschritte.
Entferne Wiederholungen und Fuelltext.
Antworte NUR mit der Zusammenfassung, keine Erklaerungen.
return f"""Fasse das folgende Coaching-Gespräch in 4-6 Sätzen zusammen.
Behalte: Kernthemen, wichtige Erkenntnisse, erwähnte Aufgaben, emotionale Wendepunkte, Fortschritte.
Entferne Wiederholungen und Fülltext.
Antworte NUR mit der Zusammenfassung, keine Erklärungen.
Gespraech:
Gespräch:
{conversation}"""
@ -93,6 +93,8 @@ def buildCoachingSystemPrompt(
rollingOverview: Optional[str] = None,
retrievedSession: Optional[Dict[str, Any]] = None,
retrievedByTopic: Optional[List[Dict[str, Any]]] = None,
persona: Optional[Dict[str, Any]] = None,
documentSummaries: Optional[List[str]] = None,
) -> str:
"""Build the system prompt for a coaching session, including context history, tasks, and session continuity."""
contextTitle = context.get("title", "General Coaching")
@ -109,23 +111,72 @@ def buildCoachingSystemPrompt(
summaries = previousSessionSummaries or []
prompt = f"""Du bist ein erfahrener Kommunikations-Coach fuer Fuehrungskraefte. Du arbeitest mit dem Benutzer am Thema: "{contextTitle}" (Kategorie: {contextCategory}).
if persona and persona.get("key") != "coach":
if persona.get("systemPromptOverride"):
prompt = persona["systemPromptOverride"]
else:
personaLabel = persona.get("label", "Gesprächspartner")
personaDescription = persona.get("description", "")
personaGender = persona.get("gender", "")
genderHint = " (weiblich)" if personaGender == "f" else " (männlich)" if personaGender == "m" else ""
prompt = f"""Du spielst die Rolle von "{personaLabel}"{genderHint} in einem Roleplay-Szenario zum Thema: "{contextTitle}" (Kategorie: {contextCategory}).
Rollenbeschreibung: {personaDescription}
WICHTIG für dein Verhalten:
- Du BIST {personaLabel}. Du bist NICHT der Coach. Sprich IMMER direkt als diese Person.
- Beschreibe KEINE Szenarien. Beginne SOFORT mit dem Dialog in deiner Rolle.
- Reagiere authentisch und emotional gemäss deiner Rollenbeschreibung.
- Verwende eine Sprache und Tonalität, die zu deiner Rolle passt.
- Der Benutzer übt ein Gespräch mit dir. Gib ihm realistische Reaktionen.
- Wenn der Benutzer gut kommuniziert, zeige das durch angemessene positive Reaktionen.
- Wenn der Benutzer schlecht kommuniziert, eskaliere entsprechend deiner Rolle.
Kommunikationsstil:
- Sprich natürlich, wie die beschriebene Person sprechen würde.
- Verwende keine Emojis.
- Antworte in der Sprache des Benutzers.
- Halte Antworten realistisch kurz (wie in einem echten Gespräch)."""
else:
prompt = f"""Du bist ein erfahrener Kommunikations-Coach für Führungskräfte. Du arbeitest mit dem Benutzer am Thema: "{contextTitle}" (Kategorie: {contextCategory}).
Deine Rolle:
- Stelle gezielte diagnostische Rueckfragen, um das Problem/Thema besser zu verstehen
- Gib konkrete, praxisnahe Tipps und Uebungen
- Baue auf fruehere Sessions auf (Kontext-Kontinuitaet)
- Stelle gezielte diagnostische Rückfragen, um das Problem/Thema besser zu verstehen
- Gib konkrete, praxisnahe Tipps und Übungen
- Baue auf frühere Sessions auf (Kontext-Kontinuität)
- Erkenne Fortschritte und benenne sie
- Schlage am Ende der Session konkrete naechste Schritte vor (als Tasks)
- Kommuniziere empathisch, klar und auf Augenhoehe
- Schlage am Ende der Session konkrete nächste Schritte vor (als Tasks)
- Kommuniziere empathisch, klar und auf Augenhöhe
Roleplay:
- Wenn der Benutzer dich bittet, eine bestimmte Person zu spielen (z.B. einen kritischen Kunden, einen Vorgesetzten, einen Mitarbeiter), dann wechsle SOFORT in diese Rolle.
- Beschreibe KEIN Szenario. Sprich direkt ALS diese Person. Beginne sofort mit dem Dialog in der Rolle.
- Bleibe in der Rolle, bis der Benutzer explizit sagt, dass das Roleplay beendet ist oder Feedback möchte.
- Reagiere authentisch, emotional und realistisch wie die beschriebene Person.
Kommunikationsstil:
- Duze den Benutzer
- Sei direkt aber wertschaetzend
- Sei direkt aber wertschätzend
- Verwende keine Emojis
- Antworte in der Sprache des Benutzers
- Halte Antworten fokussiert (max 3-4 Absaetze)
- WICHTIG: Schreibe reinen Redetext ohne jegliche Formatierung. Kein Markdown, keine Sternchen, keine Hashes, keine Aufzaehlungszeichen, keine Backticks. Deine Antworten werden direkt vorgelesen."""
- Halte Antworten fokussiert (max 3-4 Absätze)"""
prompt += """
Antwortformat:
Du antwortest IMMER als reines JSON-Objekt mit exakt diesen Feldern:
{"text": "...", "speech": "...", "documents": []}
"text": Dein schriftlicher Chat-Text. Details, Struktur, Übungen, Beispiele. Markdown-Formatierung erlaubt.
"speech": Dein gesprochener Kommentar. Natürlich, wie ein Gespräch. Fasse zusammen, kommentiere, motiviere, stelle Fragen. Lies NICHT den Text vor, ergänze ihn mündlich. 2-4 Sätze, reiner Redetext ohne Formatierung.
"documents": Dokumente (Zusammenfassungen, Checklisten, Übungen, Protokolle). Erstelle ein Dokument wenn: der Benutzer explizit darum bittet, du strukturierte Inhalte (Listen, Pläne, Checklisten) lieferst, oder Material zum Aufbewahren sinnvoll ist. Jedes Dokument: {"title": "...", "content": "Markdown-Inhalt"}. Wenn keine: leeres Array [].
Kanalverteilung:
- Fakten, Listen, Übungen -> text
- Empathie, Einordnung, Nachfragen -> speech
- Materialien zum Aufbewahren -> documents
WICHTIG: Antworte NUR mit dem JSON-Objekt. Kein Text vor oder nach dem JSON."""
if contextDescription:
prompt += f"\n\nKontext-Beschreibung: {contextDescription}"
@ -139,7 +190,7 @@ Kommunikationsstil:
prompt += f"\n\nBisherige Erkenntnisse:\n" + "\n".join(f"- {i}" for i in insightTexts)
if rollingOverview:
prompt += f"\n\nGesamtueberblick bisheriger Sessions:\n{rollingOverview[:600]}"
prompt += f"\n\nGesamtüberblick bisheriger Sessions:\n{rollingOverview[:600]}"
if summaries:
prompt += "\n\nBisherige Sessions (Zusammenfassungen):"
@ -180,7 +231,12 @@ Kommunikationsstil:
prompt += f"\n\nAbgeschlossene Aufgaben: {len(doneTasks)}"
if earlierSummary:
prompt += f"\n\nAelterer Gespraechsverlauf (zusammengefasst):\n{earlierSummary[:800]}"
prompt += f"\n\nÄlterer Gesprächsverlauf (zusammengefasst):\n{earlierSummary[:800]}"
if documentSummaries:
prompt += "\n\nRelevante Dokumente zum Kontext:"
for docSummary in documentSummaries[:5]:
prompt += f"\n- {docSummary[:300]}"
if previousMessages:
prompt += "\n\nVorige Nachrichten dieser Session (Kontext):"
@ -202,12 +258,12 @@ def buildSummaryPrompt(messages: List[Dict[str, Any]], contextTitle: str) -> str
return f"""Erstelle eine kompakte Zusammenfassung dieser Coaching-Session zum Thema "{contextTitle}".
Struktur:
1. **Kernthema**: Was wurde besprochen (1-2 Saetze)
1. **Kernthema**: Was wurde besprochen (1-2 Sätze)
2. **Erkenntnisse**: Was wurde erkannt/gelernt (Stichpunkte)
3. **Naechste Schritte**: Konkrete Aufgaben fuer den Benutzer (Stichpunkte)
4. **Fortschritt**: Einschaetzung des Fortschritts
3. **Nächste Schritte**: Konkrete Aufgaben r den Benutzer (Stichpunkte)
4. **Fortschritt**: Einschätzung des Fortschritts
Gespraech:
Gespräch:
{conversation}
Antworte auf Deutsch, sachlich und kompakt."""
@ -224,21 +280,21 @@ def buildScoringPrompt(messages: List[Dict[str, Any]], contextCategory: str) ->
Kategorie: {contextCategory}
Bewerte folgende Dimensionen auf einer Skala von 0-100:
- empathy: Einfuehlungsvermoegen
- empathy: Einfühlungsvermögen
- clarity: Klarheit der Kommunikation
- assertiveness: Durchsetzungsfaehigkeit
- listening: Zuhoerfaehigkeit
- assertiveness: Durchsetzungsfähigkeit
- listening: Zuhörfähigkeit
- selfReflection: Selbstreflexion
Antworte AUSSCHLIESSLICH als JSON-Array:
[
{{"dimension": "empathy", "score": 65, "trend": "improving", "evidence": "Zeigt zunehmendes Verstaendnis..."}},
{{"dimension": "empathy", "score": 65, "trend": "improving", "evidence": "Zeigt zunehmendes Verständnis..."}},
{{"dimension": "clarity", "score": 70, "trend": "stable", "evidence": "..."}}
]
Trend: "improving", "stable", oder "declining" basierend auf dem Gespraechsverlauf.
Trend: "improving", "stable", oder "declining" basierend auf dem Gesprächsverlauf.
Gespraech:
Gespräch:
{conversation}"""
@ -250,7 +306,7 @@ Antworte AUSSCHLIESSLICH als JSON-Array von Strings:
Zusammenfassung: {summary[:500]}
Nur konkrete Themen (z.B. Delegation, Feedback-Gespraech, Konflikt mit Vorgesetztem)."""
Nur konkrete Themen (z.B. Delegation, Feedback-Gespräch, Konflikt mit Vorgesetztem)."""
def buildFullContextSummaryPrompt(
@ -281,15 +337,15 @@ def buildFullContextSummaryPrompt(
return f"""Erstelle eine kompakte Gesamtzusammenfassung aller Coaching-Sessions zum Thema "{contextTitle}".
Struktur:
1. **Gesamtueberblick**: Was wurde ueber alle Sessions hinweg besprochen
1. **Gesamtüberblick**: Was wurde über alle Sessions hinweg besprochen
2. **Entwicklung**: Wie hat sich das Thema/thematische Schwerpunkte entwickelt
3. **Offene Punkte**: Was steht noch aus
4. **Empfehlung**: Kurzer naechster Fokus
4. **Empfehlung**: Kurzer nächster Fokus
Inhalt:
{combined[:6000]}
Antworte auf Deutsch, sachlich, 4-6 Absaetze."""
Antworte auf Deutsch, sachlich, 4-6 Absätze."""
def buildRollingOverviewPrompt(sessionSummaries: List[Dict[str, Any]], contextTitle: str) -> str:
@ -302,7 +358,7 @@ def buildRollingOverviewPrompt(sessionSummaries: List[Dict[str, Any]], contextTi
parts.append(f"- {dateStr}: {summary[:300]}")
combined = "\n".join(parts)
return f"""Fasse die folgenden Coaching-Sessions zum Thema "{contextTitle}" in 4-6 Saetzen zusammen.
return f"""Fasse die folgenden Coaching-Sessions zum Thema "{contextTitle}" in 4-6 Sätzen zusammen.
Behalte: Kernthemen, Fortschritte, wichtige Erkenntnisse, offene Punkte.
Entferne Wiederholungen.
@ -312,6 +368,28 @@ Sessions:
Antworte NUR mit der Zusammenfassung."""
def buildInsightPrompt(messages: List[Dict[str, Any]], summary: Optional[str] = None) -> str:
    """Build a prompt asking the AI for 1-3 short insights from a completed session.

    Only the last 15 messages are included, each truncated to 300 characters;
    an optional session summary (truncated to 500) is appended when given.
    """
    parts = []
    for msg in messages[-15:]:
        speaker = "Benutzer" if msg.get("role") == "user" else "Coach"
        parts.append(f"\n{speaker}: {msg.get('content', '')[:300]}")
    conversation = "".join(parts)
    summarySection = f"\nZusammenfassung: {summary[:500]}" if summary else ""
    return f"""Generiere 1-3 kurze Coaching-Insights aus dieser Session.
Ein Insight ist eine prägende Erkenntnis oder ein Aha-Moment des Benutzers.
Antworte AUSSCHLIESSLICH als JSON-Array:
[{{"text": "Erkenntnis in einem Satz"}}]
Nur echte Erkenntnisse, keine Banalitäten. Wenn keine klaren Insights: leeres Array [].
{summarySection}
Gespräch:
{conversation}"""
def buildTaskExtractionPrompt(messages: List[Dict[str, Any]]) -> str:
"""Build a prompt to extract actionable tasks from a session."""
recentForTasks = messages[-25:] if len(messages) > 25 else messages
@ -320,7 +398,7 @@ def buildTaskExtractionPrompt(messages: List[Dict[str, Any]]) -> str:
role = "Benutzer" if msg.get("role") == "user" else "Coach"
conversation += f"\n{role}: {msg.get('content', '')}"
return f"""Extrahiere konkrete Aufgaben/naechste Schritte aus diesem Coaching-Gespraech.
return f"""Extrahiere konkrete Aufgaben/nächste Schritte aus diesem Coaching-Gespräch.
Nur Aufgaben, die der Benutzer selbst umsetzen soll.
Antworte AUSSCHLIESSLICH als JSON-Array:
@ -331,7 +409,7 @@ Antworte AUSSCHLIESSLICH als JSON-Array:
priority: "low", "medium", oder "high"
Maximal 3 Aufgaben. Wenn keine klar erkennbar: leeres Array [].
Gespraech:
Gespräch:
{conversation}"""

View file

@ -0,0 +1,288 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
CommCoach Export Service.
Generates Markdown and PDF exports for dossiers and sessions.
"""
import logging
import json
from typing import Dict, Any, List, Optional
from datetime import datetime
logger = logging.getLogger(__name__)
def buildDossierMarkdown(context: Dict[str, Any], sessions: List[Dict[str, Any]],
                         tasks: List[Dict[str, Any]], scores: List[Dict[str, Any]]) -> str:
    """Build a Markdown export of a full coaching dossier (context).

    Sections are emitted only when data is present: header metadata, goals,
    insights, completed sessions (chronological), tasks (open vs. done), and
    the latest competence score per dimension.

    Args:
        context: The coaching context record (title, description, goals, insights, ...).
        sessions: All sessions; only those with status == "completed" are listed.
        tasks: Task records, split into open/inProgress and done.
        scores: Competence-score entries, grouped per dimension via helper.

    Returns:
        The complete Markdown document as a single newline-joined string.
    """
    title = context.get("title", "Coaching Dossier")
    description = context.get("description", "")
    category = context.get("category", "custom")
    createdAt = _formatDate(context.get("createdAt"))
    # NOTE(review): metadata lines end with trailing whitespace — presumably a
    # Markdown hard line break; confirm the intended number of spaces.
    lines = [
        f"# {title}",
        "",
        f"**Kategorie:** {category} ",
        f"**Erstellt:** {createdAt} ",
    ]
    if description:
        lines.append(f"**Beschreibung:** {description} ")
    goalsRaw = context.get("goals")
    # Goals may be stored as a JSON string; _parseJson falls back to [] on failure.
    goals = _parseJson(goalsRaw, [])
    if goals:
        lines += ["", "## Ziele", ""]
        for g in goals:
            # Each goal is either a dict ({"text", "status"}) or a bare string.
            text = g.get("text", g) if isinstance(g, dict) else str(g)
            status = g.get("status", "open") if isinstance(g, dict) else "open"
            marker = "[x]" if status in ("done", "completed") else "[ ]"
            lines.append(f"- {marker} {text}")
    insightsRaw = context.get("insights")
    insights = _parseJson(insightsRaw, [])
    if insights:
        lines += ["", "## Erkenntnisse", ""]
        for ins in insights:
            text = ins.get("text", ins) if isinstance(ins, dict) else str(ins)
            lines.append(f"- {text}")
    # Only completed sessions are exported, oldest first.
    completedSessions = [s for s in sessions if s.get("status") == "completed"]
    completedSessions.sort(key=lambda s: s.get("startedAt") or s.get("createdAt") or "")
    if completedSessions:
        lines += ["", "## Sessions", ""]
        for i, s in enumerate(completedSessions, 1):
            dateStr = _formatDate(s.get("startedAt") or s.get("createdAt"))
            duration = s.get("durationSeconds", 0)
            durationMin = duration // 60 if duration else 0
            # NOTE(review): `score or '--'` also renders a legitimate score of 0
            # as '--' — confirm whether 0 is a possible competenceScore.
            score = s.get("competenceScore")
            # personaId is printed as-is (raw ID), defaulting to "Coach".
            persona = s.get("personaId") or "Coach"
            lines.append(f"### Session {i} -- {dateStr}")
            lines.append("")
            lines.append(f"**Dauer:** {durationMin} Min. | **Score:** {score or '--'} | **Persona:** {persona} ")
            summary = s.get("summary")
            if summary:
                lines.append(f"\n{summary}")
            lines.append("")
    if tasks:
        openTasks = [t for t in tasks if t.get("status") in ("open", "inProgress")]
        doneTasks = [t for t in tasks if t.get("status") == "done"]
        lines += ["", "## Aufgaben", ""]
        if openTasks:
            lines.append("**Offen:**")
            for t in openTasks:
                lines.append(f"- [ ] {t.get('title')} ({t.get('priority', 'medium')})")
            lines.append("")
        if doneTasks:
            lines.append("**Erledigt:**")
            for t in doneTasks:
                lines.append(f"- [x] {t.get('title')}")
            lines.append("")
    if scores:
        lines += ["", "## Kompetenz-Scores", ""]
        dimScores = _groupScoresByDimension(scores)
        for dim, entries in dimScores.items():
            # Entries per dimension are assumed chronological; the last one is shown.
            latest = entries[-1]
            lines.append(f"- **{dim}**: {latest.get('score', '--')} ({latest.get('trend', 'stable')})")
    # _formatDate(None) presumably renders "now" — confirm against the helper.
    lines += ["", "---", f"*Exportiert am {_formatDate(None)}*", ""]
    return "\n".join(lines)
def buildSessionMarkdown(session: Dict[str, Any], messages: List[Dict[str, Any]],
                         tasks: List[Dict[str, Any]], scores: List[Dict[str, Any]]) -> str:
    """Render a single coaching session as a Markdown document.

    Includes header metadata, the summary, the full conversation transcript,
    plus the tasks and scores belonging to this session.
    """
    sessionId = session.get("id")
    dateStr = _formatDate(session.get("startedAt") or session.get("createdAt"))
    durationSeconds = session.get("durationSeconds", 0)
    minutes = durationSeconds // 60 if durationSeconds else 0
    score = session.get("competenceScore")
    personaLabel = session.get("personaId") or "Coach"

    out = [
        f"# Coaching Session -- {dateStr}",
        "",
        f"**Dauer:** {minutes} Min. | **Score:** {score or '--'} | **Persona:** {personaLabel} ",
    ]

    summary = session.get("summary")
    if summary:
        out.extend(["", "## Zusammenfassung", "", summary])

    if messages:
        out.extend(["", "## Gesprächsverlauf", ""])
        for msg in messages:
            speaker = "Du" if msg.get("role") == "user" else "Coach"
            out.append(f"**{speaker}:** {msg.get('content', '')}")
            out.append("")

    sessionTasks = [t for t in tasks if t.get("sessionId") == sessionId]
    if sessionTasks:
        out.extend(["## Aufgaben", ""])
        for t in sessionTasks:
            checkbox = "[x]" if t.get("status") == "done" else "[ ]"
            out.append(f"- {checkbox} {t.get('title')}")
        out.append("")

    sessionScores = [s for s in scores if s.get("sessionId") == sessionId]
    if sessionScores:
        out.extend(["## Scores", ""])
        for s in sessionScores:
            out.append(f"- **{s.get('dimension')}**: {s.get('score')} ({s.get('trend', 'stable')})")
            if s.get("evidence"):
                out.append(f" _{s.get('evidence')}_")
        out.append("")

    out.extend(["---", f"*Exportiert am {_formatDate(None)}*", ""])
    return "\n".join(out)
async def renderDossierPdf(context: Dict[str, Any], sessions: List[Dict[str, Any]],
                           tasks: List[Dict[str, Any]], scores: List[Dict[str, Any]],
                           aiService=None) -> Optional[bytes]:
    """Render a full dossier as a PDF via the shared RendererPdf.

    Returns the raw PDF bytes, or None when the renderer is unavailable or
    rendering fails (both cases are logged as warnings).
    """
    try:
        from modules.services.serviceGeneration.renderers.rendererPdf import RendererPdf
        content = _buildPdfContent(context, sessions, tasks, scores, isDossier=True)
        rendered = await RendererPdf().render(
            extractedContent=content,
            title=context.get("title", "Dossier"),
            aiService=aiService,
        )
        if rendered:
            return rendered[0].documentData
    except ImportError:
        logger.warning("RendererPdf not available, falling back to markdown-based PDF")
    except Exception as e:
        logger.warning(f"PDF rendering failed: {e}")
    return None
async def renderSessionPdf(session: Dict[str, Any], messages: List[Dict[str, Any]],
                           tasks: List[Dict[str, Any]], scores: List[Dict[str, Any]],
                           aiService=None) -> Optional[bytes]:
    """Render a single coaching session as a PDF.

    Returns the raw PDF bytes of the first rendered document, or None when
    the renderer is unavailable or rendering fails.
    """
    try:
        from modules.services.serviceGeneration.renderers.rendererPdf import RendererPdf
        title = f"Session {_formatDate(session.get('startedAt'))}"
        content = _buildPdfContent(
            {"title": title}, [session], tasks, scores,
            isDossier=False, messages=messages,
        )
        rendered = await RendererPdf().render(extractedContent=content, title=title, aiService=aiService)
        if rendered:
            return rendered[0].documentData
    except ImportError:
        logger.warning("RendererPdf not available")
    except Exception as e:
        logger.warning(f"Session PDF rendering failed: {e}")
    return None
def _buildPdfContent(context, sessions, tasks, scores, isDossier=True, messages=None) -> Dict[str, Any]:
"""Convert dossier/session data into the extractedContent format expected by RendererPdf."""
title = context.get("title", "Export")
sections = []
sections.append({
"id": "header",
"content_type": "heading",
"elements": [{"text": title, "level": 1}],
})
if isDossier and context.get("description"):
sections.append({
"id": "desc",
"content_type": "paragraph",
"elements": [{"text": context.get("description")}],
})
completedSessions = [s for s in sessions if s.get("status") == "completed"] if isDossier else sessions
if completedSessions:
sessionRows = []
for s in completedSessions:
sessionRows.append({
"cells": [
_formatDate(s.get("startedAt") or s.get("createdAt")),
str(s.get("competenceScore") or "--"),
s.get("summary", "")[:200] if s.get("summary") else "",
]
})
sections.append({
"id": "sessions",
"content_type": "heading",
"elements": [{"text": "Sessions", "level": 2}],
})
sections.append({
"id": "sessions_table",
"content_type": "table",
"elements": [{
"headers": ["Datum", "Score", "Zusammenfassung"],
"rows": sessionRows,
}],
})
if messages:
chatElements = []
for msg in messages:
role = "Du" if msg.get("role") == "user" else "Coach"
chatElements.append({"text": f"{role}: {msg.get('content', '')}"})
sections.append({
"id": "chat",
"content_type": "heading",
"elements": [{"text": "Gesprächsverlauf", "level": 2}],
})
sections.append({
"id": "chat_content",
"content_type": "paragraph",
"elements": chatElements,
})
if tasks:
taskItems = [{"text": f"{'[x]' if t.get('status') == 'done' else '[ ]'} {t.get('title')}"} for t in tasks]
sections.append({
"id": "tasks",
"content_type": "heading",
"elements": [{"text": "Aufgaben", "level": 2}],
})
sections.append({
"id": "task_list",
"content_type": "bullet_list",
"elements": taskItems,
})
return {
"metadata": {"title": title},
"documents": [{"id": "main", "title": title, "sections": sections}],
}
def _formatDate(isoStr: Optional[str]) -> str:
if not isoStr:
return datetime.now().strftime("%d.%m.%Y")
try:
dt = datetime.fromisoformat(str(isoStr).replace("Z", "+00:00"))
return dt.strftime("%d.%m.%Y")
except Exception:
return isoStr
def _parseJson(value, fallback):
if not value:
return fallback
if isinstance(value, (list, dict)):
return value
try:
return json.loads(value)
except (json.JSONDecodeError, TypeError):
return fallback
def _groupScoresByDimension(scores: List[Dict[str, Any]]) -> Dict[str, List[Dict[str, Any]]]:
groups: Dict[str, List[Dict[str, Any]]] = {}
for s in scores:
dim = s.get("dimension", "unknown")
if dim not in groups:
groups[dim] = []
groups[dim].append(s)
for dim in groups:
groups[dim].sort(key=lambda x: x.get("createdAt") or "")
return groups

View file

@ -0,0 +1,149 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
CommCoach Gamification - Badge definitions and award logic.
Checks and awards badges after each session completion.
"""
import logging
from typing import Dict, Any, List, Optional
logger = logging.getLogger(__name__)
# Catalogue of all awardable badges, keyed by badge key. Each entry carries
# the display metadata (German label/description, icon name) that
# checkAndAwardBadges copies onto newly awarded badge records.
BADGE_DEFINITIONS: Dict[str, Dict[str, Any]] = {
    # Milestone: very first completed session.
    "first_session": {
        "label": "Erste Session",
        "description": "Deine erste Coaching-Session abgeschlossen",
        "icon": "star",
    },
    # Streak badges: consecutive days with at least one session.
    "streak_3": {
        "label": "3-Tage-Serie",
        "description": "3 Tage in Folge eine Session absolviert",
        "icon": "fire",
    },
    "streak_7": {
        "label": "Wochenserie",
        "description": "7 Tage in Folge eine Session absolviert",
        "icon": "fire",
    },
    "streak_30": {
        "label": "Monatsserie",
        "description": "30 Tage in Folge eine Session absolviert",
        "icon": "fire",
    },
    # Volume badges: cumulative number of completed sessions.
    "sessions_5": {
        "label": "Engagiert",
        "description": "5 Sessions abgeschlossen",
        "icon": "trophy",
    },
    "sessions_10": {
        "label": "Fortgeschritten",
        "description": "10 Sessions abgeschlossen",
        "icon": "trophy",
    },
    "sessions_25": {
        "label": "Experte",
        "description": "25 Sessions abgeschlossen",
        "icon": "trophy",
    },
    "sessions_50": {
        "label": "Meister",
        "description": "50 Sessions abgeschlossen",
        "icon": "trophy",
    },
    # Performance badge: session competence score of 80 or higher.
    "high_score": {
        "label": "Bestleistung",
        "description": "Durchschnittsscore über 80 in einer Session",
        "icon": "medal",
    },
    # Breadth badge: three different coaching contexts active at once.
    "multi_context": {
        "label": "Vielseitig",
        "description": "3 verschiedene Coaching-Themen aktiv",
        "icon": "layers",
    },
    # Roleplay badge: first session with a non-default persona.
    "roleplay_first": {
        "label": "Rollenspieler",
        "description": "Erste Roleplay-Session mit einer Persona abgeschlossen",
        "icon": "theater",
    },
    # Coverage badge: scored in all five competence dimensions.
    "all_dimensions": {
        "label": "Ganzheitlich",
        "description": "In allen 5 Kompetenz-Dimensionen bewertet",
        "icon": "compass",
    },
    # Follow-through badge: ten coaching tasks completed.
    "task_completer": {
        "label": "Umsetzer",
        "description": "10 Coaching-Aufgaben erledigt",
        "icon": "check-circle",
    },
}
async def checkAndAwardBadges(interface, userId: str, mandateId: str, instanceId: str,
                              session: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
    """Check badge conditions and award any newly earned badges.

    Reads the user's coaching profile via *interface*, evaluates every badge
    condition derivable from it (plus the just-finished *session*, when
    given), awards badges the user does not yet hold, and returns the newly
    awarded badge records enriched with their display metadata. Returns an
    empty list when no profile exists.
    """
    awarded: List[Dict[str, Any]] = []
    profile = interface.getProfile(userId, instanceId)
    if not profile:
        return awarded
    totalSessions = profile.get("totalSessions", 0)
    streakDays = profile.get("streakDays", 0)
    # (badgeKey, condition) pairs derived from aggregate profile stats.
    badgesToCheck = [
        ("first_session", totalSessions >= 1),
        ("sessions_5", totalSessions >= 5),
        ("sessions_10", totalSessions >= 10),
        ("sessions_25", totalSessions >= 25),
        ("sessions_50", totalSessions >= 50),
        ("streak_3", streakDays >= 3),
        ("streak_7", streakDays >= 7),
        ("streak_30", streakDays >= 30),
    ]
    # Session-derived badge: competence score of 80+ earns 'high_score'.
    if session and session.get("competenceScore"):
        try:
            if float(session["competenceScore"]) >= 80:
                badgesToCheck.append(("high_score", True))
        except (ValueError, TypeError):
            pass  # non-numeric score: simply no high_score badge
    # Session-derived badge: any non-default persona counts as roleplay.
    if session and session.get("personaId") and session["personaId"] != "coach":
        badgesToCheck.append(("roleplay_first", True))
    # NOTE(review): the 'multi_context' badge (3 active coaching contexts) is
    # defined in BADGE_DEFINITIONS but was never evaluated here — the previous
    # implementation contained only an always-False dead-code block for it.
    # Implement once a context lookup is available on the interface.
    # Task badge: requires the optional getCompletedTaskCount capability.
    completedTasks = interface.getCompletedTaskCount(userId) if hasattr(interface, 'getCompletedTaskCount') else 0
    if completedTasks >= 10:
        badgesToCheck.append(("task_completer", True))
    for badgeKey, condition in badgesToCheck:
        if condition and not interface.hasBadge(userId, instanceId, badgeKey):
            newBadge = interface.awardBadge({
                "userId": userId,
                "mandateId": mandateId,
                "instanceId": instanceId,
                "badgeKey": badgeKey,
            })
            # Enrich with display metadata so the frontend can render directly.
            definition = BADGE_DEFINITIONS.get(badgeKey, {})
            newBadge["label"] = definition.get("label", badgeKey)
            newBadge["description"] = definition.get("description", "")
            newBadge["icon"] = definition.get("icon", "star")
            awarded.append(newBadge)
            logger.info(f"Badge '{badgeKey}' awarded to user {userId}")
    return awarded
def getBadgeDefinitions() -> Dict[str, Dict[str, Any]]:
    """Return all badge definitions for the frontend.

    Note: returns the module-level BADGE_DEFINITIONS dict itself (not a
    copy), so callers must treat it as read-only.
    """
    return BADGE_DEFINITIONS

View file

@ -0,0 +1,139 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
CommCoach Personas - Built-in roleplay persona definitions.
Gender-balanced set of professional and personal interaction partners.
"""
import logging
from typing import List, Dict, Any
logger = logging.getLogger(__name__)
# Built-in roleplay personas seeded for every installation. Each dict maps
# onto the CoachingPersona datamodel fields; 'description' is the German
# role briefing handed to the AI. The set is gender-balanced and covers
# professional and personal counterpart types.
BUILTIN_PERSONAS: List[Dict[str, Any]] = [
    # Default: plain coaching, no roleplay character.
    {
        "key": "coach",
        "label": "Coach (Standard)",
        "description": "Normaler Coaching-Modus ohne Roleplay. Der Coach stellt Fragen, gibt Tipps und begleitet dich.",
        "gender": None,
        "category": "builtin",
    },
    # Executive counterpart: numbers-driven, impatient CFO.
    {
        "key": "critical_cfo_f",
        "label": "Kritische CFO",
        "description": "Sandra Meier, CFO eines mittelständischen Unternehmens. Analytisch, zahlengetrieben, ungeduldig bei vagen Aussagen. "
                       "Hinterfragt jeden Vorschlag nach ROI und Wirtschaftlichkeit. Spricht schnell und direkt. "
                       "Erwartet präzise Antworten und belastbare Daten. Wird irritiert bei Ausweichen oder Unsicherheit.",
        "gender": "f",
        "category": "builtin",
    },
    # Leadership challenge: defensive long-tenured employee.
    {
        "key": "difficult_employee_m",
        "label": "Schwieriger Mitarbeiter",
        "description": "Thomas Huber, langjähriger Mitarbeiter der sich übergangen fühlt. Defensiv, emotional, nimmt Kritik persönlich. "
                       "Verweist ständig auf seine Erfahrung und frühere Verdienste. Reagiert mit Widerstand auf Veränderungen. "
                       "Braucht das Gefühl, gehört und wertgeschätzt zu werden, bevor er sich öffnet.",
        "gender": "m",
        "category": "builtin",
    },
    # Leadership challenge: competent but insecure newcomer.
    {
        "key": "new_team_member_f",
        "label": "Unsichere neue Mitarbeiterin",
        "description": "Lisa Brunner, seit drei Wochen im Team. Fachlich kompetent aber unsicher in der neuen Umgebung. "
                       "Stellt viele Fragen, traut sich aber nicht, eigene Ideen einzubringen. Braucht klare Orientierung "
                       "und ermutigende Führung. Reagiert positiv auf Lob und konkrete Anleitungen.",
        "gender": "f",
        "category": "builtin",
    },
    # Executive counterpart: formal, time-pressed board member.
    {
        "key": "board_member_m",
        "label": "Verwaltungsrat",
        "description": "Dr. Peter Keller, erfahrener Verwaltungsrat. Formell, strategisch denkend, zeitlich unter Druck. "
                       "Erwartet prägnante Präsentationen auf den Punkt. Unterbricht bei zu vielen Details. "
                       "Interessiert sich für das grosse Bild, Risiken und strategische Implikationen. Ungeduldig bei Smalltalk.",
        "gender": "m",
        "category": "builtin",
    },
    # Customer-facing challenge: furious business customer.
    {
        "key": "angry_customer_f",
        "label": "Aufgebrachte Kundin",
        "description": "Maria Rossi, Geschäftskunde die wütend ist wegen einer fehlerhaften Lieferung. Emotional, laut, "
                       "droht mit Vertragsauflösung. Will sofortige Lösungen, keine Erklärungen oder Entschuldigungen. "
                       "Kann beruhigt werden durch empathisches Zuhören und konkrete Sofortmassnahmen.",
        "gender": "f",
        "category": "builtin",
    },
    # Change-management challenge: manager blocking new initiatives.
    {
        "key": "resistant_manager_m",
        "label": "Widerständiger Abteilungsleiter",
        "description": "Martin Weber, Abteilungsleiter seit 15 Jahren. Blockiert systematisch Veränderungsprojekte mit "
                       "Argumenten wie 'Das haben wir immer so gemacht' und 'Das funktioniert in der Praxis nicht'. "
                       "Schützt sein Team vor zusätzlicher Belastung. Respektiert nur Argumente mit konkretem Nutzen für seine Abteilung.",
        "gender": "m",
        "category": "builtin",
    },
    # Peer-politics challenge: rival colleague competing for promotion.
    {
        "key": "ambitious_colleague_f",
        "label": "Ehrgeizige Kollegin",
        "description": "Anna Fischer, gleichrangige Kollegin die um dieselbe Beförderung konkurriert. Charmant aber strategisch. "
                       "Versucht subtil, die Ideen anderer als ihre eigenen darzustellen. Konkurriert um Ressourcen und "
                       "Sichtbarkeit beim Management. Kann kooperativ werden, wenn man ihr Win-Win-Szenarien aufzeigt.",
        "gender": "f",
        "category": "builtin",
    },
    # Personal-life scenario: supportive but worried partner.
    {
        "key": "partner_supportive_f",
        "label": "Verständnisvolle Lebenspartnerin",
        "description": "Claudia, deine Lebenspartnerin. Grundsätzlich unterstützend, aber zunehmend besorgt über deine "
                       "Work-Life-Balance. Möchte über Arbeitsbelastung sprechen und gemeinsame Zeit einfordern. "
                       "Reagiert emotional auf Abweisung, ist aber offen für kompromissorientierte Gespräche. "
                       "Wünscht sich, dass du mehr von deinen Gefühlen teilst.",
        "gender": "f",
        "category": "builtin",
    },
    # Personal-life scenario: frustrated, openly critical partner.
    {
        "key": "partner_critical_m",
        "label": "Kritischer Lebenspartner",
        "description": "Michael, dein Lebenspartner. Frustriert über deine häufige Abwesenheit und ständiges Arbeiten. "
                       "Drückt Enttäuschung offen aus, manchmal mit Sarkasmus. Fühlt sich vernachlässigt und "
                       "hinterfragt deine Prioritäten. Braucht das Gefühl, dass die Beziehung dir genauso wichtig ist "
                       "wie die Karriere. Reagiert positiv auf ehrliche Selbstreflexion.",
        "gender": "m",
        "category": "builtin",
    },
]
def seedBuiltinPersonas(interface) -> int:
    """Create or update builtin personas in the database.

    Existing system personas (matched by key + userId 'system') have their
    display fields refreshed in place; missing ones are created. Returns
    the number of newly created personas.
    """
    from .datamodelCommcoach import CoachingPersona
    from modules.shared.timeUtils import getIsoTimestamp

    created = 0
    for personaDef in BUILTIN_PERSONAS:
        matches = interface.db.getRecordset(
            CoachingPersona,
            recordFilter={"key": personaDef["key"], "userId": "system"},
        )
        if matches:
            # Refresh the mutable display fields of the existing record.
            interface.db.recordModify(CoachingPersona, matches[0]["id"], {
                "label": personaDef["label"],
                "description": personaDef["description"],
                "gender": personaDef.get("gender"),
                "updatedAt": getIsoTimestamp(),
            })
            continue
        record = CoachingPersona(
            userId="system",
            key=personaDef["key"],
            label=personaDef["label"],
            description=personaDef["description"],
            gender=personaDef.get("gender"),
            category="builtin",
            isActive=True,
        ).model_dump()
        record["createdAt"] = getIsoTimestamp()
        record["updatedAt"] = getIsoTimestamp()
        interface.db.recordCreate(CoachingPersona, record)
        created += 1
    if created:
        logger.info(f"Seeded {created} builtin CommCoach personas")
    return created

View file

@ -65,14 +65,14 @@ class TestBuildCoachingSystemPrompt:
def test_promptLanguageIsGerman(self):
context = {"title": "Test", "category": "custom"}
prompt = buildCoachingSystemPrompt(context, [], [])
assert "Fuehrungskraefte" in prompt or "Coach" in prompt
assert "Führungskräfte" in prompt or "Coach" in prompt
def test_withEarlierSummary(self):
context = {"title": "Test", "category": "custom"}
messages = [{"role": "user", "content": "Recent question"}]
earlierSummary = "User discussed delegation. Coach suggested practice."
prompt = buildCoachingSystemPrompt(context, messages, [], earlierSummary=earlierSummary)
assert "Aelterer Gespraechsverlauf" in prompt
assert "Älterer Gesprächsverlauf" in prompt
assert "delegation" in prompt.lower()
assert "Recent question" in prompt
@ -81,7 +81,7 @@ class TestBuildCoachingSystemPrompt:
prompt = buildCoachingSystemPrompt(
context, [], [], rollingOverview="User arbeitet an Delegation. Fortschritt sichtbar."
)
assert "Gesamtueberblick" in prompt
assert "Gesamtüberblick" in prompt
assert "Delegation" in prompt
def test_withRetrievedSession(self):

View file

@ -684,11 +684,11 @@ class TeamsbotService:
logger.debug(f"Session {sessionId}: Chat history stored (no AI trigger): [{speaker}] {text[:60]}")
return
# Filter out the bot's own speech entirely — captions of the bot's
# own voice come back as garbled text (e.g. German TTS → English caption)
# which pollutes the context buffer and confuses AI analysis.
# Filter out the bot's own speech (caption/audioCapture) — garbled text
# pollutes context. Chat from the bot is clean text and must appear in
# the transcript for all participants.
isBotSpeaker = self._isBotSpeaker(speaker)
if isBotSpeaker:
if isBotSpeaker and source != "chat":
logger.debug(f"Session {sessionId}: Ignoring own bot caption from: [{speaker}] {text[:80]}...")
return
@ -778,6 +778,10 @@ class TeamsbotService:
if self.config.responseMode == TeamsbotResponseMode.TRANSCRIBE_ONLY:
return
# Bot's own chat: stored for display only, never trigger AI
if source == "chat" and isBotSpeaker:
return
# Stop phrases: trigger immediately without debounce (root cause: 3s debounce delayed stop)
if self._isStopPhrase(text):
logger.info(f"Session {sessionId}: Stop phrase detected, triggering analysis immediately")

View file

@ -662,10 +662,6 @@ class AppObjects:
if authAuthority != AuthAuthority.LOCAL and authAuthority != AuthAuthority.LOCAL.value:
raise ValueError("User does not have local authentication enabled")
# Check if user has a reset token set (password reset required)
if userRecord.get("resetToken"):
raise ValueError("Passwort-Zurücksetzung erforderlich. Bitte prüfen Sie Ihre E-Mail.")
if not userRecord.get("hashedPassword"):
raise ValueError("User has no password set")

View file

@ -602,8 +602,8 @@ def password_reset_request(
# Generate reset token
token, expires = rootInterface.generateResetTokenAndExpiry()
# Set reset token (clears password)
rootInterface.setResetToken(user.id, token, expires)
# Set reset token but keep existing password valid until new one is set
rootInterface.setResetToken(user.id, token, expires, clearPassword=False)
# Generate magic link using provided frontend URL
magicLink = f"{baseUrl}/reset?token={token}"

View file

@ -56,12 +56,12 @@ async def chatStart(currentUser: User, userInput: UserInputRequest, workflowMode
logger.error(f"Error starting chat: {str(e)}")
raise
async def chatStop(currentUser: User, workflowId: str, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None) -> ChatWorkflow:
async def chatStop(currentUser: User, workflowId: str, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None, featureCode: Optional[str] = None) -> ChatWorkflow:
"""Stops a running chat."""
try:
services = getServices(currentUser, mandateId=mandateId, featureInstanceId=featureInstanceId)
if featureInstanceId:
services.featureCode = 'chatplayground'
if featureCode:
services.featureCode = featureCode
workflowManager = WorkflowManager(services)
return await workflowManager.workflowStop(workflowId)
except Exception as e:
@ -101,8 +101,11 @@ async def executeAutomation(automationId: str, automation, creatorUser: User, se
logger.debug(f"Automation {automationId} restricted to providers: {automation.allowedProviders}")
# Context comes EXCLUSIVELY from the automation definition
automationMandateId = str(automation.mandateId)
automationFeatureInstanceId = str(automation.featureInstanceId)
automationMandateId = str(automation.mandateId) if automation.mandateId is not None else None
automationFeatureInstanceId = str(automation.featureInstanceId) if automation.featureInstanceId is not None else None
if not automationMandateId or not automationFeatureInstanceId:
raise ValueError(f"Automation {automationId} missing mandateId or featureInstanceId")
logger.info(f"Executing automation {automationId} as user {creatorUser.id} with mandateId={automationMandateId}, featureInstanceId={automationFeatureInstanceId}")
@ -118,7 +121,7 @@ async def executeAutomation(automationId: str, automation, creatorUser: User, se
logger.error(f"Placeholders: {placeholders}")
logger.error(f"Generated planJson (first 1000 chars): {planJson[:1000]}")
logger.error(f"Error position: line {e.lineno}, column {e.colno}, char {e.pos}")
if e.pos:
if e.pos is not None:
start = max(0, e.pos - 100)
end = min(len(planJson), e.pos + 100)
logger.error(f"Context around error: ...{planJson[start:end]}...")
@ -233,20 +236,10 @@ def syncAutomationEvents(services, eventUser) -> Dict[str, Any]:
cronKwargs = parseScheduleToCron(schedule)
if isActive:
# Remove existing event if present (handles schedule changes)
if currentEventId:
try:
eventManager.remove(currentEventId)
except Exception as e:
logger.warning(f"Error removing old event {currentEventId}: {str(e)}")
# Register new event
newEventId = f"automation.{automationId}"
# Create event handler function
handler = createAutomationEventHandler(automationId, eventUser)
# Register cron job
# Register with replaceExisting=True (atomically replaces old event)
eventManager.registerCron(
jobId=newEventId,
func=handler,

View file

@ -48,7 +48,7 @@ def start(eventUser) -> bool:
except Exception as e:
logger.error(f"Automation: Error setting up events on startup: {str(e)}")
# Don't fail startup if automation sync fails
return False
return True

View file

@ -6,7 +6,7 @@ Automation templates for workflow definitions.
Contains predefined workflow templates that can be used to create automation definitions.
"""
from typing import Dict, Any, List
from typing import Dict, Any
# Automation templates structure
AUTOMATION_TEMPLATES: Dict[str, Any] = {

View file

@ -69,50 +69,42 @@ def replacePlaceholders(template: str, placeholders: Dict[str, str]) -> str:
result = result.replace(arrayPattern, arrayValue)
continue # Skip the regular replacement below
# Regular replacement - check if in quoted context
patternStart = result.find(pattern)
isQuoted = False
if patternStart > 0:
charBefore = result[patternStart - 1] if patternStart > 0 else None
patternEnd = patternStart + len(pattern)
charAfter = result[patternEnd] if patternEnd < len(result) else None
if charBefore == '"' and charAfter == '"':
isQuoted = True
# Handle different value types
if isinstance(value, (list, dict)):
# Python list/dict - convert to JSON
replacement = json.dumps(value)
elif isinstance(value, str):
# String value - check if it's a JSON string representing list/dict
try:
parsed = json.loads(value)
if isinstance(parsed, (list, dict)):
# It's a JSON string of a list/dict
if isQuoted:
# In quoted context, escape the JSON string
escaped = json.dumps(value)
replacement = escaped[1:-1] # Remove outer quotes
# Replace occurrences one-by-one to handle mixed contexts
while pattern in result:
patternStart = result.find(pattern)
isQuoted = False
if patternStart > 0:
charBefore = result[patternStart - 1]
patternEnd = patternStart + len(pattern)
charAfter = result[patternEnd] if patternEnd < len(result) else None
if charBefore == '"' and charAfter == '"':
isQuoted = True
if isinstance(value, (list, dict)):
replacement = json.dumps(value)
elif isinstance(value, str):
try:
parsed = json.loads(value)
if isinstance(parsed, (list, dict)):
if isQuoted:
escaped = json.dumps(value)
replacement = escaped[1:-1]
else:
replacement = value
else:
# In unquoted context, use JSON directly
replacement = value
else:
# It's a JSON string of a primitive
if isQuoted:
escaped = json.dumps(value)
replacement = escaped[1:-1]
else:
replacement = value
except (json.JSONDecodeError, ValueError):
if isQuoted:
escaped = json.dumps(value)
replacement = escaped[1:-1]
else:
replacement = value
except (json.JSONDecodeError, ValueError):
# Not valid JSON - treat as plain string
if isQuoted:
escaped = json.dumps(value)
replacement = escaped[1:-1]
else:
replacement = value
else:
# Numbers, booleans, None - convert to string
replacement = str(value)
result = result.replace(pattern, replacement)
else:
replacement = str(value)
result = result[:patternStart] + replacement + result[patternStart + len(pattern):]
return result

View file

@ -74,7 +74,11 @@ async def generateCode(self, parameters: Dict[str, Any]) -> ActionResult:
documentName=docData.documentName,
documentData=docData.documentData,
mimeType=docData.mimeType,
sourceJson=docData.sourceJson if hasattr(docData, 'sourceJson') else None
sourceJson=docData.sourceJson if hasattr(docData, 'sourceJson') else None,
validationMetadata={
"actionType": "ai.generateCode",
"resultType": resultType,
}
))
# If no documents but content exists, create a document from content
@ -112,7 +116,11 @@ async def generateCode(self, parameters: Dict[str, Any]) -> ActionResult:
documents.append(ActionDocument(
documentName=docName,
documentData=aiResponse.content.encode('utf-8') if isinstance(aiResponse.content, str) else aiResponse.content,
mimeType=mimeType
mimeType=mimeType,
validationMetadata={
"actionType": "ai.generateCode",
"resultType": resultType,
}
))
return ActionResult.isSuccess(documents=documents)

View file

@ -78,7 +78,12 @@ async def generateDocument(self, parameters: Dict[str, Any]) -> ActionResult:
documentName=docData.documentName,
documentData=docData.documentData,
mimeType=docData.mimeType,
sourceJson=docData.sourceJson if hasattr(docData, 'sourceJson') else None
sourceJson=docData.sourceJson if hasattr(docData, 'sourceJson') else None,
validationMetadata={
"actionType": "ai.generateDocument",
"documentType": documentType,
"resultType": resultType,
}
))
# If no documents but content exists, create a document from content
@ -112,7 +117,12 @@ async def generateDocument(self, parameters: Dict[str, Any]) -> ActionResult:
documents.append(ActionDocument(
documentName=docName,
documentData=aiResponse.content.encode('utf-8') if isinstance(aiResponse.content, str) else aiResponse.content,
mimeType=mimeType
mimeType=mimeType,
validationMetadata={
"actionType": "ai.generateDocument",
"documentType": documentType,
"resultType": resultType,
}
))
return ActionResult.isSuccess(documents=documents)

View file

@ -12,8 +12,8 @@ from modules.datamodels.datamodelExtraction import ContentPart
logger = logging.getLogger(__name__)
async def process(self, parameters: Dict[str, Any]) -> ActionResult:
operationId = None
try:
# Init progress logger
workflowId = self.services.workflow.id if self.services.workflow else f"no-workflow-{int(time.time())}"
operationId = f"ai_process_{workflowId}_{int(time.time())}"
@ -83,7 +83,8 @@ async def process(self, parameters: Dict[str, Any]) -> ActionResult:
output_format = None
logger.debug("resultType not provided - formats will be determined from prompt by AI")
output_mime_type = "application/octet-stream" # Prefer service-provided mimeType when available
mimeMap = {"txt": "text/plain", "json": "application/json", "html": "text/html", "md": "text/markdown", "csv": "text/csv", "xml": "application/xml"}
output_mime_type = mimeMap.get(normalized_result_type, "text/plain") if normalized_result_type else "text/plain"
# Phase 7.3: Pass both documentList and contentParts to AI service
# (Extraction logic removed - handled by AI service)
@ -264,11 +265,11 @@ async def process(self, parameters: Dict[str, Any]) -> ActionResult:
except Exception as e:
logger.error(f"Error in AI processing: {str(e)}")
# Complete progress tracking with failure
try:
self.services.chat.progressLogFinish(operationId, False)
except:
pass # Don't fail on progress logging errors
if operationId:
self.services.chat.progressLogFinish(operationId, False)
except Exception:
pass
return ActionResult.isFailure(
error=str(e)

View file

@ -4,18 +4,19 @@
import logging
import time
import re
import json
from typing import Dict, Any
from modules.datamodels.datamodelChat import ActionResult, ActionDocument
logger = logging.getLogger(__name__)
async def webResearch(self, parameters: Dict[str, Any]) -> ActionResult:
operationId = None
try:
prompt = parameters.get("prompt")
if not prompt:
return ActionResult.isFailure(error="Research prompt is required")
# Init progress logger
workflowId = self.services.workflow.id if self.services.workflow else f"no-workflow-{int(time.time())}"
operationId = f"web_research_{workflowId}_{int(time.time())}"
@ -78,9 +79,10 @@ async def webResearch(self, parameters: Dict[str, Any]) -> ActionResult:
"researchDepth": parameters.get("researchDepth", "general"),
"resultFormat": "json"
}
documentData = json.dumps(result, ensure_ascii=False) if isinstance(result, dict) else result
actionDocument = ActionDocument(
documentName=meaningfulName,
documentData=result,
documentData=documentData,
mimeType="application/json",
validationMetadata=validationMetadata
)
@ -90,8 +92,9 @@ async def webResearch(self, parameters: Dict[str, Any]) -> ActionResult:
except Exception as e:
logger.error(f"Error in web research: {str(e)}")
try:
self.services.chat.progressLogFinish(operationId, False)
except:
if operationId:
self.services.chat.progressLogFinish(operationId, False)
except Exception:
pass
return ActionResult.isFailure(error=str(e))

View file

@ -1,11 +1,10 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
from typing import Dict, List, Optional, Any, Literal
from typing import Dict, List, Optional, Any
from datetime import datetime, UTC
import logging
from functools import wraps
import inspect
from modules.datamodels.datamodelWorkflowActions import WorkflowActionDefinition, WorkflowActionParameter
from modules.datamodels.datamodelRbac import AccessRuleContext
@ -258,9 +257,13 @@ class MethodBase:
raise ValueError(f"Expected dict for type '{expectedType}', got {type(value).__name__}")
return value
# Handle simple types
# Handle simple types (bool must be checked before int since bool is subclass of int)
if expectedType in typeMap:
expectedTypeClass = typeMap[expectedType]
if expectedType == 'int' and isinstance(value, bool):
raise ValueError(f"Expected int, got bool: {value}")
if expectedType == 'bool' and isinstance(value, int) and not isinstance(value, bool):
return bool(value)
if not isinstance(value, expectedTypeClass):
try:
return expectedTypeClass(value)
@ -290,10 +293,11 @@ class MethodBase:
def getActionSignature(self, actionName: str) -> str:
"""Get formatted action signature for AI prompt generation (detailed version)"""
if actionName not in self.actions:
allActions = self.actions
if actionName not in allActions:
return ""
action = self.actions[actionName]
action = allActions[actionName]
paramList = []
# Extract detailed parameter information from docstring

View file

@ -89,14 +89,26 @@ async def queryDatabase(self, parameters: Dict[str, Any]) -> ActionResult:
# Update progress
self.services.chat.progressLogUpdate(operationId, 0.3, "Validating query")
# Validate: only SELECT queries allowed
sqlNormalized = sqlQuery.strip().upper()
if not sqlNormalized.startswith("SELECT"):
return ActionResult.isFailure(error="Only SELECT queries are allowed")
forbiddenKeywords = ["INSERT", "UPDATE", "DELETE", "DROP", "ALTER", "CREATE", "TRUNCATE", "EXEC", "EXECUTE"]
for kw in forbiddenKeywords:
if f" {kw} " in f" {sqlNormalized} " or sqlNormalized.startswith(f"{kw} "):
return ActionResult.isFailure(error=f"Forbidden SQL keyword detected: {kw}")
# Initialize connector
connector = PreprocessorConnector()
# Update progress
self.services.chat.progressLogUpdate(operationId, 0.5, "Executing query")
# Execute query
result = await connector.executeQuery(sqlQuery)
try:
result = await connector.executeQuery(sqlQuery)
except Exception:
await connector.close()
raise
# Update progress
self.services.chat.progressLogUpdate(operationId, 0.8, "Formatting results")
@ -134,10 +146,9 @@ async def queryDatabase(self, parameters: Dict[str, Any]) -> ActionResult:
except Exception as e:
logger.error(f"Error executing database query: {str(e)}")
# Complete progress tracking with failure
try:
self.services.chat.progressLogFinish(operationId, False)
except:
except Exception:
pass
return ActionResult.isFailure(

View file

@ -11,8 +11,8 @@ from modules.datamodels.datamodelExtraction import ExtractionOptions, MergeStrat
logger = logging.getLogger(__name__)
async def extractContent(self, parameters: Dict[str, Any]) -> ActionResult:
operationId = None
try:
# Init progress logger
workflowId = self.services.workflow.id if self.services.workflow else f"no-workflow-{int(time.time())}"
operationId = f"context_extract_{workflowId}_{int(time.time())}"
@ -208,11 +208,11 @@ async def extractContent(self, parameters: Dict[str, Any]) -> ActionResult:
except Exception as e:
logger.error(f"Error in content extraction: {str(e)}")
# Complete progress tracking with failure
try:
self.services.chat.progressLogFinish(operationId, False)
except:
pass # Don't fail on progress logging errors
if operationId:
self.services.chat.progressLogFinish(operationId, False)
except Exception:
pass
return ActionResult.isFailure(error=str(e))

View file

@ -22,14 +22,13 @@ async def getDocumentIndex(self, parameters: Dict[str, Any]) -> ActionResult:
documentsIndex = self.services.chat.getAvailableDocuments(workflow)
if not documentsIndex or documentsIndex == "No documents available" or documentsIndex == "NO DOCUMENTS AVAILABLE - This workflow has no documents to process.":
# Return empty index structure
indexData = {
"workflowId": getattr(workflow, 'id', 'unknown'),
"totalDocuments": 0,
"rounds": [],
"documentReferences": []
}
if resultType == "json":
indexData = {
"workflowId": getattr(workflow, 'id', 'unknown'),
"totalDocuments": 0,
"rounds": [],
"documentReferences": []
}
indexContent = json.dumps(indexData, indent=2, ensure_ascii=False)
else:
indexContent = "Document Index\n==============\n\nNo documents available in this workflow.\n"
@ -64,7 +63,7 @@ async def getDocumentIndex(self, parameters: Dict[str, Any]) -> ActionResult:
document = ActionDocument(
documentName=filename,
documentData=indexContent,
mimeType="application/json" if resultType == "json" else "text/plain",
mimeType="application/json" if resultType == "json" else ("text/markdown" if resultType == "md" else "text/plain"),
validationMetadata=validationMetadata
)

View file

@ -11,8 +11,8 @@ from modules.datamodels.datamodelExtraction import ContentExtracted, ContentPart
logger = logging.getLogger(__name__)
async def neutralizeData(self, parameters: Dict[str, Any]) -> ActionResult:
operationId = None
try:
# Init progress logger
workflowId = self.services.workflow.id if self.services.workflow else f"no-workflow-{int(time.time())}"
operationId = f"context_neutralize_{workflowId}_{int(time.time())}"
@ -228,10 +228,10 @@ async def neutralizeData(self, parameters: Dict[str, Any]) -> ActionResult:
except Exception as e:
logger.error(f"Error in data neutralization: {str(e)}")
# Complete progress tracking with failure
try:
self.services.chat.progressLogFinish(operationId, False)
except:
pass # Don't fail on progress logging errors
if operationId:
self.services.chat.progressLogFinish(operationId, False)
except Exception:
pass
return ActionResult.isFailure(error=str(e))

View file

@ -29,7 +29,7 @@ async def readEmails(self, parameters: Dict[str, Any]) -> ActionResult:
connectionReference = parameters.get("connectionReference")
folder = parameters.get("folder", "Inbox")
limit = parameters.get("limit", 10)
limit = parameters.get("limit", 1000)
filter = parameters.get("filter")
outputMimeType = parameters.get("outputMimeType", "application/json")
@ -110,7 +110,6 @@ async def readEmails(self, parameters: Dict[str, Any]) -> ActionResult:
if response.status_code != 200:
logger.error(f"Graph API error: {response.status_code} - {response.text}")
logger.error(f"Request URL: {response.url}")
logger.error(f"Request headers: {headers}")
logger.error(f"Request params: {params}")
response.raise_for_status()
@ -217,8 +216,8 @@ async def readEmails(self, parameters: Dict[str, Any]) -> ActionResult:
if operationId:
try:
self.services.chat.progressLogFinish(operationId, False)
except:
pass # Don't fail on progress logging errors
except Exception:
pass
return ActionResult.isFailure(
error=str(e)
)

View file

@ -93,7 +93,7 @@ async def searchEmails(self, parameters: Dict[str, Any]) -> ActionResult:
try:
error_data = response.json()
logger.error(f"Microsoft Graph API error: {response.status_code} - {error_data}")
except:
except Exception:
logger.error(f"Microsoft Graph API error: {response.status_code} - {response.text}")
# Check for specific error types and provide helpful messages
@ -111,8 +111,6 @@ async def searchEmails(self, parameters: Dict[str, Any]) -> ActionResult:
raise Exception(f"Microsoft Graph API returned {response.status_code}: {response.text}")
response.raise_for_status()
search_data = response.json()
emails = search_data.get("value", [])

View file

@ -293,8 +293,18 @@ async def sendDraftEmail(self, parameters: Dict[str, Any]) -> ActionResult:
except ImportError:
logger.error("requests module not available")
if operationId:
try:
self.services.chat.progressLogFinish(operationId, False)
except Exception:
pass
return ActionResult.isFailure(error="requests module not available")
except Exception as e:
logger.error(f"Error in sendDraftEmail: {str(e)}")
if operationId:
try:
self.services.chat.progressLogFinish(operationId, False)
except Exception:
pass
return ActionResult.isFailure(error=str(e))

View file

@ -40,25 +40,21 @@ class ConnectionHelper:
logger.debug(f"Found connection: {userConnection.id}, status: {userConnection.status.value}, authority: {userConnection.authority.value}")
# Get a fresh token for this connection
token = self.services.chat.getFreshConnectionToken(userConnection.id)
if not token:
logger.error(f"Fresh token not found for connection: {userConnection.id}")
logger.debug(f"Connection details: {userConnection}")
return None
logger.debug(f"Fresh token retrieved for connection {userConnection.id}")
# Check if connection is active
# Check status BEFORE fetching token (avoids unnecessary network call)
if userConnection.status.value != "active":
logger.error(f"Connection is not active: {userConnection.id}, status: {userConnection.status.value}")
return None
token = self.services.chat.getFreshConnectionToken(userConnection.id)
if not token:
logger.error(f"Fresh token not found for connection: {userConnection.id}")
return None
logger.debug(f"Fresh token retrieved for connection {userConnection.id}")
return {
"id": userConnection.id,
"accessToken": token.tokenAccess,
"refreshToken": token.tokenRefresh,
"scopes": ["Mail.ReadWrite", "Mail.Send", "Mail.ReadWrite.Shared", "User.Read"] # Valid Microsoft Graph API scopes
}
except Exception as e:
logger.error(f"Error getting Microsoft connection: {str(e)}")

View file

@ -57,10 +57,10 @@ class EmailProcessingHelper:
# This is an advanced search query, return as-is
return clean_query
# For basic text search, ensure it's safe for contains() filter
# Remove any characters that might break the OData filter syntax
# Remove or escape characters that could break OData filter syntax
safe_query = re.sub(r'[\\\'"]', '', clean_query)
# Escape single quotes for OData safety (double them)
safe_query = clean_query.replace("'", "''")
# Remove backslashes and double quotes
safe_query = re.sub(r'[\\"]', '', safe_query)
return safe_query
@ -173,12 +173,14 @@ class EmailProcessingHelper:
# Handle email address filters (only if it's NOT a search query)
if '@' in filter_text and '.' in filter_text and ' ' not in filter_text and not filter_text.startswith('from:'):
return {"$filter": f"from/fromAddress/address eq '{filter_text}'"}
safeEmail = filter_text.replace("'", "''")
return {"$filter": f"from/fromAddress/address eq '{safeEmail}'"}
# Handle OData filter conditions (contains 'eq', 'ne', 'gt', 'lt', etc.)
if any(op in filter_text.lower() for op in [' eq ', ' ne ', ' gt ', ' lt ', ' ge ', ' le ', ' and ', ' or ']):
return {"$filter": filter_text}
# Handle text content - search in subject
return {"$filter": f"contains(subject,'{filter_text}')"}
# Handle text content - search in subject (escape single quotes)
safeText = filter_text.replace("'", "''")
return {"$filter": f"contains(subject,'{safeText}')"}

View file

@ -240,11 +240,12 @@ async def uploadDocument(self, parameters: Dict[str, Any]) -> ActionResult:
}
successfulUploads = len([r for r in uploadResults if r.get("uploadStatus") == "success"])
overallSuccess = successfulUploads > 0
self.services.chat.progressLogUpdate(operationId, 0.9, f"Uploaded {successfulUploads}/{len(uploadResults)} file(s)")
self.services.chat.progressLogFinish(operationId, successfulUploads > 0)
self.services.chat.progressLogFinish(operationId, overallSuccess)
return ActionResult(
success=True,
success=overallSuccess,
documents=[
ActionDocument(
documentName=self._generateMeaningfulFileName("sharepoint_upload", "json", None, "uploadDocument"),
@ -260,7 +261,7 @@ async def uploadDocument(self, parameters: Dict[str, Any]) -> ActionResult:
if operationId:
try:
self.services.chat.progressLogFinish(operationId, False)
except:
except Exception:
pass
return ActionResult(
success=False,

View file

@ -17,14 +17,20 @@ class ApiClientHelper:
"""Helper for Microsoft Graph API calls"""
def __init__(self, methodInstance):
"""
Initialize API client helper.
Args:
methodInstance: Instance of MethodSharepoint (for access to services)
"""
self.method = methodInstance
self.services = methodInstance.services
self._session: aiohttp.ClientSession = None
async def _getSession(self) -> aiohttp.ClientSession:
    """Return the shared aiohttp session, lazily (re)creating it when missing or closed.

    The session is cached on the helper so repeated Graph API calls reuse one
    connection pool; a 30-second total timeout is applied at session creation.
    """
    needsFreshSession = self._session is None or self._session.closed
    if needsFreshSession:
        # Recreate rather than reuse: a closed session cannot be reopened.
        self._session = aiohttp.ClientSession(
            timeout=aiohttp.ClientTimeout(total=30)
        )
    return self._session
async def close(self):
    """Close the cached aiohttp session if it is currently open.

    After a successful close the cached reference is dropped so the next
    _getSession() call creates a fresh session.
    """
    session = self._session
    if session and not session.closed:
        await session.close()
        self._session = None
async def makeGraphApiCall(self, endpoint: str, method: str = "GET", data: bytes = None) -> Dict[str, Any]:
"""
@ -50,60 +56,28 @@ class ApiClientHelper:
url = f"https://graph.microsoft.com/v1.0/{endpoint}"
logger.info(f"Making Graph API call: {method} {url}")
# Set timeout to 30 seconds
timeout = aiohttp.ClientTimeout(total=30)
session = await self._getSession()
async with aiohttp.ClientSession(timeout=timeout) as session:
if method == "GET":
logger.debug(f"Starting GET request to {url}")
async with session.get(url, headers=headers) as response:
logger.info(f"Graph API response: {response.status}")
if response.status == 200:
result = await response.json()
logger.debug(f"Graph API success: {len(str(result))} characters response")
return result
else:
errorText = await response.text()
logger.error(f"Graph API call failed: {response.status} - {errorText}")
return {"error": f"API call failed: {response.status} - {errorText}"}
elif method == "PUT":
logger.debug(f"Starting PUT request to {url}")
async with session.put(url, headers=headers, data=data) as response:
logger.info(f"Graph API response: {response.status}")
if response.status in [200, 201]:
result = await response.json()
logger.debug(f"Graph API success: {len(str(result))} characters response")
return result
else:
errorText = await response.text()
logger.error(f"Graph API call failed: {response.status} - {errorText}")
return {"error": f"API call failed: {response.status} - {errorText}"}
elif method == "POST":
logger.debug(f"Starting POST request to {url}")
async with session.post(url, headers=headers, data=data) as response:
logger.info(f"Graph API response: {response.status}")
if response.status in [200, 201]:
result = await response.json()
logger.debug(f"Graph API success: {len(str(result))} characters response")
return result
else:
errorText = await response.text()
logger.error(f"Graph API call failed: {response.status} - {errorText}")
return {"error": f"API call failed: {response.status} - {errorText}"}
elif method == "DELETE":
logger.debug(f"Starting DELETE request to {url}")
async with session.delete(url, headers=headers) as response:
logger.info(f"Graph API response: {response.status}")
if response.status in [200, 204]:
logger.debug(f"Graph API DELETE success")
return {"success": True}
else:
errorText = await response.text()
logger.error(f"Graph API call failed: {response.status} - {errorText}")
return {"error": f"API call failed: {response.status} - {errorText}"}
successCodes = {"GET": [200], "PUT": [200, 201], "POST": [200, 201], "DELETE": [200, 204]}
httpMethod = getattr(session, method.lower(), None)
if not httpMethod:
return {"error": f"Unsupported HTTP method: {method}"}
kwargs = {"headers": headers}
if data is not None:
kwargs["data"] = data
async with httpMethod(url, **kwargs) as response:
logger.info(f"Graph API response: {response.status}")
if response.status in successCodes.get(method, [200]):
if method == "DELETE":
return {"success": True}
result = await response.json()
return result
else:
errorText = await response.text()
logger.error(f"Graph API call failed: {response.status} - {errorText}")
return {"error": f"API call failed: {response.status} - {errorText}"}
except asyncio.TimeoutError:
logger.error(f"Graph API call timed out after 30 seconds: {endpoint}")

View file

@ -14,11 +14,19 @@ class AdaptiveLearningEngine:
"""Enhanced learning engine that tracks validation patterns and adapts prompts"""
def __init__(self):
self.validationHistory = [] # Store validation results with context
self.failurePatterns = defaultdict(list) # Track failure patterns by action type
self.successPatterns = defaultdict(list) # Track success patterns
self.actionAttempts = defaultdict(int) # Track attempt counts per action
self.learningInsights = {} # Store learned insights per workflow
self.validationHistory = []
self.failurePatterns = defaultdict(list)
self.successPatterns = defaultdict(list)
self.actionAttempts = defaultdict(int)
self.learningInsights = {}
def reset(self):
    """Reset all learned state for a new workflow session.

    Clears every accumulator in place (history, failure/success patterns,
    attempt counters, and per-workflow insights) so no state leaks between
    workflow runs.
    """
    stores = (
        self.validationHistory,
        self.failurePatterns,
        self.successPatterns,
        self.actionAttempts,
        self.learningInsights,
    )
    for store in stores:
        store.clear()
def recordValidationResult(self, validationResult: Dict[str, Any], actionContext: Dict[str, Any],
workflowId: str, attemptNumber: int):
@ -195,15 +203,6 @@ class AdaptiveLearningEngine:
for issue, count in list(commonIssues.items())[:3]: # Top 3 issues
guidance_parts.append(f"- {issue} (occurred {count} times)")
# Add specific action guidance based on user prompt
if "email" in userPrompt.lower() and "outlook" in userPrompt.lower():
if any("account" in str(issue).lower() for issue in commonIssues.keys()):
guidance_parts.append("SPECIFIC GUIDANCE: Ensure email is sent from the correct account (valueon).")
if any("attachment" in str(issue).lower() for issue in commonIssues.keys()):
guidance_parts.append("SPECIFIC GUIDANCE: Verify PDF attachment is properly included.")
if any("summary" in str(issue).lower() for issue in commonIssues.keys()):
guidance_parts.append("SPECIFIC GUIDANCE: Include German summary in email body.")
return "\n".join(guidance_parts) if guidance_parts else "No specific guidance available."
def _generateParameterGuidance(self, actionName: str, parametersContext: str,
@ -219,12 +218,11 @@ class AdaptiveLearningEngine:
if attemptNumber and attemptNumber >= 3:
guidanceParts.append(f"Attempt #{attemptNumber}: Adjust parameters based on validation feedback.")
# Generic issues summary
commonIssues = failureAnalysis.get('commonIssues', {}) or {}
if commonIssues:
guidanceParts.append("Address the following parameter issues:")
for issueKey, issueDesc in commonIssues.items():
guidanceParts.append(f"- {issueKey}: {issueDesc}")
for issueText, count in commonIssues.items():
guidanceParts.append(f"- {issueText} (occurred {count} time{'s' if count != 1 else ''})")
# Keep guidance format stable
return "\n".join(guidanceParts) if guidanceParts else "Use standard parameter values."

View file

@ -273,16 +273,15 @@ class ContentValidator:
elif section.get("content_type") in ["paragraph", "heading"]:
if elements and isinstance(elements, list) and len(elements) > 0:
textElement = elements[0]
# Ensure textElement is a dictionary before accessing
if isinstance(textElement, dict):
content = textElement.get("content", {})
if isinstance(content, dict):
text = content.get("text", "")
else:
text = textElement.get("text", "")
if text:
sectionSummary["textLength"] = len(text)
sectionSummary["wordCount"] = len(text.split())
if isinstance(content, dict):
text = content.get("text", "")
else:
text = textElement.get("text", "")
if text:
sectionSummary["textLength"] = len(text)
sectionSummary["wordCount"] = len(text.split())
if section.get("textLength"):
sectionSummary["textLength"] = section.get("textLength")
@ -290,59 +289,47 @@ class ContentValidator:
elif section.get("content_type") == "code_block":
if elements and isinstance(elements, list) and len(elements) > 0:
codeElement = elements[0]
content = codeElement.get("content", {})
if isinstance(content, dict):
code = content.get("code", "")
language = content.get("language", "")
if code:
sectionSummary["codeLength"] = len(code)
sectionSummary["codeLineCount"] = code.count('\n') + 1
if language:
sectionSummary["language"] = language
if isinstance(codeElement, dict):
content = codeElement.get("content", {})
if isinstance(content, dict):
code = content.get("code", "")
language = content.get("language", "")
if code:
sectionSummary["codeLength"] = len(code)
sectionSummary["codeLineCount"] = code.count('\n') + 1
if language:
sectionSummary["language"] = language
# Wenn contentPartIds vorhanden sind, aber keine elements: Füge ContentParts-Metadaten hinzu
contentPartIds = section.get("contentPartIds", [])
if contentPartIds and not elements:
# Prüfe ob contentPartsMetadata vorhanden ist
contentPartsMetadata = section.get("contentPartsMetadata", [])
if contentPartsMetadata:
sectionSummary["contentPartsMetadata"] = contentPartsMetadata
else:
# Fallback: Zeige nur IDs wenn Metadaten nicht verfügbar
sectionSummary["contentPartIds"] = contentPartIds
sectionSummary["note"] = "ContentParts referenced but metadata not available"
# Include any additional fields from section (generic approach)
# BUT exclude type-specific KPIs that don't belong to this content_type
# AND exclude internal planning fields that confuse validation
contentType = section.get("content_type", "")
# Define KPIs that are ONLY valid for specific types
typeExclusiveKpis = {
"table": ["columnCount", "rowCount", "headers"], # Only for tables
"bullet_list": ["itemCount"], # Only for bullet_list
"list": ["itemCount"] # Only for list
"table": ["columnCount", "rowCount", "headers"],
"bullet_list": ["itemCount"],
"list": ["itemCount"]
}
excludedKpis = []
for kpiType, kpiFields in typeExclusiveKpis.items():
if kpiType != contentType:
excludedKpis.extend(kpiFields)
# Internal planning fields that should NOT be shown to validation AI
# These are implementation details, not content indicators
internalFields = ["generationHint", "useAiCall", "elements"]
for key, value in section.items():
if key not in sectionSummary and key not in internalFields and key not in excludedKpis:
# Don't copy type-specific KPIs if they're 0/empty and we didn't extract them ourselves
# This prevents copying columnCount: 0, rowCount: 0, headers: [] from structure generation phase
if key in ["columnCount", "rowCount", "headers", "itemCount"]:
# Skip if it's 0/empty - we'll only include KPIs we extracted from elements
if isinstance(value, int) and value == 0:
continue
if isinstance(value, list) and len(value) == 0:
continue
# Include simple types (str, int, float, bool, list of primitives)
if isinstance(value, (str, int, float, bool)) or (isinstance(value, list) and len(value) <= 10):
sectionSummary[key] = value
@ -486,7 +473,7 @@ class ContentValidator:
try:
json_str = json.dumps(data)
size_bytes = len(json_str.encode('utf-8'))
except:
except (TypeError, ValueError):
size_bytes = len(str(data).encode('utf-8'))
else:
size_bytes = len(str(data).encode('utf-8'))

View file

@ -16,6 +16,11 @@ class LearningEngine:
self.strategies = {}
self.feedbackHistory = []
def reset(self):
    """Reset all learned state for a new workflow session.

    Empties the strategy map and the feedback history in place so the engine
    starts clean without replacing the container objects.
    """
    for store in (self.strategies, self.feedbackHistory):
        store.clear()
def learnFromFeedback(self, feedback: Dict[str, Any], context: Any, taskIntent: Dict[str, Any]):
"""Learns from feedback and updates strategies - works on TASK level, not workflow level"""
try:

View file

@ -136,6 +136,7 @@ class ActionExecutor:
# Execute action and track success for progress log
result = None
actionSuccess = False
actionError = None
try:
result = await self.executeAction(
methodName=action.execMethod,
@ -144,23 +145,23 @@ class ActionExecutor:
)
actionSuccess = result.success if result else False
except Exception as e:
logger.error(f"Error executing action: {str(e)}")
logger.error(f"Error executing action {action.execMethod}.{action.execAction}: {str(e)}")
actionSuccess = False
actionError = str(e)
finally:
# Finish action progress tracking
try:
self.services.chat.progressLogFinish(actionOperationId, actionSuccess)
except Exception as e:
logger.error(f"Error finishing action progress log: {str(e)}")
# If action execution failed, return error result
if result is None:
action.setError("Action execution failed")
errorMsg = actionError or "Action execution failed"
action.setError(errorMsg)
return ActionResult(
success=False,
documents=[],
resultLabel=action.execResultLabel,
error="Action execution failed"
error=errorMsg
)
resultLabel = action.execResultLabel

View file

@ -319,56 +319,27 @@ class MessageCreator:
except Exception as e:
logger.error(f"Error creating error message: {str(e)}")
def _extractRoundNumberFromLabel(self, label: str) -> int:
"""Extract round number from a document label like 'round1_task1_action1_diagram_analysis'"""
def _extractNumberFromLabelPart(self, label: str, prefix: str) -> int:
"""Extract number following a prefix in a label like 'round1_task1_action1_context'.
Works for prefix='round', 'task', 'action'. Returns 0 on failure.
"""
try:
if not label or not isinstance(label, str):
return 0
# Parse label format: round{round}_task{task}_action{action}_{context}
if label.startswith('round'):
roundPart = label.split('_')[0] # Get 'round1' part
if roundPart.startswith('round'):
roundNumber = roundPart[5:] # Remove 'round' prefix
return int(roundNumber)
return 0
import re
pattern = rf'{prefix}(\d+)'
match = re.search(pattern, label)
return int(match.group(1)) if match else 0
except Exception as e:
logger.warning(f"Could not extract round number from label '{label}': {str(e)}")
logger.warning(f"Could not extract {prefix} number from label '{label}': {str(e)}")
return 0
def _extractRoundNumberFromLabel(self, label: str) -> int:
return self._extractNumberFromLabelPart(label, 'round')
def _extractTaskNumberFromLabel(self, label: str) -> int:
"""Extract task number from a document label like 'round1_task1_action1_diagram_analysis'"""
try:
if not label or not isinstance(label, str):
return 0
# Parse label format: round{round}_task{task}_action{action}_{context}
if '_task' in label:
taskPart = label.split('_task')[1]
if taskPart and '_' in taskPart:
taskNumber = taskPart.split('_')[0]
return int(taskNumber)
return 0
except Exception as e:
logger.warning(f"Could not extract task number from label '{label}': {str(e)}")
return 0
return self._extractNumberFromLabelPart(label, 'task')
def _extractActionNumberFromLabel(self, label: str) -> int:
"""Extract action number from a document label like 'round1_task1_action1_diagram_analysis'"""
try:
if not label or not isinstance(label, str):
return 0
# Parse label format: round{round}_task{task}_action{action}_{context}
if '_action' in label:
actionPart = label.split('_action')[1]
if actionPart and '_' in actionPart:
actionNumber = actionPart.split('_')[0]
return int(actionNumber)
return 0
except Exception as e:
logger.warning(f"Could not extract action number from label '{label}': {str(e)}")
return 0
return self._extractNumberFromLabelPart(label, 'action')

View file

@ -7,7 +7,6 @@ import json
import logging
from typing import Dict, Any
from modules.datamodels.datamodelChat import TaskStep, TaskContext, TaskPlan, WorkflowModeEnum
from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum, ProcessingModeEnum, PriorityEnum
from modules.workflows.processing.shared.promptGenerationTaskplan import (
generateTaskPlanningPrompt
)
@ -107,17 +106,6 @@ class TaskPlanner:
taskPlanningPromptTemplate = bundle.prompt
placeholders = bundle.placeholders
# Centralized AI call: Task planning (quality, detailed) with placeholders
options = AiCallOptions(
operationType=OperationTypeEnum.PLAN,
priority=PriorityEnum.QUALITY,
compressPrompt=False,
compressContext=False,
processingMode=ProcessingModeEnum.DETAILED,
maxCost=0.10,
maxProcessingTime=30
)
prompt = await self.services.ai.callAiPlanning(
prompt=taskPlanningPromptTemplate,
placeholders=placeholders,
@ -141,9 +129,11 @@ class TaskPlanner:
raise ValueError("Task plan missing 'tasks' field")
except Exception as e:
logger.error(f"Error parsing task plan response: {str(e)}")
taskPlanDict = {'tasks': []}
raise ValueError(f"Failed to parse AI task plan response: {str(e)}") from e
if not self._validateTaskPlan(taskPlanDict):
from modules.workflows.processing.core.validator import WorkflowValidator
validator = WorkflowValidator(self.services)
if not validator.validateTask(taskPlanDict):
logger.error("Generated task plan failed validation")
logger.error(f"AI Response: {prompt}")
logger.error(f"Parsed Task Plan: {json.dumps(taskPlanDict, indent=2)}")
@ -207,61 +197,4 @@ class TaskPlanner:
logger.error(f"Error in generateTaskPlan: {str(e)}")
raise
def _validateTaskPlan(self, taskPlan: Dict[str, Any]) -> bool:
"""Validate task plan structure"""
try:
if not isinstance(taskPlan, dict):
logger.error("Task plan is not a dictionary")
return False
if 'tasks' not in taskPlan or not isinstance(taskPlan['tasks'], list):
logger.error(f"Task plan missing 'tasks' field or not a list. Found: {type(taskPlan.get('tasks', 'MISSING'))}")
return False
# First pass: collect all task IDs to validate dependencies
taskIds = set()
for task in taskPlan['tasks']:
if not isinstance(task, dict):
logger.error(f"Task is not a dictionary: {type(task)}")
return False
if 'id' not in task:
logger.error(f"Task missing 'id' field: {task}")
return False
taskIds.add(task['id'])
# Second pass: validate each task
for i, task in enumerate(taskPlan['tasks']):
if not isinstance(task, dict):
logger.error(f"Task {i} is not a dictionary: {type(task)}")
return False
requiredFields = ['id', 'objective', 'successCriteria']
missingFields = [field for field in requiredFields if field not in task]
if missingFields:
logger.error(f"Task {i} missing required fields: {missingFields}")
return False
# Check for duplicate IDs (shouldn't happen after first pass, but safety check)
if task['id'] in taskIds and list(taskPlan['tasks']).count(task['id']) > 1:
logger.error(f"Task {i} has duplicate ID: {task['id']}")
return False
dependencies = task.get('dependencies', [])
if not isinstance(dependencies, list):
logger.error(f"Task {i} dependencies is not a list: {type(dependencies)}")
return False
for dep in dependencies:
if dep not in taskIds and dep != 'task_0':
logger.error(f"Task {i} has invalid dependency: {dep} (available: {list(taskIds) + ['task_0']})")
return False
logger.info(f"Task plan validation successful with {len(taskIds)} tasks")
return True
except Exception as e:
logger.error(f"Error validating task plan: {str(e)}")
return False

View file

@ -25,40 +25,35 @@ class WorkflowValidator:
logger.error(f"Task plan missing 'tasks' field or not a list. Found: {type(taskPlan.get('tasks', 'MISSING'))}")
return False
# First pass: collect all task IDs to validate dependencies
# Single pass: collect IDs (detect duplicates) and validate each task
taskIds = set()
for task in taskPlan['tasks']:
if not isinstance(task, dict):
logger.error(f"Task is not a dictionary: {type(task)}")
return False
if 'id' not in task:
logger.error(f"Task missing 'id' field: {task}")
return False
taskIds.add(task['id'])
# Second pass: validate each task
for i, task in enumerate(taskPlan['tasks']):
if not isinstance(task, dict):
logger.error(f"Task {i} is not a dictionary: {type(task)}")
return False
if 'id' not in task:
logger.error(f"Task {i} missing 'id' field: {task}")
return False
if task['id'] in taskIds:
logger.error(f"Task {i} has duplicate ID: {task['id']}")
return False
taskIds.add(task['id'])
requiredFields = ['id', 'objective', 'successCriteria']
missingFields = [field for field in requiredFields if field not in task]
if missingFields:
logger.error(f"Task {i} missing required fields: {missingFields}")
return False
# Check for duplicate IDs (shouldn't happen after first pass, but safety check)
if task['id'] in taskIds and list(taskPlan['tasks']).count(task['id']) > 1:
logger.error(f"Task {i} has duplicate ID: {task['id']}")
return False
dependencies = task.get('dependencies', [])
if not isinstance(dependencies, list):
logger.error(f"Task {i} dependencies is not a list: {type(dependencies)}")
return False
for dep in dependencies:
# Second pass: validate dependencies (all IDs now known)
for i, task in enumerate(taskPlan['tasks']):
for dep in task.get('dependencies', []):
if dep not in taskIds and dep != 'task_0':
logger.error(f"Task {i} has invalid dependency: {dep} (available: {list(taskIds) + ['task_0']})")
return False
@ -93,7 +88,7 @@ class WorkflowValidator:
missingFields = []
for field in requiredFields:
if field not in action or not action[field]:
if field not in action or action[field] is None:
missingFields.append(field)
if missingFields:
logger.error(f"Action {i} missing required fields: {missingFields}")

View file

@ -36,6 +36,9 @@ class AutomationMode(BaseMode):
- Or as direct JSON in userInput
"""
try:
# Reset action map to prevent state leaks from previous runs
self.taskActionMap = {}
# AUTOMATION mode ALWAYS requires a JSON plan to be provided in userInput
# Try to extract plan from userInput (embedded JSON or direct JSON)
templatePlan = None
@ -340,78 +343,6 @@ class AutomationMode(BaseMode):
error=str(e)
)
def _createActionItem(self, actionData: Dict[str, Any]) -> Optional[ActionItem]:
"""Create ActionItem from action data"""
try:
import uuid
from datetime import datetime, timezone
# Ensure ID is present
if "id" not in actionData or not actionData["id"]:
actionData["id"] = f"action_{uuid.uuid4()}"
# Ensure required fields
if "status" not in actionData:
actionData["status"] = TaskStatus.PENDING
if "execMethod" not in actionData:
logger.error("execMethod is required for task action")
return None
if "execAction" not in actionData:
logger.error("execAction is required for task action")
return None
if "execParameters" not in actionData:
actionData["execParameters"] = {}
# Use generic field separation based on ActionItem model
simpleFields, objectFields = self.services.interfaceDbChat._separateObjectFields(ActionItem, actionData)
# Create action in database
createdAction = self.services.interfaceDbChat.db.recordCreate(ActionItem, simpleFields)
# Convert to ActionItem model
return ActionItem(
id=createdAction["id"],
execMethod=createdAction["execMethod"],
execAction=createdAction["execAction"],
execParameters=createdAction.get("execParameters", {}),
execResultLabel=createdAction.get("execResultLabel"),
expectedDocumentFormats=createdAction.get("expectedDocumentFormats"),
status=createdAction.get("status", TaskStatus.PENDING),
error=createdAction.get("error"),
retryCount=createdAction.get("retryCount", 0),
retryMax=createdAction.get("retryMax", 3),
processingTime=createdAction.get("processingTime"),
timestamp=parseTimestamp(createdAction.get("timestamp"), default=self.services.utils.timestampGetUtc()),
result=createdAction.get("result"),
userMessage=createdAction.get("userMessage")
)
except Exception as e:
logger.error(f"Error creating task action: {str(e)}")
return None
def _updateWorkflowBeforeExecutingTask(self, taskNumber: int):
"""Update workflow object before executing a task"""
try:
workflow = self.services.workflow
updateData = {
"currentTask": taskNumber,
"currentAction": 0,
"totalActions": 0
}
workflow.currentTask = taskNumber
workflow.currentAction = 0
workflow.totalActions = 0
self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
logger.info(f"Updated workflow {workflow.id} before executing task {taskNumber}")
except Exception as e:
logger.error(f"Error updating workflow before executing task: {str(e)}")
def _updateWorkflowAfterActionPlanning(self, totalActions: int):
"""Update workflow object after action planning"""
try:
@ -423,17 +354,6 @@ class AutomationMode(BaseMode):
except Exception as e:
logger.error(f"Error updating workflow after action planning: {str(e)}")
def _updateWorkflowBeforeExecutingAction(self, actionNumber: int):
"""Update workflow object before executing an action"""
try:
workflow = self.services.workflow
updateData = {"currentAction": actionNumber}
workflow.currentAction = actionNumber
self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
logger.info(f"Updated workflow {workflow.id} before executing action {actionNumber}")
except Exception as e:
logger.error(f"Error updating workflow before executing action: {str(e)}")
def _setWorkflowTotals(self, totalTasks: int = None, totalActions: int = None):
"""Set total counts for workflow progress tracking"""
try:

View file

@ -4,14 +4,16 @@
# Abstract base class for workflow modes
from abc import ABC, abstractmethod
import uuid
import logging
from typing import List, Dict, Any
from modules.datamodels.datamodelChat import TaskStep, TaskContext, TaskResult, ActionItem
from typing import List, Dict, Any, Optional
from modules.datamodels.datamodelChat import TaskStep, TaskContext, TaskResult, ActionItem, TaskStatus
from modules.datamodels.datamodelChat import ChatWorkflow
from modules.workflows.processing.core.taskPlanner import TaskPlanner
from modules.workflows.processing.core.actionExecutor import ActionExecutor
from modules.workflows.processing.core.messageCreator import MessageCreator
from modules.workflows.processing.core.validator import WorkflowValidator
from modules.shared.timeUtils import parseTimestamp
logger = logging.getLogger(__name__)
@ -44,3 +46,75 @@ class BaseMode(ABC):
async def createTaskPlanMessage(self, taskPlan, workflow: ChatWorkflow):
    """Delegate task-plan message creation to the shared MessageCreator (common to all modes)."""
    creator = self.messageCreator
    return await creator.createTaskPlanMessage(taskPlan, workflow)
def _createActionItem(self, actionData: Dict[str, Any]) -> Optional[ActionItem]:
    """Create an ActionItem from action data, persist to DB, and return the model instance.

    Fills in id/status/execParameters defaults, requires execMethod and
    execAction, writes the simple fields to the database, then rebuilds the
    model from the created record. Returns None on missing required fields
    or any persistence error.
    """
    try:
        if not actionData.get("id"):
            actionData["id"] = f"action_{uuid.uuid4()}"
        actionData.setdefault("status", TaskStatus.PENDING)
        # execMethod and execAction are mandatory; bail out on the first one missing
        for required in ("execMethod", "execAction"):
            if required not in actionData:
                logger.error(f"{required} is required for task action")
                return None
        actionData.setdefault("execParameters", {})
        dbInterface = self.services.interfaceDbChat
        simpleFields, _objectFields = dbInterface._separateObjectFields(ActionItem, actionData)
        record = dbInterface.db.recordCreate(ActionItem, simpleFields)
        # Rehydrate the model from the stored record so DB-side defaults are reflected
        return ActionItem(
            id=record["id"],
            execMethod=record["execMethod"],
            execAction=record["execAction"],
            execParameters=record.get("execParameters", {}),
            execResultLabel=record.get("execResultLabel"),
            expectedDocumentFormats=record.get("expectedDocumentFormats"),
            status=record.get("status", TaskStatus.PENDING),
            error=record.get("error"),
            retryCount=record.get("retryCount", 0),
            retryMax=record.get("retryMax", 3),
            processingTime=record.get("processingTime"),
            timestamp=parseTimestamp(record.get("timestamp"), default=self.services.utils.timestampGetUtc()),
            result=record.get("result"),
            userMessage=record.get("userMessage")
        )
    except Exception as e:
        logger.error(f"Error creating task action: {str(e)}")
        return None
def _updateWorkflowBeforeExecutingTask(self, taskNumber: int):
    """Update workflow state before executing a task.

    Positions the workflow at the given task and resets the action
    counters, updating both the in-memory object and the DB record.
    Failures are logged and suppressed.
    """
    try:
        workflow = self.services.workflow
        workflow.currentTask = taskNumber
        workflow.currentAction = 0
        workflow.totalActions = 0
        # Persist exactly the fields mutated above
        updateData = {
            "currentTask": workflow.currentTask,
            "currentAction": workflow.currentAction,
            "totalActions": workflow.totalActions
        }
        self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
        logger.info(f"Updated workflow {workflow.id} before executing task {taskNumber}")
    except Exception as e:
        logger.error(f"Error updating workflow before executing task: {str(e)}")
def _updateWorkflowBeforeExecutingAction(self, actionNumber: int):
    """Update workflow state before executing an action.

    Writes the new currentAction to the in-memory workflow and the DB
    record; bookkeeping errors are logged, never raised.
    """
    try:
        wf = self.services.workflow
        payload = {"currentAction": actionNumber}
        wf.currentAction = actionNumber
        self.services.interfaceDbChat.updateWorkflow(wf.id, payload)
        logger.info(f"Updated workflow {wf.id} before executing action {actionNumber}")
    except Exception as e:
        logger.error(f"Error updating workflow before executing action: {str(e)}")

View file

@ -116,6 +116,7 @@ class DynamicMode(BaseMode):
step = 1
decision = None
lastStepFailed = False
while step <= state.max_steps:
checkWorkflowStopped(self.services)
@ -282,6 +283,7 @@ class DynamicMode(BaseMode):
except Exception as e:
logger.error(f"Dynamic step {step} error: {e}")
lastStepFailed = True
break
# NEW: Use adaptive stopping logic
@ -296,19 +298,24 @@ class DynamicMode(BaseMode):
step += 1
# Summarize task result for dynamic mode
status = TaskStatus.COMPLETED
success = True
# Get feedback from last decision if available
lastDecision = context.previousReviewResult[-1] if hasattr(context, 'previousReviewResult') and context.previousReviewResult else None
feedback = lastDecision.reason if lastDecision and isinstance(lastDecision, ReviewResult) else 'Completed'
if lastDecision and isinstance(lastDecision, ReviewResult) and lastDecision.status == 'success':
if lastStepFailed:
status = TaskStatus.FAILED
success = False
elif lastDecision and isinstance(lastDecision, ReviewResult) and lastDecision.status in ('stop', 'failed'):
status = TaskStatus.FAILED
success = False
else:
status = TaskStatus.COMPLETED
success = True
# Create proper ReviewResult for completion message
completionReviewResult = ReviewResult(
status='success',
status='success' if success else 'failed',
reason=feedback,
qualityScore=lastDecision.qualityScore if lastDecision and isinstance(lastDecision, ReviewResult) else 8.0,
qualityScore=lastDecision.qualityScore if lastDecision and isinstance(lastDecision, ReviewResult) else (8.0 if success else 2.0),
metCriteria=[],
improvements=[]
)
@ -1003,12 +1010,15 @@ class DynamicMode(BaseMode):
# Detect repeated actions
actionCounts = {}
for entry in actionHistory:
# Extract action name (after first space, before next space or {)
parts = entry.split()
if len(parts) > 1:
# Skip "Step", "Refinement" prefixes and get the action name
actionName = parts[1] if parts[0] in ['Step', 'Refinement'] else parts[0]
actionCounts[actionName] = actionCounts.get(actionName, 0) + 1
# Format: "Step N: actionName ..." or "Refinement N: actionName ..."
# Extract the action name after "prefix N:"
colonIdx = entry.find(':')
if colonIdx >= 0:
afterColon = entry[colonIdx + 1:].strip().split()
actionName = afterColon[0] if afterColon else 'unknown'
else:
actionName = entry.split()[0] if entry.split() else 'unknown'
actionCounts[actionName] = actionCounts.get(actionName, 0) + 1
repeatedActions = [action for action, count in actionCounts.items() if count >= 2]
if repeatedActions:
@ -1172,150 +1182,6 @@ Return only the user-friendly message, no technical details."""
logger.error(f"Error generating action result message: {str(e)}")
return f"{method}.{actionName} action completed"
def _createActionItem(self, actionData: Dict[str, Any]) -> ActionItem:
    """Creates a new task action for Dynamic mode.

    Defaults id/status/execParameters, requires execMethod and execAction,
    persists the simple fields via recordCreate, and rebuilds an ActionItem
    (including resultDocuments) from the stored record. Returns None when
    validation or persistence fails.
    """
    try:
        import uuid
        if not actionData.get("id"):
            actionData["id"] = f"action_{uuid.uuid4()}"
        if "status" not in actionData:
            actionData["status"] = TaskStatus.PENDING
        # Both exec fields are mandatory; report the first one missing
        missing = next((f for f in ("execMethod", "execAction") if f not in actionData), None)
        if missing is not None:
            logger.error(f"{missing} is required for task action")
            return None
        if "execParameters" not in actionData:
            actionData["execParameters"] = {}
        dbInterface = self.services.interfaceDbChat
        # Generic field separation based on the ActionItem model
        simpleFields, _ = dbInterface._separateObjectFields(ActionItem, actionData)
        created = dbInterface.db.recordCreate(ActionItem, simpleFields)
        fallbackTimestamp = self.services.utils.timestampGetUtc()
        # Convert the created record back into an ActionItem model
        return ActionItem(
            id=created["id"],
            execMethod=created["execMethod"],
            execAction=created["execAction"],
            execParameters=created.get("execParameters", {}),
            execResultLabel=created.get("execResultLabel"),
            expectedDocumentFormats=created.get("expectedDocumentFormats"),
            status=created.get("status", TaskStatus.PENDING),
            error=created.get("error"),
            retryCount=created.get("retryCount", 0),
            retryMax=created.get("retryMax", 3),
            processingTime=created.get("processingTime"),
            timestamp=parseTimestamp(created.get("timestamp"), default=fallbackTimestamp),
            result=created.get("result"),
            resultDocuments=created.get("resultDocuments", []),
            userMessage=created.get("userMessage")
        )
    except Exception as e:
        logger.error(f"Error creating task action: {str(e)}")
        return None
def _updateWorkflowBeforeExecutingTask(self, taskNumber: int):
    """Update workflow object before executing a task.

    Resets action counters and positions the workflow at the given task,
    mirroring the change to the database. Errors are logged, not raised.
    """
    try:
        workflow = self.services.workflow
        updateData = {
            "currentTask": taskNumber,
            "currentAction": 0,
            "totalActions": 0
        }
        # Apply the same values to the in-memory workflow object
        for field, value in updateData.items():
            setattr(workflow, field, value)
        self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
        logger.info(f"Updated workflow {workflow.id} before executing task {taskNumber}: {updateData}")
    except Exception as e:
        logger.error(f"Error updating workflow before executing task: {str(e)}")
def _updateWorkflowBeforeExecutingAction(self, actionNumber: int):
    """Update workflow object before executing an action.

    Syncs the in-memory workflow and the DB record to the action number
    about to run; bookkeeping failures are logged and suppressed.
    """
    try:
        workflow = self.services.workflow
        updateData = {"currentAction": actionNumber}
        workflow.currentAction = updateData["currentAction"]
        self.services.interfaceDbChat.updateWorkflow(workflow.id, updateData)
        logger.info(f"Updated workflow {workflow.id} before executing action {actionNumber}: {updateData}")
    except Exception as e:
        logger.error(f"Error updating workflow before executing action: {str(e)}")
def _createActionItem(self, actionData: Dict[str, Any]) -> ActionItem:
    """Creates a new task action for Dynamic mode.

    NOTE(review): this is a byte-for-byte duplicate of the _createActionItem
    defined earlier in this class; in Python the later definition silently
    shadows the earlier one, so only this copy is ever called. One of the
    two should be removed.

    Fills in id/status/execParameters defaults, requires execMethod and
    execAction, persists the simple fields via recordCreate, and rebuilds
    an ActionItem from the created record. Returns None on missing
    required fields or any error.
    """
    try:
        import uuid
        # Ensure ID is present
        if "id" not in actionData or not actionData["id"]:
            actionData["id"] = f"action_{uuid.uuid4()}"
        # Ensure required fields
        if "status" not in actionData:
            actionData["status"] = TaskStatus.PENDING
        if "execMethod" not in actionData:
            logger.error("execMethod is required for task action")
            return None
        if "execAction" not in actionData:
            logger.error("execAction is required for task action")
            return None
        if "execParameters" not in actionData:
            actionData["execParameters"] = {}
        # Use generic field separation based on ActionItem model
        simpleFields, objectFields = self.services.interfaceDbChat._separateObjectFields(ActionItem, actionData)
        # Create action in database
        createdAction = self.services.interfaceDbChat.db.recordCreate(ActionItem, simpleFields)
        # Convert to ActionItem model
        return ActionItem(
            id=createdAction["id"],
            execMethod=createdAction["execMethod"],
            execAction=createdAction["execAction"],
            execParameters=createdAction.get("execParameters", {}),
            execResultLabel=createdAction.get("execResultLabel"),
            expectedDocumentFormats=createdAction.get("expectedDocumentFormats"),
            status=createdAction.get("status", TaskStatus.PENDING),
            error=createdAction.get("error"),
            retryCount=createdAction.get("retryCount", 0),
            retryMax=createdAction.get("retryMax", 3),
            processingTime=createdAction.get("processingTime"),
            timestamp=parseTimestamp(createdAction.get("timestamp"), default=self.services.utils.timestampGetUtc()),
            result=createdAction.get("result"),
            resultDocuments=createdAction.get("resultDocuments", []),
            userMessage=createdAction.get("userMessage")
        )
    except Exception as e:
        logger.error(f"Error creating task action: {str(e)}")
        return None

View file

@ -5,23 +5,22 @@
import logging
from typing import List, Optional
from modules.datamodels.datamodelChat import TaskStep, ActionResult, Observation
from modules.datamodels.datamodelChat import TaskStep, ActionResult
logger = logging.getLogger(__name__)
class TaskExecutionState:
"""Manages execution state for a task with retry logic"""
def __init__(self, task_step: TaskStep):
self.task_step = task_step
self.successful_actions: List[ActionResult] = [] # Preserved across retries
self.failed_actions: List[ActionResult] = [] # For analysis
def __init__(self, taskStep: TaskStep):
self.task_step = taskStep
self.successful_actions: List[ActionResult] = []
self.failed_actions: List[ActionResult] = []
self.current_action_index = 0
self.retry_count = 0
self.max_retries = 3
# Iterative loop (dynamic mode)
self.current_step = 0
self.max_steps = 0 # Will be overridden by workflow.maxSteps from workflowManager.py
self.max_steps = 0
def addSuccessfulAction(self, action_result: ActionResult):
"""Add a successful action to the state"""
@ -58,48 +57,25 @@ class TaskExecutionState:
patterns.append("permission_issues")
return list(set(patterns))
def shouldContinue(observation: Optional[Observation], review=None, current_step: int = 0, max_steps: int = 1) -> bool:
"""Helper to decide if the iterative loop should continue
def shouldContinue(observation=None, review=None, current_step: int = 0, max_steps: int = 1) -> bool:
"""Helper to decide if the iterative loop should continue.
Args:
observation: Observation Pydantic model with action execution results
review: ReviewResult or dict with review decision (optional)
current_step: Current step number in the iteration
max_steps: Maximum allowed steps
Returns:
bool: True if loop should continue, False if should stop
Logic:
- Stop if max steps reached
- Stop if review indicates 'stop' or success criteria are met
- Continue if observation indicates failure but allow one more step (caller caps by max_steps)
Returns False if max steps reached or review indicates 'stop'/'success'.
"""
try:
# Stop if max steps reached
if current_step >= max_steps:
logger.info(f"Stopping workflow: reached max_steps limit ({current_step} >= {max_steps})")
return False
# Check review decision (can be ReviewResult model or dict)
if review:
if hasattr(review, 'status'):
# ReviewResult Pydantic model
if review.status in ('stop', 'success'):
return False
elif isinstance(review, dict):
# Legacy dict format
decision = review.get('decision') or review.get('status')
if decision in ('stop', 'success'):
return False
# Check observation: if hard failure with no documents, allow one more step
# The caller will enforce max_steps limit
if observation:
if observation.success is False and observation.documentsCount == 0:
# Allow next step once; the caller caps by max_steps
return True
return True
except Exception as e:
logger.warning(f"Error in shouldContinue: {e}")

View file

@ -19,117 +19,57 @@ methods = {}
def discoverMethods(serviceCenter):
"""Dynamically discover all method classes and their actions in modules methods package.
CRITICAL: If methods are already discovered, updates their Services reference to ensure
they use the current workflow (self.services.workflow). This prevents stale workflow IDs
from being used when a new workflow starts.
Always creates fresh method instances bound to the given serviceCenter,
preventing stale or cross-workflow service references.
"""
global methods
try:
# Import the methods package
methodsPackage = importlib.import_module('modules.workflows.methods')
# Discover all modules and packages in the methods package
# Clear and rebuild to prevent cross-workflow state contamination
methods.clear()
uniqueCount = 0
for _, name, isPkg in pkgutil.iter_modules(methodsPackage.__path__):
if name.startswith('method'):
try:
if isPkg:
# Package (folder) - import __init__.py which exports the Method class
module = importlib.import_module(f'modules.workflows.methods.{name}')
else:
# Module (file) - import directly
module = importlib.import_module(f'modules.workflows.methods.{name}')
module = importlib.import_module(f'modules.workflows.methods.{name}')
# Find all classes in the module that inherit from MethodBase
for itemName, item in inspect.getmembers(module):
if (inspect.isclass(item) and
issubclass(item, MethodBase) and
item != MethodBase):
# Check if method already exists in cache
shortName = itemName.replace('Method', '').lower()
if itemName in methods or shortName in methods:
# Method already discovered - update Services reference to use current workflow
existingMethodInfo = methods.get(itemName) or methods.get(shortName)
if existingMethodInfo and existingMethodInfo.get('instance'):
existingMethodInfo['instance'].services = serviceCenter
logger.debug(f"Updated Services reference for cached method {itemName} to use current workflow")
else:
# Method exists but instance is missing - recreate it
methodInstance = item(serviceCenter)
actions = methodInstance.actions
methodInfo = {
'instance': methodInstance,
'actions': actions,
'description': item.__doc__ or f"Method {itemName}"
}
methods[itemName] = methodInfo
methods[shortName] = methodInfo
logger.info(f"Recreated method {itemName} (short: {shortName}) with {len(actions)} actions")
else:
# Method not discovered yet - create new instance
methodInstance = item(serviceCenter)
# Use the actions property from MethodBase which handles WorkflowActionDefinition
actions = methodInstance.actions
# Create method info
methodInfo = {
'instance': methodInstance,
'actions': actions,
'description': item.__doc__ or f"Method {itemName}"
}
# Store the method with full class name
methods[itemName] = methodInfo
# Also store with short name for action executor access
methods[shortName] = methodInfo
logger.info(f"Discovered method {itemName} (short: {shortName}) with {len(actions)} actions")
# Skip if already processed (via another module path)
if itemName in methods:
continue
methodInstance = item(serviceCenter)
actions = methodInstance.actions
methodInfo = {
'instance': methodInstance,
'actions': actions,
'description': item.__doc__ or f"Method {itemName}"
}
methods[itemName] = methodInfo
methods[shortName] = methodInfo
uniqueCount += 1
logger.info(f"Discovered method {itemName} (short: {shortName}) with {len(actions)} actions")
except Exception as e:
logger.error(f"Error discovering method {name}: {str(e)}")
continue
logger.info(f"Discovered/updated {len(methods)} method entries total")
logger.info(f"Discovered {uniqueCount} unique methods ({len(methods)} entries with aliases)")
except Exception as e:
logger.error(f"Error discovering methods: {str(e)}")
def getMethodsList(serviceCenter):
    """Get a list of available methods with their signatures.

    Builds a human-readable catalog of every discovered method and its
    actions, including parameter signatures. Lazily triggers discovery
    when the registry is empty.

    Fix: the registry stores each method under BOTH its class name and a
    short alias that point at the same info dict, so iterating .items()
    naively would list every method twice. Alias entries are skipped by
    de-duplicating on the identity of the shared info dict; the first key
    encountered (the full class name, per insertion order) is used.
    """
    if not methods:
        discoverMethods(serviceCenter)
    methodsList = []
    seenInfos = set()  # identities of info dicts already rendered
    for methodName, methodInfo in methods.items():
        if id(methodInfo) in seenInfos:
            continue  # alias entry for a method already listed
        seenInfos.add(id(methodInfo))
        methodDescription = methodInfo['description']
        actionsList = []
        for actionName, actionInfo in methodInfo['actions'].items():
            actionDescription = actionInfo['description']
            parameters = actionInfo['parameters']
            # Build parameter signature, e.g. "(query: str, limit: int = 10)"
            paramSig = []
            for paramName, paramInfo in parameters.items():
                paramType = paramInfo['type']
                paramRequired = paramInfo['required']
                paramDefault = paramInfo['default']
                if paramRequired:
                    paramSig.append(f"{paramName}: {paramType}")
                else:
                    defaultStr = f" = {paramDefault}" if paramDefault is not None else " = None"
                    paramSig.append(f"{paramName}: {paramType}{defaultStr}")
            paramSignature = f"({', '.join(paramSig)})" if paramSig else "()"
            actionsList.append(f"- {actionName}{paramSignature}: {actionDescription}")
        actionsStr = "\n".join(actionsList)
        methodsList.append(f"**{methodName}**: {methodDescription}\n{actionsStr}")
    return "\n\n".join(methodsList)
def getActionParameterList(methodName: str, actionName: str, methods: Dict[str, Any]) -> str:
"""Get action parameter list from WorkflowActionParameter structure for AI parameter generation (list only)."""
try:

View file

@ -39,6 +39,26 @@ from typing import Dict, Any, List
logger = logging.getLogger(__name__)
from modules.workflows.processing.shared.methodDiscovery import (methods, discoverMethods)
from modules.datamodels.datamodelChat import Observation
def _observationToDict(obs) -> dict:
"""Convert an Observation (Pydantic model or dict) to a plain dict."""
if isinstance(obs, dict):
return obs.copy()
if hasattr(obs, 'model_dump'):
return obs.model_dump(exclude_none=True)
if hasattr(obs, 'dict'):
return obs.dict()
return {"raw": str(obs)}
def _redactSnippets(obsDict: dict):
"""Replace large snippet strings with a metadata indicator."""
if 'previews' in obsDict and isinstance(obsDict['previews'], list):
for preview in obsDict['previews']:
if isinstance(preview, dict) and 'snippet' in preview:
preview['snippet'] = f"[Content: {len(preview.get('snippet', ''))} characters]"
def extractUserPrompt(context: Any) -> str:
"""Extract user prompt from context. Maps to {{KEY:USER_PROMPT}}.
@ -71,22 +91,17 @@ def extractUserPrompt(context: Any) -> str:
def extractNormalizedRequest(services: Any) -> str:
"""Extract normalized user request from services. Maps to {{KEY:NORMALIZED_REQUEST}}.
Returns the full normalized request from user input analysis (preserves all constraints and details).
CRITICAL: Must return the actual normalizedRequest from analysis, NOT intent.
"""
try:
# Get normalized request from currentUserPromptNormalized (stores the normalizedRequest from analysis)
if services and getattr(services, 'currentUserPromptNormalized', None):
normalized = services.currentUserPromptNormalized
# Validate that it's not the intent (which is shorter and less detailed)
# Intent is typically a concise objective, normalized request should be longer and more detailed
workflowIntent = getattr(services.workflow, '_workflowIntent', {}) if hasattr(services, 'workflow') and services.workflow else {}
intent = workflowIntent.get('intent', '')
# If normalized matches intent exactly, it's wrong - log warning
if intent and normalized == intent:
logger.warning(f"extractNormalizedRequest: normalized request matches intent - this is incorrect! normalized={normalized[:100]}...")
# Try to get from workflow intent or return error message
return f"ERROR: Normalized request not properly stored. Expected detailed request, got intent: {intent}"
# Fall back to intent rather than injecting an error string into the LLM prompt
return intent
return normalized
@ -346,49 +361,12 @@ def extractReviewContent(context: Any) -> str:
return result_summary
elif hasattr(context, 'observation') and context.observation:
# For observation data, show full content but handle documents specially
# Handle both Pydantic Observation model and dict format
from modules.datamodels.datamodelChat import Observation
if isinstance(context.observation, Observation):
# Convert Pydantic model to dict
obs_dict = context.observation.model_dump(exclude_none=True) if hasattr(context.observation, 'model_dump') else context.observation.dict()
elif isinstance(context.observation, dict):
obs_dict = context.observation.copy()
else:
# Fallback: try to serialize as-is
obs_dict = context.observation.model_dump(exclude_none=True) if hasattr(context.observation, 'model_dump') else context.observation.dict()
# If there are previews with documents, show only metadata
if 'previews' in obs_dict and isinstance(obs_dict['previews'], list):
for preview in obs_dict['previews']:
if isinstance(preview, dict) and 'snippet' in preview:
# Replace snippet with metadata indicator
preview['snippet'] = f"[Content: {len(preview.get('snippet', ''))} characters]"
obs_dict = _observationToDict(context.observation)
_redactSnippets(obs_dict)
return json.dumps(obs_dict, indent=2, ensure_ascii=False)
elif hasattr(context, 'stepResult') and context.stepResult and 'observation' in context.stepResult:
# For observation data in stepResult, show full content but handle documents specially
observation = context.stepResult['observation']
# Handle both Pydantic Observation model and dict format
from modules.datamodels.datamodelChat import Observation
if isinstance(observation, Observation):
# Convert Pydantic model to dict
obs_dict = observation.model_dump(exclude_none=True) if hasattr(observation, 'model_dump') else observation.dict()
elif isinstance(observation, dict):
obs_dict = observation.copy()
else:
# Fallback: try to serialize
obs_dict = observation.model_dump(exclude_none=True) if hasattr(observation, 'model_dump') else observation.dict()
# If there are previews with documents, show only metadata
if 'previews' in obs_dict and isinstance(obs_dict['previews'], list):
for preview in obs_dict['previews']:
if isinstance(preview, dict) and 'snippet' in preview:
# Replace snippet with metadata indicator
preview['snippet'] = f"[Content: {len(preview.get('snippet', ''))} characters]"
obs_dict = _observationToDict(context.stepResult['observation'])
_redactSnippets(obs_dict)
return json.dumps(obs_dict, indent=2, ensure_ascii=False)
else:
return "No review content available"
@ -449,41 +427,22 @@ def extractLatestRefinementFeedback(context: Any) -> str:
CRITICAL: If ERROR level logs are found, refinement should stop processing.
"""
try:
# First check for ERROR level logs in workflow
if hasattr(context, 'workflow') and context.workflow:
try:
import modules.interfaces.interfaceDbChat as interfaceDbChat
from modules.interfaces.interfaceDbApp import getRootInterface
rootInterface = getRootInterface()
interfaceDbChat = interfaceDbChat.getInterface(rootInterface.currentUser)
# Get workflow logs
chatData = interfaceDbChat.getUnifiedChatData(context.workflow.id, None)
logs = chatData.get("logs", [])
# Check for ERROR level logs
for log in logs:
if isinstance(log, dict):
log_level = log.get("level", "").upper()
log_message = str(log.get("message", ""))
if log_level == "ERROR" or "ERROR" in log_message.upper():
return f"CRITICAL: Processing stopped due to ERROR in logs: {log_message[:200]}"
except Exception as log_check_error:
# If we can't check logs, continue with normal feedback extraction
logger.warning(f"Could not check for ERROR logs: {str(log_check_error)}")
if not hasattr(context, 'previousReviewResult') or not context.previousReviewResult or not isinstance(context.previousReviewResult, list):
return "No previous refinement feedback available"
# Get the most recent refinement decision
# Get the most recent refinement decision (supports both ReviewResult objects and dicts)
latest_decision = context.previousReviewResult[-1]
if not isinstance(latest_decision, dict):
# Normalize to dict if it's a Pydantic model (e.g. ReviewResult)
if hasattr(latest_decision, 'model_dump'):
latest_decision = latest_decision.model_dump()
elif not isinstance(latest_decision, dict):
return "No previous refinement feedback available"
feedback_parts = []
# Add decision and reason
decision = latest_decision.get('decision', 'unknown')
# Add decision and reason (ReviewResult uses 'status', legacy uses 'decision')
decision = latest_decision.get('status') or latest_decision.get('decision', 'unknown')
reason = latest_decision.get('reason', 'No reason provided')
feedback_parts.append(f"Latest Decision: {decision}")
feedback_parts.append(f"Reason: {reason}")

View file

@ -46,12 +46,19 @@ def generateDynamicPlanSelectionPrompt(services, context: Any, learningEngine=No
adaptiveContext = learningEngine.getAdaptiveContextForActionSelection(workflowId, userPrompt)
if adaptiveContext:
# Add learning-aware placeholders
placeholders.extend([
PromptPlaceholder(label="ADAPTIVE_GUIDANCE", content=adaptiveContext.get('adaptiveGuidance', ''), summaryAllowed=True),
PromptPlaceholder(label="FAILURE_ANALYSIS", content=json.dumps(adaptiveContext.get('failureAnalysis', {}), indent=2), summaryAllowed=True),
PromptPlaceholder(label="ESCALATION_LEVEL", content=adaptiveContext.get('escalationLevel', 'low'), summaryAllowed=False),
])
# Always provide these placeholders so template tokens don't leak into the LLM prompt
if not adaptiveContext:
placeholders.extend([
PromptPlaceholder(label="ADAPTIVE_GUIDANCE", content="", summaryAllowed=True),
PromptPlaceholder(label="FAILURE_ANALYSIS", content="", summaryAllowed=True),
PromptPlaceholder(label="ESCALATION_LEVEL", content="low", summaryAllowed=False),
])
template = """Select exactly one next action to advance the task incrementally.
@ -60,7 +67,8 @@ CONTEXT: {{KEY:OVERALL_TASK_CONTEXT}}
OBJECTIVE: {{KEY:TASK_OBJECTIVE}}
=== AVAILABLE RESOURCES ===
AVAILABLE_DOCUMENTS_INDEX: {{KEY:AVAILABLE_DOCUMENTS_SUMMARY}}
AVAILABLE_DOCUMENTS_SUMMARY: {{KEY:AVAILABLE_DOCUMENTS_SUMMARY}}
AVAILABLE_DOCUMENTS_INDEX:
{{KEY:AVAILABLE_DOCUMENTS_INDEX}}
AVAILABLE_CONNECTIONS_INDEX:
{{KEY:AVAILABLE_CONNECTIONS_INDEX}}
@ -227,6 +235,13 @@ Excludes documents/connections/history entirely.
PromptPlaceholder(label="ATTEMPT_NUMBER", content=str(adaptiveContext.get('attemptNumber', 1)), summaryAllowed=False),
PromptPlaceholder(label="FAILURE_ANALYSIS", content=json.dumps(adaptiveContext.get('failureAnalysis', {}), indent=2), summaryAllowed=True),
])
if not adaptiveContext:
placeholders.extend([
PromptPlaceholder(label="PARAMETER_GUIDANCE", content="", summaryAllowed=True),
PromptPlaceholder(label="ATTEMPT_NUMBER", content="1", summaryAllowed=False),
PromptPlaceholder(label="FAILURE_ANALYSIS", content="", summaryAllowed=True),
])
template = """You are a parameter generator. Set the parameters for this specific action.

View file

@ -141,8 +141,9 @@ class WorkflowProcessor:
# Delegate to the appropriate mode
result = await self.mode.executeTask(taskStep, workflow, context)
# Complete progress tracking
self.services.chat.progressLogFinish(operationId, True)
# Complete progress tracking based on actual result
taskSuccess = result.success if hasattr(result, 'success') else True
self.services.chat.progressLogFinish(operationId, taskSuccess)
return result
except Exception as e:
@ -329,7 +330,7 @@ class WorkflowProcessor:
return handoverData
except Exception as e:
logger.error(f"Error in prepareTaskHandover: {str(e)}")
return {'error': str(e)}
raise
# Fast Path Implementation
@ -379,10 +380,7 @@ class WorkflowProcessor:
"################ USER INPUT START #################\n"
)
# Add sanitized user input with clear delimiters
# Escape curly braces for f-string safety, but preserve format (no quote wrapping)
sanitizedPrompt = prompt.replace('{', '{{').replace('}', '}}') if prompt else ""
complexityPrompt += f"{sanitizedPrompt}\n"
complexityPrompt += f"{prompt or ''}\n"
complexityPrompt += "################ USER INPUT FINISH #################\n\n"
@ -469,17 +467,14 @@ class WorkflowProcessor:
"Format your response as plain text (no markdown code blocks unless showing code examples)."
)
# Prepare AI call options for fast path (balanced, fast processing)
options = AiCallOptions(
operationType=OperationTypeEnum.DATA_ANALYSE,
priority=PriorityEnum.BALANCED,
processingMode=ProcessingModeEnum.BASIC,
maxCost=0.10, # Low cost for simple requests
maxProcessingTime=15 # Fast path should complete in 15s
maxCost=0.10,
maxProcessingTime=15
)
# Call AI via callAi() to ensure stats are stored
aiRequest = AiCallRequest(
prompt=fastPathPrompt,
context="",
@ -630,17 +625,23 @@ class WorkflowProcessor:
chatDocuments = []
if taskResult.actionResult and taskResult.actionResult.documents:
for actionDoc in taskResult.actionResult.documents:
if hasattr(actionDoc, 'documentData') and actionDoc.documentData:
# Create file in component storage
if hasattr(actionDoc, 'documentData') and actionDoc.documentData is not None:
rawData = actionDoc.documentData
if isinstance(rawData, bytes):
contentBytes = rawData
elif isinstance(rawData, str):
contentBytes = rawData.encode('utf-8')
else:
contentBytes = json.dumps(rawData, ensure_ascii=False).encode('utf-8')
fileItem = self.services.interfaceDbComponent.createFile(
name=actionDoc.documentName if hasattr(actionDoc, 'documentName') else f"task_{taskResult.taskId}_result.txt",
mimeType=actionDoc.mimeType if hasattr(actionDoc, 'mimeType') else "text/plain",
content=actionDoc.documentData if isinstance(actionDoc.documentData, bytes) else actionDoc.documentData.encode('utf-8')
content=contentBytes
)
# Persist file data
self.services.interfaceDbComponent.createFileData(
fileItem.id,
actionDoc.documentData if isinstance(actionDoc.documentData, bytes) else actionDoc.documentData.encode('utf-8')
contentBytes
)
# Get file info
@ -651,7 +652,7 @@ class WorkflowProcessor:
chatDoc = {
"fileId": fileItem.id,
"fileName": fileInfo.get("fileName", actionDoc.documentName) if fileInfo else actionDoc.documentName,
"fileSize": fileInfo.get("size", len(actionDoc.documentData) if isinstance(actionDoc.documentData, bytes) else len(actionDoc.documentData.encode('utf-8'))) if fileInfo else (len(actionDoc.documentData) if isinstance(actionDoc.documentData, bytes) else len(actionDoc.documentData.encode('utf-8'))),
"fileSize": fileInfo.get("size", len(contentBytes)) if fileInfo else len(contentBytes),
"mimeType": fileInfo.get("mimeType", actionDoc.mimeType) if fileInfo else actionDoc.mimeType,
"roundNumber": workflow.currentRound,
"taskNumber": workflow.getTaskIndex(),

View file

@ -8,7 +8,6 @@ import json
from modules.datamodels.datamodelChat import (
UserInputRequest,
ChatMessage,
ChatWorkflow,
ChatDocument,
WorkflowModeEnum
@ -44,11 +43,6 @@ class WorkflowManager:
# Store workflow in services for reference (this is the ChatWorkflow object)
self.services.workflow = workflow
# CRITICAL: Update all method instances to use the current Services object with the correct workflow
from modules.workflows.processing.shared.methodDiscovery import discoverMethods
discoverMethods(self.services)
logger.debug(f"Updated method instances to use workflow {self.services.workflow.id}")
if workflow.status == "running":
logger.info(f"Stopping running workflow {workflowId} before processing new prompt")
workflow.status = "stopped"
@ -57,12 +51,13 @@ class WorkflowManager:
"status": "stopped",
"lastActivity": currentTime
})
self.services.chat.storeLog(workflow, {
"message": "Workflow stopped for new prompt",
"type": "info",
"status": "stopped",
"progress": 1.0
})
if workflow.status == "stopped":
self.services.chat.storeLog(workflow, {
"message": "Workflow stopped for new prompt",
"type": "info",
"status": "stopped",
"progress": 1.0
})
newRound = workflow.currentRound + 1
self.services.chat.updateWorkflow(workflowId, {
@ -170,7 +165,10 @@ class WorkflowManager:
self.services.currentUserPrompt = userInput.prompt
# Reset progress logger for new workflow
self.services.chat._progressLogger = None
if hasattr(self.services.chat, 'resetProgressLogger'):
self.services.chat.resetProgressLogger()
else:
self.services.chat._progressLogger = None
# Reset workflow history flag at start of each workflow
setattr(self.services, '_needsWorkflowHistory', False)
@ -565,9 +563,10 @@ The following is the user's original input message. Analyze intent, normalize th
logger.info(f"Fast path completed successfully, response length: {len(responseText)} chars")
except WorkflowStoppedException:
raise
except Exception as e:
logger.error(f"Error in _executeFastPath: {str(e)}")
# Fall back to full workflow on error
logger.info("Falling back to full workflow due to fast path error")
taskPlan = await self._planTasks(userInput)
await self._executeTasks(taskPlan)
@ -897,8 +896,8 @@ The following is the user's original input message. Analyze intent, normalize th
failedActions=[],
successfulActions=[],
criteriaProgress={
'met_criteria': set(),
'unmet_criteria': set(),
'met_criteria': [],
'unmet_criteria': [],
'attempt_history': []
}
)
@ -1021,11 +1020,11 @@ The following is the user's original input message. Analyze intent, normalize th
})
return
elif workflow.status == 'failed':
# Create error message
lastError = getattr(workflow, '_lastError', None) or "Processing failed"
errorMessage = {
"workflowId": workflow.id,
"role": "assistant",
"message": f"Workflow failed: {'Unknown error'}",
"message": f"Workflow failed: {lastError}",
"status": "last",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": self.services.utils.timestampGetUtc(),
@ -1051,9 +1050,8 @@ The following is the user's original input message. Analyze intent, normalize th
"totalActions": workflow.totalActions
})
# Add failed log entry
self.services.chat.storeLog(workflow, {
"message": "Workflow failed: Unknown error",
"message": f"Workflow failed: {lastError}",
"type": "error",
"status": "failed",
"progress": 1.0
@ -1155,7 +1153,6 @@ The following is the user's original input message. Analyze intent, normalize th
"""Generate feedback message for workflow completion"""
try:
workflow = self.services.workflow
checkWorkflowStopped(self.services)
# Count messages by role
userMessages = [msg for msg in workflow.messages if msg.role == 'user']
@ -1227,7 +1224,6 @@ The following is the user's original input message. Analyze intent, normalize th
workflow = self.services.workflow
logger.error(f"Workflow processing error: {str(error)}")
# Update workflow status to failed
workflow.status = "failed"
workflow.lastActivity = self.services.utils.timestampGetUtc()
self.services.chat.updateWorkflow(workflow.id, {
@ -1237,11 +1233,10 @@ The following is the user's original input message. Analyze intent, normalize th
"totalActions": workflow.totalActions
})
# Create error message
error_message = {
"workflowId": workflow.id,
"role": "assistant",
"message": f"Workflow processing failed: {str(error)}",
"message": "Workflow processing encountered an error. Please try again.",
"status": "last",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": self.services.utils.timestampGetUtc(),
@ -1257,15 +1252,12 @@ The following is the user's original input message. Analyze intent, normalize th
}
self.services.chat.storeMessageWithDocuments(workflow, error_message, [])
# Add error log entry
self.services.chat.storeLog(workflow, {
"message": f"Workflow failed: {str(error)}",
"type": "error",
"status": "failed",
"progress": 1.0
})
raise
async def _processFileIds(self, fileIds: List[str], messageId: str = None) -> List[ChatDocument]:
"""Process file IDs from existing files and return ChatDocument objects.
@ -1365,21 +1357,3 @@ The following is the user's original input message. Analyze intent, normalize th
# Return original content on error
return contentBytes
def _checkIfHistoryAvailable(self) -> bool:
"""Check if workflow history is available (previous rounds exist).
Returns True if there are previous workflow rounds with messages.
"""
try:
from modules.workflows.processing.shared.placeholderFactory import getPreviousRoundContext
history = getPreviousRoundContext(self.services)
# Check if history contains actual content (not just "No previous round context available")
if history and history != "No previous round context available":
return True
return False
except Exception as e:
logger.error(f"Error checking if history is available: {str(e)}")
return False