# Copyright (c) 2025 Patrick Motsch
|
|
# All rights reserved.
|
|
"""CodeEditor processor -- single-shot orchestrator (Phase 1).
|
|
Loads files, builds prompt, calls AI, parses response, emits SSE events."""
|
|
|
|
import logging
|
|
import uuid
|
|
from typing import List, Optional, Dict, Any
|
|
|
|
from modules.features.codeeditor import fileContextManager, promptAssembly, responseParser
|
|
from modules.features.codeeditor.datamodelCodeeditor import (
|
|
FileEditProposal, ResponseSegment, SegmentTypeEnum
|
|
)
|
|
from modules.shared.timeUtils import getUtcTimestamp
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
async def processMessage(
    workflowId: str,
    userPrompt: str,
    selectedFileIds: List[str],
    dbManagement,
    interfaceAi,
    chatInterface,
    eventManager
):
    """Process a user message: load files, call AI, parse and emit response segments.

    Single-shot orchestration: emits SSE status updates while loading the
    selected file contexts, assembling the prompt (including prior chat
    history), and calling the AI; then streams the parsed response segments
    (plus structured file-edit proposals) back over the event channel.

    Args:
        workflowId: the active workflow ID
        userPrompt: user's input text
        selectedFileIds: file IDs the user selected as context
        dbManagement: interfaceDbManagement instance with user context
        interfaceAi: AiObjects instance for AI calls
        chatInterface: interfaceDbChat instance for storing messages
        eventManager: EventManager for SSE emission
    """
    try:
        await _emitStatus(eventManager, workflowId, "Loading files...")
        fileContexts = await fileContextManager.loadFileContexts(dbManagement, selectedFileIds)

        await _emitStatus(eventManager, workflowId, "Building prompt...")
        chatHistory = _loadChatHistory(chatInterface, workflowId)
        aiRequest = promptAssembly.buildRequest(userPrompt, fileContexts, chatHistory)

        await _emitStatus(eventManager, workflowId, "AI is processing...")
        aiResponse = await interfaceAi.callWithTextContext(aiRequest)

        if aiResponse.errorCount > 0:
            # Surface the AI failure both as a chat message (visible to the
            # user) and as a dedicated error event, then stop processing.
            logger.error("AI call failed: %s", aiResponse.content)
            await eventManager.emit_event(workflowId, "chatdata", {
                "type": "message",
                "item": {"role": "assistant", "content": f"Error: {aiResponse.content}"}
            })
            await eventManager.emit_event(workflowId, "error", {
                "workflowId": workflowId, "error": aiResponse.content
            })
            return

        segments = responseParser.parseResponse(aiResponse.content)

        for segment in segments:
            messageData = {
                "role": "assistant",
                "content": segment.content,
                "type": segment.type.value,
                "createdAt": getUtcTimestamp()
            }
            await eventManager.emit_event(workflowId, "chatdata", {
                "type": "message", "item": messageData
            })

            # File edits additionally get a structured proposal event so the
            # client can present the old/new content for review.
            if segment.type == SegmentTypeEnum.FILE_EDIT:
                proposal = FileEditProposal(
                    workflowId=workflowId,
                    fileId=_resolveFileId(segment.fileName, fileContexts),
                    fileName=segment.fileName,
                    operation="edit",
                    oldContent=segment.oldContent,
                    newContent=segment.newContent
                )
                await eventManager.emit_event(workflowId, "chatdata", {
                    "type": "file_edit_proposal", "item": proposal.model_dump()
                })

        _logAiStats(aiResponse, workflowId)

        await eventManager.emit_event(workflowId, "complete", {
            "workflowId": workflowId,
            "modelName": aiResponse.modelName,
            "priceCHF": aiResponse.priceCHF,
            "processingTime": aiResponse.processingTime
        })

    except Exception as e:
        # Top-level boundary: log with traceback and notify the client.
        logger.exception("CodeEditor processing failed for workflow %s: %s", workflowId, e)
        await eventManager.emit_event(workflowId, "error", {
            "workflowId": workflowId, "error": str(e)
        })


async def _emitStatus(eventManager, workflowId: str, label: str):
    """Emit a transient status update on the workflow's SSE chat channel."""
    await eventManager.emit_event(workflowId, "chatdata", {
        "type": "status", "label": label
    })
|
|
|
|
|
|
def _loadChatHistory(chatInterface, workflowId: str) -> List[Dict[str, Any]]:
|
|
"""Load recent chat messages for multi-turn context."""
|
|
try:
|
|
messages = chatInterface.getMessages(workflowId)
|
|
if not messages:
|
|
return []
|
|
history = []
|
|
for msg in messages:
|
|
role = msg.get("role", "unknown") if isinstance(msg, dict) else getattr(msg, "role", "unknown")
|
|
content = msg.get("content", "") if isinstance(msg, dict) else getattr(msg, "content", "")
|
|
history.append({"role": role, "content": content})
|
|
return history
|
|
except Exception as e:
|
|
logger.warning(f"Could not load chat history for {workflowId}: {e}")
|
|
return []
|
|
|
|
|
|
def _resolveFileId(fileName: str, fileContexts) -> str:
|
|
"""Resolve a fileName to its fileId from the loaded contexts."""
|
|
for fc in fileContexts:
|
|
if fc.fileName == fileName:
|
|
return fc.fileId
|
|
return f"unknown-{fileName}"
|
|
|
|
|
|
def _logAiStats(aiResponse, workflowId: str) -> None:
    """Log cost/latency/traffic statistics for a completed AI call.

    Uses lazy %-style logging arguments so the formatting work is skipped
    entirely when INFO logging is disabled.

    Args:
        aiResponse: completed AI response carrying model/cost/timing stats
        workflowId: the workflow the call belongs to
    """
    logger.info(
        "CodeEditor AI call for %s: model=%s, provider=%s, cost=%.4f CHF, "
        "time=%.1fs, sent=%sB, received=%sB",
        workflowId,
        aiResponse.modelName,
        aiResponse.provider,
        aiResponse.priceCHF,
        aiResponse.processingTime,
        aiResponse.bytesSent,
        aiResponse.bytesReceived,
    )
|