gateway/modules/features/codeeditor/codeEditorProcessor.py
2026-02-23 22:09:27 +01:00

280 lines
11 KiB
Python

# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""CodeEditor processor -- single-shot (Phase 1) and agent loop (Phase 2).
Orchestrates file loading, prompt building, AI calls, response parsing, and SSE emission."""
import logging
from typing import List, Dict, Any
from modules.features.codeeditor import fileContextManager, promptAssembly, responseParser
from modules.features.codeeditor.datamodelCodeeditor import (
FileEditProposal, SegmentTypeEnum, AgentState
)
from modules.features.codeeditor import toolRegistry
from modules.shared.timeUtils import getUtcTimestamp
logger = logging.getLogger(__name__)
async def processMessage(
    workflowId: str,
    userPrompt: str,
    selectedFileIds: List[str],
    dbManagement,
    interfaceAi,
    chatInterface,
    eventManager,
    agentMode: bool = False
):
    """Entry point for a user message.

    Routes to the Phase 2 agent loop when ``agentMode`` is set; otherwise
    runs the Phase 1 single-shot flow, which additionally receives the
    pre-selected file ids.
    """
    if not agentMode:
        await _processSingleShot(
            workflowId, userPrompt, selectedFileIds, dbManagement,
            interfaceAi, chatInterface, eventManager
        )
        return
    await _processAgentMessage(
        workflowId, userPrompt, dbManagement, interfaceAi, chatInterface, eventManager
    )
async def _processSingleShot(
    workflowId, userPrompt, selectedFileIds, dbManagement, interfaceAi, chatInterface, eventManager
):
    """Phase 1: Single AI call with pre-loaded file context."""
    try:
        # Gather everything the model needs up front: file contents plus
        # prior conversation turns.
        await _emitStatus(eventManager, workflowId, "Loading files...")
        fileContexts = await fileContextManager.loadFileContexts(dbManagement, selectedFileIds)

        await _emitStatus(eventManager, workflowId, "Building prompt...")
        priorTurns = _loadChatHistory(chatInterface, workflowId)
        aiRequest = promptAssembly.buildRequest(userPrompt, fileContexts, priorTurns)

        await _emitStatus(eventManager, workflowId, "AI is processing...")
        aiResponse = await interfaceAi.callWithTextContext(aiRequest)
        if aiResponse.errorCount > 0:
            # Surface the provider error to the client and stop here.
            await _emitError(eventManager, workflowId, aiResponse.content)
            return

        # Stream the parsed text / file-edit segments back to the client,
        # then close out with a "complete" event carrying cost metadata.
        parsedSegments = responseParser.parseResponse(aiResponse.content)
        await _emitSegments(eventManager, workflowId, parsedSegments, fileContexts)
        _logAiStats(aiResponse, workflowId)
        await eventManager.emit_event(workflowId, "complete", {
            "workflowId": workflowId,
            "modelName": aiResponse.modelName,
            "priceCHF": aiResponse.priceCHF,
            "processingTime": aiResponse.processingTime
        })
    except Exception as e:
        logger.error(f"CodeEditor single-shot failed for {workflowId}: {e}", exc_info=True)
        await eventManager.emit_event(workflowId, "error", {
            "workflowId": workflowId, "error": str(e)
        })
async def _processAgentMessage(
    workflowId, userPrompt, dbManagement, interfaceAi, chatInterface, eventManager
):
    """Phase 2: Agent loop -- multiple AI calls with tool execution until done.

    Each round: call the AI, stream any text/file-edit segments to the
    client, run every tool the AI requested, and feed the batched tool
    output into the next request.  The loop stops when a response contains
    no tool calls (status "completed"), when an AI call reports an error
    (status "error"), or when state.maxRounds is exhausted (status
    "max_rounds").  On the non-exception path a summary card and a final
    "complete" event are always emitted.
    """
    state = AgentState(workflowId=workflowId)
    try:
        await _emitStatus(eventManager, workflowId, "Agent: Scanning available files...")
        # NOTE(review): not awaited, unlike loadFileContexts() in the
        # single-shot path -- presumably a synchronous helper; confirm.
        fileListContext = fileContextManager.buildFileListContext(dbManagement)
        state.conversationHistory.append({"role": "user", "content": userPrompt})
        # First request: the prompt travels in its own field and the history
        # sent is deliberately empty (the prompt was still recorded in
        # state.conversationHistory above for use in later rounds).
        aiRequest = promptAssembly.buildAgentRequest(
            userPrompt=userPrompt,
            fileListContext=fileListContext,
            conversationHistory=[]
        )
        while state.status == "running" and state.currentRound < state.maxRounds:
            state.currentRound += 1
            state.totalAiCalls += 1
            await _emitStatus(eventManager, workflowId,
                f"Agent round {state.currentRound}: AI is thinking...")
            # Live progress counters so the UI can display round and cost.
            await eventManager.emit_event(workflowId, "chatdata", {
                "type": "agent_progress",
                "item": {
                    "round": state.currentRound,
                    "totalAiCalls": state.totalAiCalls,
                    "totalToolCalls": state.totalToolCalls,
                    "costCHF": round(state.totalCostCHF, 4),
                }
            })
            aiResponse = await interfaceAi.callWithTextContext(aiRequest)
            # Cost/time accumulate across rounds for the final summary,
            # even for a round that subsequently errors out.
            state.totalCostCHF += aiResponse.priceCHF
            state.totalProcessingTime += aiResponse.processingTime
            if aiResponse.errorCount > 0:
                logger.error(f"Agent AI call failed in round {state.currentRound}: {aiResponse.content}")
                await _emitError(eventManager, workflowId, aiResponse.content)
                state.status = "error"
                break
            _logAiStats(aiResponse, workflowId)
            state.conversationHistory.append({"role": "assistant", "content": aiResponse.content})
            segments = responseParser.parseResponse(aiResponse.content)
            # Stream text/edit segments to the client right away; tool-call
            # segments are held back and executed below.
            textAndEditSegments = [s for s in segments if s.type != SegmentTypeEnum.TOOL_CALL]
            if textAndEditSegments:
                await _emitSegments(eventManager, workflowId, textAndEditSegments, [])
            toolCallSegments = [s for s in segments if s.type == SegmentTypeEnum.TOOL_CALL]
            if not toolCallSegments:
                # No tools requested -> the agent considers itself done.
                state.status = "completed"
                break
            toolResultTexts = []
            for tc in toolCallSegments:
                state.totalToolCalls += 1
                await _emitStatus(eventManager, workflowId,
                    f"Agent: Running {tc.toolName}...")
                result = await toolRegistry.dispatch(tc.toolName, tc.toolArgs or {}, dbManagement)
                toolResultTexts.append(f"[{tc.toolName}] (success={result.success}):\n{result.result}")
                logger.info(f"Agent tool {tc.toolName}: success={result.success}, time={result.executionTime:.2f}s")
            combinedResults = "\n\n".join(toolResultTexts)
            # All tool outputs of a round go back as a single batched turn.
            state.conversationHistory.append({
                "role": "tool_result",
                "content": combinedResults,
                "toolName": "batch"
            })
            # Rebuild the request with the full history for the next round.
            aiRequest = promptAssembly.buildAgentRequest(
                userPrompt=None,
                fileListContext=fileListContext,
                conversationHistory=state.conversationHistory
            )
        if state.currentRound >= state.maxRounds and state.status == "running":
            # Round budget exhausted without completion: tell the user.
            state.status = "max_rounds"
            await eventManager.emit_event(workflowId, "chatdata", {
                "type": "message",
                "item": {
                    "role": "system",
                    "content": f"Agent stopped: maximum rounds ({state.maxRounds}) reached.",
                    "createdAt": getUtcTimestamp()
                }
            })
        # Summary card for the UI, then the terminal "complete" event.
        await eventManager.emit_event(workflowId, "chatdata", {
            "type": "agent_summary",
            "item": {
                "rounds": state.currentRound,
                "totalAiCalls": state.totalAiCalls,
                "totalToolCalls": state.totalToolCalls,
                "costCHF": round(state.totalCostCHF, 4),
                "processingTime": round(state.totalProcessingTime, 1),
                "status": state.status,
            }
        })
        await eventManager.emit_event(workflowId, "complete", {
            "workflowId": workflowId,
            "agentRounds": state.currentRound,
            "totalCostCHF": round(state.totalCostCHF, 4),
            "processingTime": round(state.totalProcessingTime, 1)
        })
    except Exception as e:
        logger.error(f"CodeEditor agent loop failed for {workflowId}: {e}", exc_info=True)
        await eventManager.emit_event(workflowId, "error", {
            "workflowId": workflowId, "error": str(e)
        })
# ---------------------------------------------------------------------------
# Shared helpers
# ---------------------------------------------------------------------------
async def _emitStatus(eventManager, workflowId: str, label: str):
await eventManager.emit_event(workflowId, "chatdata", {
"type": "status", "label": label
})
async def _emitError(eventManager, workflowId: str, errorMsg: str):
await eventManager.emit_event(workflowId, "chatdata", {
"type": "message",
"item": {"role": "assistant", "content": f"Error: {errorMsg}"}
})
await eventManager.emit_event(workflowId, "error", {
"workflowId": workflowId, "error": errorMsg
})
async def _emitSegments(eventManager, workflowId: str, segments, fileContexts):
    """Emit parsed segments as SSE events.

    Every segment is sent as a chat message; a FILE_EDIT segment
    additionally yields a structured file_edit_proposal event.
    """
    for seg in segments:
        await eventManager.emit_event(workflowId, "chatdata", {
            "type": "message",
            "item": {
                "role": "assistant",
                "content": seg.content,
                "type": seg.type.value,
                "createdAt": getUtcTimestamp()
            }
        })
        if seg.type != SegmentTypeEnum.FILE_EDIT:
            continue
        # Build the edit proposal, resolving the file id from the loaded
        # contexts (falls back to an "unknown-" id when unresolved).
        proposal = FileEditProposal(
            workflowId=workflowId,
            fileId=_resolveFileId(seg.fileName, fileContexts),
            fileName=seg.fileName,
            operation="edit",
            oldContent=seg.oldContent,
            newContent=seg.newContent
        )
        await eventManager.emit_event(workflowId, "chatdata", {
            "type": "file_edit_proposal", "item": proposal.model_dump()
        })
def _loadChatHistory(chatInterface, workflowId: str) -> List[Dict[str, Any]]:
"""Load recent chat messages for multi-turn context."""
try:
messages = chatInterface.getMessages(workflowId)
if not messages:
return []
history = []
for msg in messages:
role = msg.get("role", "unknown") if isinstance(msg, dict) else getattr(msg, "role", "unknown")
content = msg.get("content", "") if isinstance(msg, dict) else getattr(msg, "content", "")
history.append({"role": role, "content": content})
return history
except Exception as e:
logger.warning(f"Could not load chat history for {workflowId}: {e}")
return []
def _resolveFileId(fileName: str, fileContexts) -> str:
"""Resolve a fileName to its fileId from the loaded contexts."""
for fc in fileContexts:
if fc.fileName == fileName:
return fc.fileId
return f"unknown-{fileName}"
def _logAiStats(aiResponse, workflowId: str):
    """Log AI call statistics (model, provider, cost, latency, traffic).

    Uses lazy %-style arguments so the message is only formatted when the
    INFO level is actually enabled (the original f-string paid the
    formatting cost unconditionally).  Rendered output is unchanged.
    """
    logger.info(
        "CodeEditor AI call for %s: model=%s, provider=%s, cost=%.4f CHF, "
        "time=%.1fs, sent=%sB, received=%sB",
        workflowId,
        aiResponse.modelName,
        aiResponse.provider,
        aiResponse.priceCHF,
        aiResponse.processingTime,
        aiResponse.bytesSent,
        aiResponse.bytesReceived,
    )