Merge pull request #99 from valueonag/feat/cursor-style-feature
Feat/cursor style feature
This commit is contained in:
commit
5713b5be3a
16 changed files with 1707 additions and 8 deletions
7
app.py
7
app.py
|
|
@ -461,6 +461,13 @@ app.add_middleware(
|
||||||
max_age=86400, # Increased caching for preflight requests
|
max_age=86400, # Increased caching for preflight requests
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# SlowAPI rate limiter initialization
|
||||||
|
from modules.auth import limiter
|
||||||
|
from slowapi.errors import RateLimitExceeded
|
||||||
|
from slowapi import _rate_limit_exceeded_handler
|
||||||
|
app.state.limiter = limiter
|
||||||
|
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
|
||||||
|
|
||||||
# CSRF protection middleware
|
# CSRF protection middleware
|
||||||
from modules.auth import CSRFMiddleware
|
from modules.auth import CSRFMiddleware
|
||||||
from modules.auth import (
|
from modules.auth import (
|
||||||
|
|
|
||||||
|
|
@ -7,6 +7,7 @@ Replaces Azure Speech Services with Google Cloud APIs
|
||||||
|
|
||||||
import json
|
import json
|
||||||
import html
|
import html
|
||||||
|
import asyncio
|
||||||
import logging
|
import logging
|
||||||
from typing import Dict, Optional, Any
|
from typing import Dict, Optional, Any
|
||||||
from google.cloud import speech
|
from google.cloud import speech
|
||||||
|
|
@ -73,6 +74,11 @@ class ConnectorGoogleSpeech:
|
||||||
Dict containing transcribed text, confidence, and metadata
|
Dict containing transcribed text, confidence, and metadata
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
|
# Treat sampleRate=0 as unknown (invalid value from client)
|
||||||
|
if sampleRate is not None and sampleRate <= 0:
|
||||||
|
logger.warning(f"Invalid sampleRate={sampleRate}, treating as unknown for auto-detection")
|
||||||
|
sampleRate = None
|
||||||
|
|
||||||
# Auto-detect audio format if not provided
|
# Auto-detect audio format if not provided
|
||||||
if sampleRate is None or channels is None:
|
if sampleRate is None or channels is None:
|
||||||
validation = self.validateAudioFormat(audioContent)
|
validation = self.validateAudioFormat(audioContent)
|
||||||
|
|
@ -164,8 +170,11 @@ class ConnectorGoogleSpeech:
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# Use regular recognition for single audio files (not streaming)
|
# Use regular recognition for single audio files (not streaming)
|
||||||
|
# Run in thread pool to avoid blocking the asyncio event loop
|
||||||
logger.info("Using regular recognition for single audio file...")
|
logger.info("Using regular recognition for single audio file...")
|
||||||
response = self.speech_client.recognize(config=config, audio=audio)
|
response = await asyncio.to_thread(
|
||||||
|
self.speech_client.recognize, config=config, audio=audio
|
||||||
|
)
|
||||||
logger.debug(f"Google Cloud response: {response}")
|
logger.debug(f"Google Cloud response: {response}")
|
||||||
|
|
||||||
except Exception as apiError:
|
except Exception as apiError:
|
||||||
|
|
@ -175,7 +184,7 @@ class ConnectorGoogleSpeech:
|
||||||
logger.info("Trying fallback with LINEAR16 encoding...")
|
logger.info("Trying fallback with LINEAR16 encoding...")
|
||||||
fallbackConfig = speech.RecognitionConfig(
|
fallbackConfig = speech.RecognitionConfig(
|
||||||
encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
|
encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
|
||||||
sample_rate_hertz=16000, # Use standard sample rate
|
sample_rate_hertz=16000,
|
||||||
audio_channel_count=1,
|
audio_channel_count=1,
|
||||||
language_code=language,
|
language_code=language,
|
||||||
enable_automatic_punctuation=True,
|
enable_automatic_punctuation=True,
|
||||||
|
|
@ -183,7 +192,9 @@ class ConnectorGoogleSpeech:
|
||||||
)
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
response = self.speech_client.recognize(config=fallbackConfig, audio=audio)
|
response = await asyncio.to_thread(
|
||||||
|
self.speech_client.recognize, config=fallbackConfig, audio=audio
|
||||||
|
)
|
||||||
logger.debug(f"Google Cloud fallback response: {response}")
|
logger.debug(f"Google Cloud fallback response: {response}")
|
||||||
except Exception as fallbackError:
|
except Exception as fallbackError:
|
||||||
logger.error(f"Google Cloud fallback error: {fallbackError}")
|
logger.error(f"Google Cloud fallback error: {fallbackError}")
|
||||||
|
|
@ -297,7 +308,18 @@ class ConnectorGoogleSpeech:
|
||||||
"description": f"LINEAR16 with {std_rate}Hz"
|
"description": f"LINEAR16 with {std_rate}Hz"
|
||||||
})
|
})
|
||||||
|
|
||||||
# Try with different models
|
# Detect likely silence before expensive fallback loop
|
||||||
|
if len(audioContent) > 100:
|
||||||
|
sampleSlice = audioContent[100:min(500, len(audioContent))]
|
||||||
|
if len(set(sampleSlice)) < 3:
|
||||||
|
logger.warning("Audio appears silent (low byte variation) - skipping fallbacks")
|
||||||
|
return {
|
||||||
|
"success": False,
|
||||||
|
"text": "",
|
||||||
|
"confidence": 0.0,
|
||||||
|
"error": "No recognition results (silence or unclear audio)"
|
||||||
|
}
|
||||||
|
|
||||||
models = ["latest_long", "phone_call", "latest_short"]
|
models = ["latest_long", "phone_call", "latest_short"]
|
||||||
|
|
||||||
for fallback_config in fallback_configs:
|
for fallback_config in fallback_configs:
|
||||||
|
|
@ -305,7 +327,6 @@ class ConnectorGoogleSpeech:
|
||||||
try:
|
try:
|
||||||
logger.info(f"Trying fallback: {fallback_config['description']} with {model} model...")
|
logger.info(f"Trying fallback: {fallback_config['description']} with {model} model...")
|
||||||
|
|
||||||
# Build fallback config with proper sample rate handling
|
|
||||||
fallback_config_params = {
|
fallback_config_params = {
|
||||||
"encoding": fallback_config["encoding"],
|
"encoding": fallback_config["encoding"],
|
||||||
"audio_channel_count": fallback_config["channels"],
|
"audio_channel_count": fallback_config["channels"],
|
||||||
|
|
@ -314,12 +335,13 @@ class ConnectorGoogleSpeech:
|
||||||
"model": model
|
"model": model
|
||||||
}
|
}
|
||||||
|
|
||||||
# Only add sample_rate_hertz if needed
|
|
||||||
if fallback_config["use_sample_rate"]:
|
if fallback_config["use_sample_rate"]:
|
||||||
fallback_config_params["sample_rate_hertz"] = fallback_config["sample_rate"]
|
fallback_config_params["sample_rate_hertz"] = fallback_config["sample_rate"]
|
||||||
|
|
||||||
fallback_config_obj = speech.RecognitionConfig(**fallback_config_params)
|
fallback_config_obj = speech.RecognitionConfig(**fallback_config_params)
|
||||||
fallback_response = self.speech_client.recognize(config=fallback_config_obj, audio=audio)
|
fallback_response = await asyncio.to_thread(
|
||||||
|
self.speech_client.recognize, config=fallback_config_obj, audio=audio
|
||||||
|
)
|
||||||
|
|
||||||
if fallback_response.results:
|
if fallback_response.results:
|
||||||
result = fallback_response.results[0]
|
result = fallback_response.results[0]
|
||||||
|
|
|
||||||
|
|
@ -285,6 +285,7 @@ class WorkflowModeEnum(str, Enum):
|
||||||
WORKFLOW_DYNAMIC = "Dynamic"
|
WORKFLOW_DYNAMIC = "Dynamic"
|
||||||
WORKFLOW_AUTOMATION = "Automation"
|
WORKFLOW_AUTOMATION = "Automation"
|
||||||
WORKFLOW_CHATBOT = "Chatbot"
|
WORKFLOW_CHATBOT = "Chatbot"
|
||||||
|
WORKFLOW_CODEEDITOR = "CodeEditor"
|
||||||
WORKFLOW_REACT = "React" # Legacy mode - kept for backward compatibility
|
WORKFLOW_REACT = "React" # Legacy mode - kept for backward compatibility
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -295,6 +296,7 @@ registerModelLabels(
|
||||||
"WORKFLOW_DYNAMIC": {"en": "Dynamic", "fr": "Dynamique"},
|
"WORKFLOW_DYNAMIC": {"en": "Dynamic", "fr": "Dynamique"},
|
||||||
"WORKFLOW_AUTOMATION": {"en": "Automation", "fr": "Automatisation"},
|
"WORKFLOW_AUTOMATION": {"en": "Automation", "fr": "Automatisation"},
|
||||||
"WORKFLOW_CHATBOT": {"en": "Chatbot", "fr": "Chatbot"},
|
"WORKFLOW_CHATBOT": {"en": "Chatbot", "fr": "Chatbot"},
|
||||||
|
"WORKFLOW_CODEEDITOR": {"en": "Code Editor", "fr": "Éditeur de code"},
|
||||||
"WORKFLOW_REACT": {"en": "React (Legacy)", "fr": "React (Hérité)"},
|
"WORKFLOW_REACT": {"en": "React (Legacy)", "fr": "React (Hérité)"},
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
|
||||||
1
modules/features/codeeditor/__init__.py
Normal file
1
modules/features/codeeditor/__init__.py
Normal file
|
|
@ -0,0 +1 @@
|
||||||
|
"""CodeEditor Feature - Cursor-style AI file editing via chat interface."""
|
||||||
280
modules/features/codeeditor/codeEditorProcessor.py
Normal file
280
modules/features/codeeditor/codeEditorProcessor.py
Normal file
|
|
@ -0,0 +1,280 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""CodeEditor processor -- single-shot (Phase 1) and agent loop (Phase 2).
|
||||||
|
Orchestrates file loading, prompt building, AI calls, response parsing, and SSE emission."""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from typing import List, Dict, Any
|
||||||
|
|
||||||
|
from modules.features.codeeditor import fileContextManager, promptAssembly, responseParser
|
||||||
|
from modules.features.codeeditor.datamodelCodeeditor import (
|
||||||
|
FileEditProposal, SegmentTypeEnum, AgentState
|
||||||
|
)
|
||||||
|
from modules.features.codeeditor import toolRegistry
|
||||||
|
from modules.shared.timeUtils import getUtcTimestamp
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
async def processMessage(
|
||||||
|
workflowId: str,
|
||||||
|
userPrompt: str,
|
||||||
|
selectedFileIds: List[str],
|
||||||
|
dbManagement,
|
||||||
|
interfaceAi,
|
||||||
|
chatInterface,
|
||||||
|
eventManager,
|
||||||
|
agentMode: bool = False
|
||||||
|
):
|
||||||
|
"""Process a user message. Dispatches to single-shot or agent loop based on mode."""
|
||||||
|
if agentMode:
|
||||||
|
await _processAgentMessage(
|
||||||
|
workflowId, userPrompt, dbManagement, interfaceAi, chatInterface, eventManager
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
await _processSingleShot(
|
||||||
|
workflowId, userPrompt, selectedFileIds, dbManagement, interfaceAi, chatInterface, eventManager
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
async def _processSingleShot(
|
||||||
|
workflowId, userPrompt, selectedFileIds, dbManagement, interfaceAi, chatInterface, eventManager
|
||||||
|
):
|
||||||
|
"""Phase 1: Single AI call with pre-loaded file context."""
|
||||||
|
try:
|
||||||
|
await _emitStatus(eventManager, workflowId, "Loading files...")
|
||||||
|
fileContexts = await fileContextManager.loadFileContexts(dbManagement, selectedFileIds)
|
||||||
|
|
||||||
|
await _emitStatus(eventManager, workflowId, "Building prompt...")
|
||||||
|
chatHistory = _loadChatHistory(chatInterface, workflowId)
|
||||||
|
aiRequest = promptAssembly.buildRequest(userPrompt, fileContexts, chatHistory)
|
||||||
|
|
||||||
|
await _emitStatus(eventManager, workflowId, "AI is processing...")
|
||||||
|
aiResponse = await interfaceAi.callWithTextContext(aiRequest)
|
||||||
|
|
||||||
|
if aiResponse.errorCount > 0:
|
||||||
|
await _emitError(eventManager, workflowId, aiResponse.content)
|
||||||
|
return
|
||||||
|
|
||||||
|
segments = responseParser.parseResponse(aiResponse.content)
|
||||||
|
await _emitSegments(eventManager, workflowId, segments, fileContexts)
|
||||||
|
_logAiStats(aiResponse, workflowId)
|
||||||
|
|
||||||
|
await eventManager.emit_event(workflowId, "complete", {
|
||||||
|
"workflowId": workflowId,
|
||||||
|
"modelName": aiResponse.modelName,
|
||||||
|
"priceCHF": aiResponse.priceCHF,
|
||||||
|
"processingTime": aiResponse.processingTime
|
||||||
|
})
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"CodeEditor single-shot failed for {workflowId}: {e}", exc_info=True)
|
||||||
|
await eventManager.emit_event(workflowId, "error", {
|
||||||
|
"workflowId": workflowId, "error": str(e)
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
async def _processAgentMessage(
|
||||||
|
workflowId, userPrompt, dbManagement, interfaceAi, chatInterface, eventManager
|
||||||
|
):
|
||||||
|
"""Phase 2: Agent loop -- multiple AI calls with tool execution until done."""
|
||||||
|
state = AgentState(workflowId=workflowId)
|
||||||
|
|
||||||
|
try:
|
||||||
|
await _emitStatus(eventManager, workflowId, "Agent: Scanning available files...")
|
||||||
|
fileListContext = fileContextManager.buildFileListContext(dbManagement)
|
||||||
|
|
||||||
|
state.conversationHistory.append({"role": "user", "content": userPrompt})
|
||||||
|
|
||||||
|
aiRequest = promptAssembly.buildAgentRequest(
|
||||||
|
userPrompt=userPrompt,
|
||||||
|
fileListContext=fileListContext,
|
||||||
|
conversationHistory=[]
|
||||||
|
)
|
||||||
|
|
||||||
|
while state.status == "running" and state.currentRound < state.maxRounds:
|
||||||
|
state.currentRound += 1
|
||||||
|
state.totalAiCalls += 1
|
||||||
|
|
||||||
|
await _emitStatus(eventManager, workflowId,
|
||||||
|
f"Agent round {state.currentRound}: AI is thinking...")
|
||||||
|
|
||||||
|
await eventManager.emit_event(workflowId, "chatdata", {
|
||||||
|
"type": "agent_progress",
|
||||||
|
"item": {
|
||||||
|
"round": state.currentRound,
|
||||||
|
"totalAiCalls": state.totalAiCalls,
|
||||||
|
"totalToolCalls": state.totalToolCalls,
|
||||||
|
"costCHF": round(state.totalCostCHF, 4),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
aiResponse = await interfaceAi.callWithTextContext(aiRequest)
|
||||||
|
state.totalCostCHF += aiResponse.priceCHF
|
||||||
|
state.totalProcessingTime += aiResponse.processingTime
|
||||||
|
|
||||||
|
if aiResponse.errorCount > 0:
|
||||||
|
logger.error(f"Agent AI call failed in round {state.currentRound}: {aiResponse.content}")
|
||||||
|
await _emitError(eventManager, workflowId, aiResponse.content)
|
||||||
|
state.status = "error"
|
||||||
|
break
|
||||||
|
|
||||||
|
_logAiStats(aiResponse, workflowId)
|
||||||
|
|
||||||
|
state.conversationHistory.append({"role": "assistant", "content": aiResponse.content})
|
||||||
|
|
||||||
|
segments = responseParser.parseResponse(aiResponse.content)
|
||||||
|
|
||||||
|
textAndEditSegments = [s for s in segments if s.type != SegmentTypeEnum.TOOL_CALL]
|
||||||
|
if textAndEditSegments:
|
||||||
|
await _emitSegments(eventManager, workflowId, textAndEditSegments, [])
|
||||||
|
|
||||||
|
toolCallSegments = [s for s in segments if s.type == SegmentTypeEnum.TOOL_CALL]
|
||||||
|
|
||||||
|
if not toolCallSegments:
|
||||||
|
state.status = "completed"
|
||||||
|
break
|
||||||
|
|
||||||
|
toolResultTexts = []
|
||||||
|
for tc in toolCallSegments:
|
||||||
|
state.totalToolCalls += 1
|
||||||
|
await _emitStatus(eventManager, workflowId,
|
||||||
|
f"Agent: Running {tc.toolName}...")
|
||||||
|
|
||||||
|
result = await toolRegistry.dispatch(tc.toolName, tc.toolArgs or {}, dbManagement)
|
||||||
|
toolResultTexts.append(f"[{tc.toolName}] (success={result.success}):\n{result.result}")
|
||||||
|
|
||||||
|
logger.info(f"Agent tool {tc.toolName}: success={result.success}, time={result.executionTime:.2f}s")
|
||||||
|
|
||||||
|
combinedResults = "\n\n".join(toolResultTexts)
|
||||||
|
state.conversationHistory.append({
|
||||||
|
"role": "tool_result",
|
||||||
|
"content": combinedResults,
|
||||||
|
"toolName": "batch"
|
||||||
|
})
|
||||||
|
|
||||||
|
aiRequest = promptAssembly.buildAgentRequest(
|
||||||
|
userPrompt=None,
|
||||||
|
fileListContext=fileListContext,
|
||||||
|
conversationHistory=state.conversationHistory
|
||||||
|
)
|
||||||
|
|
||||||
|
if state.currentRound >= state.maxRounds and state.status == "running":
|
||||||
|
state.status = "max_rounds"
|
||||||
|
await eventManager.emit_event(workflowId, "chatdata", {
|
||||||
|
"type": "message",
|
||||||
|
"item": {
|
||||||
|
"role": "system",
|
||||||
|
"content": f"Agent stopped: maximum rounds ({state.maxRounds}) reached.",
|
||||||
|
"createdAt": getUtcTimestamp()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
await eventManager.emit_event(workflowId, "chatdata", {
|
||||||
|
"type": "agent_summary",
|
||||||
|
"item": {
|
||||||
|
"rounds": state.currentRound,
|
||||||
|
"totalAiCalls": state.totalAiCalls,
|
||||||
|
"totalToolCalls": state.totalToolCalls,
|
||||||
|
"costCHF": round(state.totalCostCHF, 4),
|
||||||
|
"processingTime": round(state.totalProcessingTime, 1),
|
||||||
|
"status": state.status,
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
await eventManager.emit_event(workflowId, "complete", {
|
||||||
|
"workflowId": workflowId,
|
||||||
|
"agentRounds": state.currentRound,
|
||||||
|
"totalCostCHF": round(state.totalCostCHF, 4),
|
||||||
|
"processingTime": round(state.totalProcessingTime, 1)
|
||||||
|
})
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"CodeEditor agent loop failed for {workflowId}: {e}", exc_info=True)
|
||||||
|
await eventManager.emit_event(workflowId, "error", {
|
||||||
|
"workflowId": workflowId, "error": str(e)
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Shared helpers
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
async def _emitStatus(eventManager, workflowId: str, label: str):
|
||||||
|
await eventManager.emit_event(workflowId, "chatdata", {
|
||||||
|
"type": "status", "label": label
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
async def _emitError(eventManager, workflowId: str, errorMsg: str):
|
||||||
|
await eventManager.emit_event(workflowId, "chatdata", {
|
||||||
|
"type": "message",
|
||||||
|
"item": {"role": "assistant", "content": f"Error: {errorMsg}"}
|
||||||
|
})
|
||||||
|
await eventManager.emit_event(workflowId, "error", {
|
||||||
|
"workflowId": workflowId, "error": errorMsg
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
async def _emitSegments(eventManager, workflowId: str, segments, fileContexts):
|
||||||
|
"""Emit parsed segments as SSE events."""
|
||||||
|
for segment in segments:
|
||||||
|
messageData = {
|
||||||
|
"role": "assistant",
|
||||||
|
"content": segment.content,
|
||||||
|
"type": segment.type.value,
|
||||||
|
"createdAt": getUtcTimestamp()
|
||||||
|
}
|
||||||
|
await eventManager.emit_event(workflowId, "chatdata", {
|
||||||
|
"type": "message", "item": messageData
|
||||||
|
})
|
||||||
|
|
||||||
|
if segment.type == SegmentTypeEnum.FILE_EDIT:
|
||||||
|
proposal = FileEditProposal(
|
||||||
|
workflowId=workflowId,
|
||||||
|
fileId=_resolveFileId(segment.fileName, fileContexts),
|
||||||
|
fileName=segment.fileName,
|
||||||
|
operation="edit",
|
||||||
|
oldContent=segment.oldContent,
|
||||||
|
newContent=segment.newContent
|
||||||
|
)
|
||||||
|
await eventManager.emit_event(workflowId, "chatdata", {
|
||||||
|
"type": "file_edit_proposal", "item": proposal.model_dump()
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
def _loadChatHistory(chatInterface, workflowId: str) -> List[Dict[str, Any]]:
|
||||||
|
"""Load recent chat messages for multi-turn context."""
|
||||||
|
try:
|
||||||
|
messages = chatInterface.getMessages(workflowId)
|
||||||
|
if not messages:
|
||||||
|
return []
|
||||||
|
history = []
|
||||||
|
for msg in messages:
|
||||||
|
role = msg.get("role", "unknown") if isinstance(msg, dict) else getattr(msg, "role", "unknown")
|
||||||
|
content = msg.get("content", "") if isinstance(msg, dict) else getattr(msg, "content", "")
|
||||||
|
history.append({"role": role, "content": content})
|
||||||
|
return history
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Could not load chat history for {workflowId}: {e}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
|
||||||
|
def _resolveFileId(fileName: str, fileContexts) -> str:
|
||||||
|
"""Resolve a fileName to its fileId from the loaded contexts."""
|
||||||
|
for fc in fileContexts:
|
||||||
|
if fc.fileName == fileName:
|
||||||
|
return fc.fileId
|
||||||
|
return f"unknown-{fileName}"
|
||||||
|
|
||||||
|
|
||||||
|
def _logAiStats(aiResponse, workflowId: str):
|
||||||
|
"""Log AI call statistics."""
|
||||||
|
logger.info(
|
||||||
|
f"CodeEditor AI call for {workflowId}: "
|
||||||
|
f"model={aiResponse.modelName}, "
|
||||||
|
f"provider={aiResponse.provider}, "
|
||||||
|
f"cost={aiResponse.priceCHF:.4f} CHF, "
|
||||||
|
f"time={aiResponse.processingTime:.1f}s, "
|
||||||
|
f"sent={aiResponse.bytesSent}B, received={aiResponse.bytesReceived}B"
|
||||||
|
)
|
||||||
121
modules/features/codeeditor/datamodelCodeeditor.py
Normal file
121
modules/features/codeeditor/datamodelCodeeditor.py
Normal file
|
|
@ -0,0 +1,121 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""Data models for the CodeEditor feature."""
|
||||||
|
|
||||||
|
from typing import List, Dict, Any, Optional
|
||||||
|
from enum import Enum
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
from modules.shared.timeUtils import getUtcTimestamp
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
|
||||||
|
class SegmentTypeEnum(str, Enum):
|
||||||
|
TEXT = "text"
|
||||||
|
CODE_BLOCK = "code_block"
|
||||||
|
FILE_EDIT = "file_edit"
|
||||||
|
TOOL_CALL = "tool_call"
|
||||||
|
|
||||||
|
|
||||||
|
class EditStatusEnum(str, Enum):
|
||||||
|
PENDING = "pending"
|
||||||
|
ACCEPTED = "accepted"
|
||||||
|
REJECTED = "rejected"
|
||||||
|
|
||||||
|
|
||||||
|
class FileContext(BaseModel):
|
||||||
|
"""A text file loaded as context for the AI."""
|
||||||
|
fileId: str
|
||||||
|
fileName: str
|
||||||
|
content: Optional[str] = None
|
||||||
|
mimeType: str
|
||||||
|
sizeBytes: int = 0
|
||||||
|
tags: List[str] = Field(default_factory=list)
|
||||||
|
|
||||||
|
|
||||||
|
class ResponseSegment(BaseModel):
|
||||||
|
"""A parsed segment from the AI response."""
|
||||||
|
type: SegmentTypeEnum
|
||||||
|
content: str
|
||||||
|
language: Optional[str] = None
|
||||||
|
fileId: Optional[str] = None
|
||||||
|
fileName: Optional[str] = None
|
||||||
|
oldContent: Optional[str] = None
|
||||||
|
newContent: Optional[str] = None
|
||||||
|
toolName: Optional[str] = None
|
||||||
|
toolArgs: Optional[Dict[str, Any]] = None
|
||||||
|
|
||||||
|
|
||||||
|
class FileEditProposal(BaseModel):
|
||||||
|
"""A proposed file edit from the AI, awaiting user accept/reject."""
|
||||||
|
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
|
||||||
|
workflowId: str
|
||||||
|
fileId: str
|
||||||
|
fileName: str
|
||||||
|
operation: str = "edit"
|
||||||
|
oldContent: Optional[str] = None
|
||||||
|
newContent: str
|
||||||
|
diffSummary: Optional[str] = None
|
||||||
|
status: EditStatusEnum = EditStatusEnum.PENDING
|
||||||
|
createdAt: float = Field(default_factory=getUtcTimestamp)
|
||||||
|
|
||||||
|
|
||||||
|
class FileVersion(BaseModel):
|
||||||
|
"""A new version of a file created after accepting an edit proposal."""
|
||||||
|
id: str = Field(default_factory=lambda: str(uuid.uuid4()))
|
||||||
|
sourceFileId: str
|
||||||
|
editProposalId: str
|
||||||
|
newFileId: str
|
||||||
|
createdAt: float = Field(default_factory=getUtcTimestamp)
|
||||||
|
|
||||||
|
|
||||||
|
class AgentState(BaseModel):
|
||||||
|
"""Tracks state across an agent loop execution."""
|
||||||
|
workflowId: str
|
||||||
|
currentRound: int = 0
|
||||||
|
maxRounds: int = 50
|
||||||
|
totalAiCalls: int = 0
|
||||||
|
totalToolCalls: int = 0
|
||||||
|
totalCostCHF: float = 0.0
|
||||||
|
totalProcessingTime: float = 0.0
|
||||||
|
conversationHistory: List[Dict[str, Any]] = Field(default_factory=list)
|
||||||
|
status: str = "running"
|
||||||
|
|
||||||
|
|
||||||
|
class ToolResult(BaseModel):
|
||||||
|
"""Result from executing a tool."""
|
||||||
|
toolName: str
|
||||||
|
result: str
|
||||||
|
success: bool = True
|
||||||
|
executionTime: float = 0.0
|
||||||
|
|
||||||
|
|
||||||
|
TEXT_MIME_TYPES = {
|
||||||
|
"text/plain", "text/markdown", "text/html", "text/css", "text/csv",
|
||||||
|
"text/xml", "text/yaml", "text/x-python", "text/x-java",
|
||||||
|
"text/javascript", "text/x-typescript", "text/x-sql",
|
||||||
|
"application/json", "application/xml", "application/yaml",
|
||||||
|
"application/x-yaml", "application/javascript",
|
||||||
|
}
|
||||||
|
|
||||||
|
TEXT_EXTENSIONS = {
|
||||||
|
".md", ".txt", ".json", ".yaml", ".yml", ".xml", ".csv",
|
||||||
|
".py", ".js", ".ts", ".tsx", ".jsx", ".html", ".htm", ".css", ".scss",
|
||||||
|
".sql", ".sh", ".bash", ".zsh", ".ps1", ".bat",
|
||||||
|
".toml", ".ini", ".cfg", ".conf", ".env", ".gitignore",
|
||||||
|
".dockerfile", ".docker-compose", ".makefile",
|
||||||
|
".java", ".kt", ".go", ".rs", ".rb", ".php", ".swift", ".c", ".cpp", ".h",
|
||||||
|
".r", ".lua", ".dart", ".vue", ".svelte",
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def isTextFile(mimeType: Optional[str], fileName: Optional[str] = None) -> bool:
|
||||||
|
"""Check if a file is a text-based file suitable for the editor."""
|
||||||
|
if mimeType and mimeType.lower() in TEXT_MIME_TYPES:
|
||||||
|
return True
|
||||||
|
if mimeType and mimeType.lower().startswith("text/"):
|
||||||
|
return True
|
||||||
|
if fileName:
|
||||||
|
ext = "." + fileName.rsplit(".", 1)[-1].lower() if "." in fileName else ""
|
||||||
|
if ext in TEXT_EXTENSIONS:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
82
modules/features/codeeditor/fileContextManager.py
Normal file
82
modules/features/codeeditor/fileContextManager.py
Normal file
|
|
@ -0,0 +1,82 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""File context manager for CodeEditor feature.
|
||||||
|
Loads text files from the database and provides them as context for AI calls."""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from typing import List, Optional
|
||||||
|
|
||||||
|
from modules.features.codeeditor.datamodelCodeeditor import FileContext, isTextFile
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
async def loadFileContexts(dbManagement, fileIds: List[str]) -> List[FileContext]:
|
||||||
|
"""Load text files from DB and return as FileContext list.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
dbManagement: interfaceDbManagement instance with user context set
|
||||||
|
fileIds: list of file IDs to load
|
||||||
|
"""
|
||||||
|
contexts = []
|
||||||
|
for fileId in fileIds:
|
||||||
|
fileItem = dbManagement.getFile(fileId)
|
||||||
|
if not fileItem:
|
||||||
|
logger.warning(f"File {fileId} not found or no access")
|
||||||
|
continue
|
||||||
|
|
||||||
|
if not isTextFile(fileItem.mimeType, fileItem.fileName):
|
||||||
|
logger.warning(f"File {fileItem.fileName} ({fileItem.mimeType}) is not a text file, skipping")
|
||||||
|
continue
|
||||||
|
|
||||||
|
fileData = dbManagement.getFileData(fileId)
|
||||||
|
if not fileData:
|
||||||
|
logger.warning(f"No data for file {fileId}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
try:
|
||||||
|
content = fileData.decode("utf-8")
|
||||||
|
except UnicodeDecodeError:
|
||||||
|
logger.warning(f"File {fileItem.fileName} is not valid UTF-8, skipping")
|
||||||
|
continue
|
||||||
|
|
||||||
|
contexts.append(FileContext(
|
||||||
|
fileId=fileId,
|
||||||
|
fileName=fileItem.fileName,
|
||||||
|
content=content,
|
||||||
|
mimeType=fileItem.mimeType,
|
||||||
|
sizeBytes=fileItem.fileSize
|
||||||
|
))
|
||||||
|
|
||||||
|
logger.info(f"Loaded {len(contexts)} file contexts from {len(fileIds)} requested")
|
||||||
|
return contexts
|
||||||
|
|
||||||
|
|
||||||
|
def listTextFiles(dbManagement) -> List[FileContext]:
|
||||||
|
"""List all text files accessible to the user (metadata only, no content)."""
|
||||||
|
allFiles = dbManagement.getAllFiles()
|
||||||
|
textFiles = []
|
||||||
|
|
||||||
|
if not allFiles:
|
||||||
|
return textFiles
|
||||||
|
|
||||||
|
for fileItem in allFiles:
|
||||||
|
if isTextFile(fileItem.mimeType, fileItem.fileName):
|
||||||
|
textFiles.append(FileContext(
|
||||||
|
fileId=fileItem.id,
|
||||||
|
fileName=fileItem.fileName,
|
||||||
|
content=None,
|
||||||
|
mimeType=fileItem.mimeType,
|
||||||
|
sizeBytes=fileItem.fileSize
|
||||||
|
))
|
||||||
|
|
||||||
|
return textFiles
|
||||||
|
|
||||||
|
|
||||||
|
def buildFileListContext(dbManagement) -> str:
|
||||||
|
"""Build a compact file list string for the agent prompt (no content, just metadata)."""
|
||||||
|
textFiles = listTextFiles(dbManagement)
|
||||||
|
if not textFiles:
|
||||||
|
return "No text files available."
|
||||||
|
lines = [f"- {f.fileName} (id: {f.fileId}, size: {f.sizeBytes}B)" for f in textFiles]
|
||||||
|
return f"Total: {len(lines)} text files\n" + "\n".join(lines)
|
||||||
248
modules/features/codeeditor/mainCodeeditor.py
Normal file
248
modules/features/codeeditor/mainCodeeditor.py
Normal file
|
|
@ -0,0 +1,248 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""
|
||||||
|
CodeEditor Feature Container - Main Module.
|
||||||
|
Handles feature initialization and RBAC catalog registration.
|
||||||
|
Cursor-style AI file editing via chat interface.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from typing import Dict, List, Any
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
FEATURE_CODE = "codeeditor"
|
||||||
|
FEATURE_LABEL = {"en": "Code Editor", "de": "Code Editor", "fr": "Code Editor"}
|
||||||
|
FEATURE_ICON = "mdi-file-document-edit"
|
||||||
|
|
||||||
|
UI_OBJECTS = [
|
||||||
|
{
|
||||||
|
"objectKey": "ui.feature.codeeditor.editor",
|
||||||
|
"label": {"en": "Editor", "de": "Editor", "fr": "Editeur"},
|
||||||
|
"meta": {"area": "editor"}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"objectKey": "ui.feature.codeeditor.workflows",
|
||||||
|
"label": {"en": "Workflows", "de": "Workflows", "fr": "Workflows"},
|
||||||
|
"meta": {"area": "workflows"}
|
||||||
|
},
|
||||||
|
]
|
||||||
|
|
||||||
|
RESOURCE_OBJECTS = [
|
||||||
|
{
|
||||||
|
"objectKey": "resource.feature.codeeditor.start",
|
||||||
|
"label": {"en": "Start Workflow", "de": "Workflow starten", "fr": "Demarrer workflow"},
|
||||||
|
"meta": {"endpoint": "/api/codeeditor/{instanceId}/start/stream", "method": "POST"}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"objectKey": "resource.feature.codeeditor.stop",
|
||||||
|
"label": {"en": "Stop Workflow", "de": "Workflow stoppen", "fr": "Arreter workflow"},
|
||||||
|
"meta": {"endpoint": "/api/codeeditor/{instanceId}/{workflowId}/stop", "method": "POST"}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"objectKey": "resource.feature.codeeditor.chatData",
|
||||||
|
"label": {"en": "Get Chat Data", "de": "Chat-Daten abrufen", "fr": "Recuperer donnees chat"},
|
||||||
|
"meta": {"endpoint": "/api/codeeditor/{instanceId}/{workflowId}/chatData", "method": "GET"}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"objectKey": "resource.feature.codeeditor.files",
|
||||||
|
"label": {"en": "Manage Files", "de": "Dateien verwalten", "fr": "Gerer fichiers"},
|
||||||
|
"meta": {"endpoint": "/api/codeeditor/{instanceId}/files", "method": "GET"}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"objectKey": "resource.feature.codeeditor.apply",
|
||||||
|
"label": {"en": "Apply Edit", "de": "Aenderung anwenden", "fr": "Appliquer modification"},
|
||||||
|
"meta": {"endpoint": "/api/codeeditor/{instanceId}/{workflowId}/apply", "method": "POST"}
|
||||||
|
},
|
||||||
|
]
|
||||||
|
|
||||||
|
TEMPLATE_ROLES = [
|
||||||
|
{
|
||||||
|
"roleLabel": "codeeditor-viewer",
|
||||||
|
"description": {
|
||||||
|
"en": "Code Editor Viewer - View editor (read-only)",
|
||||||
|
"de": "Code Editor Betrachter - Editor ansehen (nur lesen)",
|
||||||
|
"fr": "Visualiseur Code Editor - Consulter l'editeur (lecture seule)"
|
||||||
|
},
|
||||||
|
"accessRules": [
|
||||||
|
{"context": "UI", "item": "ui.feature.codeeditor.editor", "view": True},
|
||||||
|
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"},
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"roleLabel": "codeeditor-user",
|
||||||
|
"description": {
|
||||||
|
"en": "Code Editor User - Use editor and workflows",
|
||||||
|
"de": "Code Editor Benutzer - Editor und Workflows nutzen",
|
||||||
|
"fr": "Utilisateur Code Editor - Utiliser l'editeur et les workflows"
|
||||||
|
},
|
||||||
|
"accessRules": [
|
||||||
|
{"context": "UI", "item": "ui.feature.codeeditor.editor", "view": True},
|
||||||
|
{"context": "UI", "item": "ui.feature.codeeditor.workflows", "view": True},
|
||||||
|
{"context": "RESOURCE", "item": "resource.feature.codeeditor.start", "view": True},
|
||||||
|
{"context": "RESOURCE", "item": "resource.feature.codeeditor.stop", "view": True},
|
||||||
|
{"context": "RESOURCE", "item": "resource.feature.codeeditor.chatData", "view": True},
|
||||||
|
{"context": "RESOURCE", "item": "resource.feature.codeeditor.files", "view": True},
|
||||||
|
{"context": "RESOURCE", "item": "resource.feature.codeeditor.apply", "view": True},
|
||||||
|
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "m"},
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"roleLabel": "codeeditor-admin",
|
||||||
|
"description": {
|
||||||
|
"en": "Code Editor Admin - Full access to code editor",
|
||||||
|
"de": "Code Editor Admin - Vollzugriff auf Code Editor",
|
||||||
|
"fr": "Administrateur Code Editor - Acces complet au code editor"
|
||||||
|
},
|
||||||
|
"accessRules": [
|
||||||
|
{"context": "UI", "item": None, "view": True},
|
||||||
|
{"context": "RESOURCE", "item": None, "view": True},
|
||||||
|
{"context": "DATA", "item": None, "view": True, "read": "a", "create": "a", "update": "a", "delete": "a"},
|
||||||
|
]
|
||||||
|
},
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def getFeatureDefinition() -> Dict[str, Any]:
    """Build and return the feature definition used for catalog registration."""
    definition: Dict[str, Any] = dict(
        code=FEATURE_CODE,
        label=FEATURE_LABEL,
        icon=FEATURE_ICON,
        autoCreateInstance=True,
    )
    return definition
|
||||||
|
|
||||||
|
|
||||||
|
def getUiObjects() -> List[Dict[str, Any]]:
    """Expose the module-level UI object catalog for RBAC registration."""
    uiCatalog = UI_OBJECTS
    return uiCatalog
|
||||||
|
|
||||||
|
|
||||||
|
def getResourceObjects() -> List[Dict[str, Any]]:
    """Expose the module-level resource object catalog for RBAC registration."""
    resourceCatalog = RESOURCE_OBJECTS
    return resourceCatalog
|
||||||
|
|
||||||
|
|
||||||
|
def getTemplateRoles() -> List[Dict[str, Any]]:
    """Expose this feature's role templates."""
    roleTemplates = TEMPLATE_ROLES
    return roleTemplates
|
||||||
|
|
||||||
|
|
||||||
|
def registerFeature(catalogService) -> bool:
    """Register this feature's RBAC objects in the catalog.

    Registers every UI object, then every resource object, then syncs the
    template roles into the database. Returns True on success, False if any
    step raised (the error is logged, never propagated).
    """
    try:
        # UI objects first, resource objects second — same order as before.
        registrations = (
            (UI_OBJECTS, catalogService.registerUiObject),
            (RESOURCE_OBJECTS, catalogService.registerResourceObject),
        )
        for objectList, registerFn in registrations:
            for obj in objectList:
                registerFn(
                    featureCode=FEATURE_CODE,
                    objectKey=obj["objectKey"],
                    label=obj["label"],
                    meta=obj.get("meta")
                )

        _syncTemplateRolesToDb()

        logger.info(f"Feature '{FEATURE_CODE}' registered {len(UI_OBJECTS)} UI objects and {len(RESOURCE_OBJECTS)} resource objects")
        return True

    except Exception as e:
        logger.error(f"Failed to register feature '{FEATURE_CODE}': {e}")
        return False
|
||||||
|
|
||||||
|
|
||||||
|
def _syncTemplateRolesToDb() -> int:
    """Sync template roles and their AccessRules to the database.

    Template roles are the mandate-independent ones (mandateId is None).
    Roles that already exist only get missing AccessRules topped up; roles
    not yet present are created together with their rules.

    Returns:
        Number of newly created roles; 0 on error (errors are logged,
        never raised).
    """
    try:
        from modules.interfaces.interfaceDbApp import getRootInterface
        # Fix: AccessRule and AccessRuleContext were imported here but never
        # used in this function — _ensureAccessRulesForRole does its own import.
        from modules.datamodels.datamodelRbac import Role

        rootInterface = getRootInterface()

        existingRoles = rootInterface.getRolesByFeatureCode(FEATURE_CODE)
        # Only mandate-independent roles count as templates.
        templateRoles = [r for r in existingRoles if r.mandateId is None]
        existingRoleLabels = {r.roleLabel: str(r.id) for r in templateRoles}

        createdCount = 0
        for roleTemplate in TEMPLATE_ROLES:
            roleLabel = roleTemplate["roleLabel"]

            if roleLabel in existingRoleLabels:
                # Role already exists: only ensure its rules are present.
                roleId = existingRoleLabels[roleLabel]
                _ensureAccessRulesForRole(rootInterface, roleId, roleTemplate.get("accessRules", []))
            else:
                newRole = Role(
                    roleLabel=roleLabel,
                    description=roleTemplate.get("description", {}),
                    featureCode=FEATURE_CODE,
                    mandateId=None,
                    featureInstanceId=None,
                    isSystemRole=False
                )
                createdRole = rootInterface.db.recordCreate(Role, newRole.model_dump())
                roleId = createdRole.get("id")
                _ensureAccessRulesForRole(rootInterface, roleId, roleTemplate.get("accessRules", []))
                logger.info(f"Created template role '{roleLabel}' with ID {roleId}")
                createdCount += 1

        if createdCount > 0:
            logger.info(f"Feature '{FEATURE_CODE}': Created {createdCount} template roles")

        return createdCount

    except Exception as e:
        logger.error(f"Error syncing template roles for feature '{FEATURE_CODE}': {e}")
        return 0
|
||||||
|
|
||||||
|
|
||||||
|
def _ensureAccessRulesForRole(rootInterface, roleId: str, ruleTemplates: List[Dict[str, Any]]) -> int:
    """Ensure AccessRules exist for a role based on templates.

    Rules already present (matched by their (context, item) signature) are
    left untouched; only missing ones are created. Returns the number of
    rules created.
    """
    from modules.datamodels.datamodelRbac import AccessRule, AccessRuleContext

    # Signatures of the rules this role already has.
    existingSignatures = {
        (rule.context.value if rule.context else None, rule.item)
        for rule in rootInterface.getAccessRulesByRole(roleId)
    }

    contextMap = {
        "UI": AccessRuleContext.UI,
        "DATA": AccessRuleContext.DATA,
        "RESOURCE": AccessRuleContext.RESOURCE,
    }

    createdCount = 0
    for template in ruleTemplates:
        context = template.get("context", "UI")
        item = template.get("item")

        if (context, item) in existingSignatures:
            continue

        # Unknown context strings are passed through unchanged.
        contextEnum = contextMap.get(context, context)

        newRule = AccessRule(
            roleId=roleId,
            context=contextEnum,
            item=item,
            view=template.get("view", False),
            read=template.get("read"),
            create=template.get("create"),
            update=template.get("update"),
            delete=template.get("delete"),
        )
        rootInterface.db.recordCreate(AccessRule, newRule.model_dump())
        createdCount += 1

    if createdCount > 0:
        logger.debug(f"Created {createdCount} AccessRules for role {roleId}")

    return createdCount
|
||||||
183
modules/features/codeeditor/promptAssembly.py
Normal file
183
modules/features/codeeditor/promptAssembly.py
Normal file
|
|
@ -0,0 +1,183 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""Prompt assembly for the CodeEditor feature.
|
||||||
|
Builds Cursor-style system prompts with file context and format instructions."""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from typing import List, Optional, Dict, Any
|
||||||
|
|
||||||
|
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum
|
||||||
|
from modules.features.codeeditor.datamodelCodeeditor import FileContext
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
SYSTEM_PROMPT = """You are an AI assistant for text and code file editing. You receive files as context and can suggest changes.
|
||||||
|
|
||||||
|
## Rules for file edits
|
||||||
|
- Use ```file_edit``` blocks for file changes
|
||||||
|
- Each file_edit block must contain: fileName, oldContent (exact text to replace), newContent (replacement text)
|
||||||
|
- Explain changes in normal text before or after the block
|
||||||
|
- oldContent must EXACTLY match existing content (including whitespace and indentation)
|
||||||
|
- You may propose edits to multiple files in one response
|
||||||
|
|
||||||
|
## Response format
|
||||||
|
Normal text is displayed as explanation.
|
||||||
|
File changes must use this format:
|
||||||
|
|
||||||
|
```file_edit
|
||||||
|
fileName: <filename>
|
||||||
|
oldContent: |
|
||||||
|
<exact existing content to replace>
|
||||||
|
newContent: |
|
||||||
|
<new replacement content>
|
||||||
|
```
|
||||||
|
|
||||||
|
Code examples (without edits) use standard markdown code blocks:
|
||||||
|
```language
|
||||||
|
code here
|
||||||
|
```
|
||||||
|
|
||||||
|
## Important
|
||||||
|
- Only edit files that are provided in context
|
||||||
|
- Make minimal, targeted changes
|
||||||
|
- Preserve existing formatting and style
|
||||||
|
- If a task is unclear, ask for clarification instead of guessing"""
|
||||||
|
|
||||||
|
|
||||||
|
def buildRequest(
    userPrompt: str,
    fileContexts: List[FileContext],
    chatHistory: Optional[List[Dict[str, Any]]] = None
) -> AiCallRequest:
    """Assemble an AiCallRequest from the system prompt, optional condensed
    chat history, the user's request, and the numbered file context."""
    sections = [SYSTEM_PROMPT]

    historyPart = _buildChatHistory(chatHistory) if chatHistory else ""
    if historyPart:
        sections.append(f"## Previous conversation\n{historyPart}")
    sections.append(f"## User request\n{userPrompt}")

    fullPrompt = "\n\n".join(sections)
    fileContextPart = _buildFileContext(fileContexts)

    return AiCallRequest(
        prompt=fullPrompt,
        context=fileContextPart if fileContextPart else None,
        options=AiCallOptions(
            operationType=OperationTypeEnum.DATA_ANALYSE,
            temperature=0.0,
            compressPrompt=False,
            compressContext=False,
            resultFormat="txt"
        )
    )
|
||||||
|
|
||||||
|
|
||||||
|
def _buildFileContext(fileContexts: List[FileContext]) -> str:
    """Render the given files as a context string, one `N|text` numbered
    line per source line, each file wrapped in FILE/END FILE markers.
    Files with empty content are skipped."""
    if not fileContexts:
        return ""

    rendered = []
    for fileCtx in fileContexts:
        if not fileCtx.content:
            continue
        numbered = "\n".join(
            f"{lineNo}|{text}"
            for lineNo, text in enumerate(fileCtx.content.split("\n"), start=1)
        )
        rendered.append(f"--- FILE: {fileCtx.fileName} ---\n{numbered}\n--- END FILE ---")

    return "\n\n".join(rendered)
|
||||||
|
|
||||||
|
|
||||||
|
def buildAgentRequest(
    userPrompt: Optional[str],
    fileListContext: str,
    conversationHistory: List[Dict[str, Any]]
) -> AiCallRequest:
    """Assemble an agent-mode AiCallRequest: the agent system prompt with tool
    definitions substituted in, plus either the initial task or the running
    conversation as context."""
    from modules.features.codeeditor.toolRegistry import formatToolDefinitions

    fullPrompt = _AGENT_SYSTEM_PROMPT.replace("{{TOOL_DEFINITIONS}}", formatToolDefinitions())

    if conversationHistory:
        historyText = _buildConversationHistory(conversationHistory)
        context = f"## Available files\n{fileListContext}\n\n## Conversation\n{historyText}"
    else:
        # First turn: no history yet, the raw task goes into the context.
        context = f"## Available files\n{fileListContext}\n\n## Task\n{userPrompt}"

    return AiCallRequest(
        prompt=fullPrompt,
        context=context,
        options=AiCallOptions(
            operationType=OperationTypeEnum.DATA_ANALYSE,
            temperature=0.0,
            compressPrompt=False,
            compressContext=False,
            resultFormat="txt"
        )
    )
|
||||||
|
|
||||||
|
|
||||||
|
_AGENT_SYSTEM_PROMPT = """You are an AI agent for file analysis and editing. You work autonomously by using tools to read files, search content, and propose edits.
|
||||||
|
|
||||||
|
## Available tools
|
||||||
|
{{TOOL_DEFINITIONS}}
|
||||||
|
|
||||||
|
## How to call tools
|
||||||
|
Use this exact format for each tool call:
|
||||||
|
|
||||||
|
```tool_call
|
||||||
|
tool: <tool_name>
|
||||||
|
args: {"param": "value"}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Rules
|
||||||
|
- Read files ONE AT A TIME with read_file, never assume file contents
|
||||||
|
- First create a plan, then execute it step by step
|
||||||
|
- Use search_files to find relevant files before reading them
|
||||||
|
- Use list_files to discover what files are available
|
||||||
|
- For file changes, use ```file_edit``` blocks (same format as before)
|
||||||
|
- You may combine text explanations, tool calls, and file edits in one response
|
||||||
|
- When you are DONE and need no more tool calls, simply respond with text only (no tool_call blocks)
|
||||||
|
- Keep responses focused and efficient
|
||||||
|
|
||||||
|
## file_edit format (for changes)
|
||||||
|
```file_edit
|
||||||
|
fileName: <filename>
|
||||||
|
oldContent: |
|
||||||
|
<exact existing content>
|
||||||
|
newContent: |
|
||||||
|
<replacement content>
|
||||||
|
```"""
|
||||||
|
|
||||||
|
|
||||||
|
def _buildConversationHistory(history: List[Dict[str, Any]]) -> str:
|
||||||
|
"""Build the full conversation history for agent multi-turn context."""
|
||||||
|
parts = []
|
||||||
|
for msg in history:
|
||||||
|
role = msg.get("role", "unknown")
|
||||||
|
content = msg.get("content", "")
|
||||||
|
if role == "tool_result":
|
||||||
|
toolName = msg.get("toolName", "")
|
||||||
|
parts.append(f"[Tool Result - {toolName}]:\n{content}")
|
||||||
|
else:
|
||||||
|
parts.append(f"[{role}]:\n{content}")
|
||||||
|
return "\n\n".join(parts)
|
||||||
|
|
||||||
|
|
||||||
|
def _buildChatHistory(chatHistory: List[Dict[str, Any]]) -> str:
|
||||||
|
"""Build a condensed chat history string for multi-turn context."""
|
||||||
|
if not chatHistory:
|
||||||
|
return ""
|
||||||
|
|
||||||
|
parts = []
|
||||||
|
for msg in chatHistory[-10:]:
|
||||||
|
role = msg.get("role", "unknown")
|
||||||
|
content = msg.get("content", "")
|
||||||
|
if len(content) > 500:
|
||||||
|
content = content[:500] + "..."
|
||||||
|
parts.append(f"[{role}]: {content}")
|
||||||
|
|
||||||
|
return "\n".join(parts)
|
||||||
184
modules/features/codeeditor/responseParser.py
Normal file
184
modules/features/codeeditor/responseParser.py
Normal file
|
|
@ -0,0 +1,184 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""Response parser for the CodeEditor feature.
|
||||||
|
Parses AI responses into typed segments (text, code_block, file_edit, tool_call)."""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
from typing import List, Optional
|
||||||
|
|
||||||
|
from modules.features.codeeditor.datamodelCodeeditor import ResponseSegment, SegmentTypeEnum
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
_FENCE_PATTERN = re.compile(r"^```(\w*)\s*$", re.MULTILINE)
|
||||||
|
|
||||||
|
|
||||||
|
def parseResponse(rawContent: str) -> List[ResponseSegment]:
    """Split an AI response into typed segments.

    Plain text accumulates until a ``` fence opens; fenced blocks become
    file_edit / tool_call segments when their body parses, otherwise plain
    code_block segments. Malformed structured blocks degrade to code blocks
    with language "text".
    """
    if not rawContent or not rawContent.strip():
        return []

    segments: List[ResponseSegment] = []
    lines = rawContent.split("\n")
    pendingText: List[str] = []
    idx = 0

    while idx < len(lines):
        fenceMatch = _FENCE_PATTERN.match(lines[idx])
        if not fenceMatch:
            pendingText.append(lines[idx])
            idx += 1
            continue

        # Entering a fenced block: emit any accumulated text first.
        if pendingText:
            _flushTextBuffer(pendingText, segments)
            pendingText = []

        lang = fenceMatch.group(1).strip()
        blockLines, closeIdx = _collectBlock(lines, idx + 1)
        blockContent = "\n".join(blockLines)

        segment = None
        if lang == "file_edit":
            segment = _parseFileEditBlock(blockContent)
        elif lang == "tool_call":
            segment = _parseToolCallBlock(blockContent)

        if segment is not None:
            segments.append(segment)
        elif lang in ("file_edit", "tool_call"):
            # Structured block failed to parse: keep it visible as plain code.
            segments.append(ResponseSegment(
                type=SegmentTypeEnum.CODE_BLOCK,
                content=blockContent,
                language="text"
            ))
        else:
            segments.append(ResponseSegment(
                type=SegmentTypeEnum.CODE_BLOCK,
                content=blockContent,
                language=lang or "text"
            ))

        idx = closeIdx + 1

    if pendingText:
        _flushTextBuffer(pendingText, segments)

    return segments
|
||||||
|
|
||||||
|
|
||||||
|
def hasToolCalls(segments: List[ResponseSegment]) -> bool:
    """Return True if at least one segment is a tool call."""
    for segment in segments:
        if segment.type == SegmentTypeEnum.TOOL_CALL:
            return True
    return False
|
||||||
|
|
||||||
|
|
||||||
|
def _collectBlock(lines: List[str], startIdx: int) -> tuple:
|
||||||
|
"""Collect lines inside a fenced code block until closing ```."""
|
||||||
|
blockLines = []
|
||||||
|
idx = startIdx
|
||||||
|
while idx < len(lines):
|
||||||
|
if lines[idx].strip() == "```":
|
||||||
|
return blockLines, idx
|
||||||
|
blockLines.append(lines[idx])
|
||||||
|
idx += 1
|
||||||
|
return blockLines, idx
|
||||||
|
|
||||||
|
|
||||||
|
def _flushTextBuffer(buffer: List[str], segments: List[ResponseSegment]):
    """Drain *buffer* into *segments* as a single text segment.

    The buffer is always cleared; a segment is only appended when the joined,
    stripped text is non-empty.
    """
    joined = "\n".join(buffer).strip()
    buffer.clear()
    if not joined:
        return
    segments.append(ResponseSegment(
        type=SegmentTypeEnum.TEXT,
        content=joined
    ))
|
||||||
|
|
||||||
|
|
||||||
|
def _parseFileEditBlock(blockContent: str) -> Optional[ResponseSegment]:
    """Parse a file_edit block into a ResponseSegment with fileName, oldContent, newContent.

    Walks the block line by line as a small state machine: a line starting
    with `fileName:`, `oldContent:` or `newContent:` switches the current
    field; subsequent lines accumulate into the active content field.
    Returns None (with a warning) when fileName or newContent is missing.
    """
    fields = {"fileName": None, "oldContent": None, "newContent": None}
    currentField = None  # which content field lines are currently collected into
    currentLines = []

    for line in blockContent.split("\n"):
        stripped = line.strip()

        # Does this line open one of the three known fields?
        newField = None
        for key in ("fileName", "oldContent", "newContent"):
            if stripped.startswith(f"{key}:"):
                newField = key
                break

        if newField:
            # Field switch: commit whatever was collected for the previous field.
            if currentField and currentLines:
                fields[currentField] = "\n".join(currentLines)
            currentField = newField
            value = stripped[len(f"{newField}:"):].strip()
            if newField == "fileName":
                # fileName is a single inline value, not a multi-line field.
                fields["fileName"] = value if value else None
                currentField = None
                currentLines = []
            else:
                # "|" marks a YAML-style block scalar: content starts on the next line.
                currentLines = [value] if value and value != "|" else []
        else:
            if currentField in ("oldContent", "newContent"):
                # NOTE(review): assumes content lines are indented by exactly
                # two spaces (YAML-style) and strips them — confirm against
                # the prompt's file_edit format.
                dedented = line[2:] if line.startswith("  ") else line
                currentLines.append(dedented)

    # Commit the final field still being collected when the block ended.
    if currentField and currentLines:
        fields[currentField] = "\n".join(currentLines)

    if not fields["fileName"]:
        logger.warning("file_edit block missing fileName")
        return None
    if fields["newContent"] is None:
        logger.warning(f"file_edit block for {fields['fileName']} missing newContent")
        return None

    return ResponseSegment(
        type=SegmentTypeEnum.FILE_EDIT,
        content=f"Edit: {fields['fileName']}",
        fileName=fields["fileName"],
        oldContent=fields["oldContent"],
        newContent=fields["newContent"]
    )
|
||||||
|
|
||||||
|
|
||||||
|
def _parseToolCallBlock(blockContent: str) -> Optional[ResponseSegment]:
    """Extract toolName and toolArgs from a tool_call block.

    Returns None (with a warning) when no tool name is present. Args that
    fail to parse as JSON are preserved under the "raw" key.
    """
    toolName = None
    toolArgs = {}

    for rawLine in blockContent.split("\n"):
        text = rawLine.strip()
        if text.startswith("tool:"):
            toolName = text[len("tool:"):].strip()
        elif text.startswith("args:"):
            argsText = text[len("args:"):].strip()
            try:
                toolArgs = json.loads(argsText)
            except json.JSONDecodeError:
                logger.warning(f"Could not parse tool args as JSON: {argsText}")
                toolArgs = {"raw": argsText}

    if not toolName:
        logger.warning("tool_call block missing tool name")
        return None

    return ResponseSegment(
        type=SegmentTypeEnum.TOOL_CALL,
        content=f"Tool: {toolName}",
        toolName=toolName,
        toolArgs=toolArgs
    )
|
||||||
395
modules/features/codeeditor/routeFeatureCodeeditor.py
Normal file
395
modules/features/codeeditor/routeFeatureCodeeditor.py
Normal file
|
|
@ -0,0 +1,395 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""
|
||||||
|
CodeEditor Feature Routes.
|
||||||
|
SSE-based endpoints for Cursor-style AI file editing.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import json
|
||||||
|
import asyncio
|
||||||
|
from typing import Optional, Dict, Any, List
|
||||||
|
|
||||||
|
from fastapi import APIRouter, HTTPException, Depends, Body, Path, Query, Request
|
||||||
|
from fastapi.responses import StreamingResponse
|
||||||
|
|
||||||
|
from modules.auth import limiter, getRequestContext, RequestContext
|
||||||
|
from modules.interfaces import interfaceDbChat, interfaceDbManagement
|
||||||
|
from modules.interfaces.interfaceAiObjects import AiObjects
|
||||||
|
from modules.datamodels.datamodelChat import UserInputRequest
|
||||||
|
from modules.features.chatbot.streaming.events import get_event_manager
|
||||||
|
from modules.features.codeeditor import codeEditorProcessor, fileContextManager
|
||||||
|
from modules.features.codeeditor.datamodelCodeeditor import FileEditProposal, EditStatusEnum
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
router = APIRouter(
|
||||||
|
prefix="/api/codeeditor",
|
||||||
|
tags=["Code Editor Feature"],
|
||||||
|
responses={404: {"description": "Not found"}}
|
||||||
|
)
|
||||||
|
|
||||||
|
_aiObjects: Optional[AiObjects] = None
|
||||||
|
|
||||||
|
|
||||||
|
async def _getAiObjects() -> AiObjects:
    """Lazy-init singleton for AiObjects.

    NOTE(review): not guarded by a lock — two concurrent first calls could
    both await AiObjects.create(); confirm that is acceptable or serialize
    initialization with an asyncio.Lock.
    """
    global _aiObjects
    if _aiObjects is None:
        _aiObjects = await AiObjects.create()
    return _aiObjects
|
||||||
|
|
||||||
|
|
||||||
|
def _getServiceChat(context: RequestContext, featureInstanceId: str = None):
    """Return a chat DB interface scoped to the caller's user, mandate, and feature instance."""
    mandate = str(context.mandateId) if context.mandateId else None
    return interfaceDbChat.getInterface(
        context.user,
        mandateId=mandate,
        featureInstanceId=featureInstanceId
    )
|
||||||
|
|
||||||
|
|
||||||
|
def _getDbManagement(context: RequestContext, featureInstanceId: str = None):
    """Return a management DB interface scoped to the caller's user, mandate, and feature instance."""
    mandate = str(context.mandateId) if context.mandateId else None
    return interfaceDbManagement.getInterface(
        context.user,
        mandateId=mandate,
        featureInstanceId=featureInstanceId
    )
|
||||||
|
|
||||||
|
|
||||||
|
def _validateInstanceAccess(instanceId: str, context: RequestContext) -> str:
    """Check the user may use the feature instance; return its mandateId (or None).

    Raises:
        HTTPException: 404 when the instance does not exist, 403 when the
        user has no enabled FeatureAccess for it.
    """
    from modules.interfaces.interfaceDbApp import getRootInterface

    rootInterface = getRootInterface()

    instance = rootInterface.getFeatureInstance(instanceId)
    if not instance:
        raise HTTPException(status_code=404, detail=f"Feature instance {instanceId} not found")

    access = rootInterface.getFeatureAccess(str(context.user.id), instanceId)
    if not access or not access.enabled:
        raise HTTPException(status_code=403, detail="Access denied to this feature instance")

    return str(instance.mandateId) if instance.mandateId else None
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/{instanceId}/start/stream")
@limiter.limit("60/minute")
async def streamCodeeditorStart(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: Optional[str] = Query(None, description="Optional workflow ID to continue"),
    mode: str = Query("simple", description="Processing mode: 'simple' (single AI call) or 'agent' (multi-step with tools)"),
    userInput: UserInputRequest = Body(...),
    context: RequestContext = Depends(getRequestContext)
):
    """Start or continue a CodeEditor workflow with SSE streaming. Supports simple and agent mode.

    Kicks off codeEditorProcessor.processMessage as a background task and
    returns a StreamingResponse that relays events from the workflow's
    event queue until the workflow completes, errors, is stopped, the
    client disconnects, or 300s pass with no event.
    """
    try:
        # NOTE(review): mandateId is captured but not used below — confirm
        # whether it should be passed on to the processor.
        mandateId = _validateInstanceAccess(instanceId, context)
        chatInterface = _getServiceChat(context, featureInstanceId=instanceId)
        dbManagement = _getDbManagement(context, featureInstanceId=instanceId)
        aiObjects = await _getAiObjects()
        eventManager = get_event_manager()

        # Continue an existing workflow, or create a fresh one labelled with
        # the first 80 chars of the prompt.
        if workflowId:
            workflow = chatInterface.getWorkflow(workflowId)
            if not workflow:
                raise HTTPException(status_code=404, detail=f"Workflow {workflowId} not found")
        else:
            workflow = chatInterface.createWorkflow({
                "workflowMode": "CodeEditor",
                "status": "running",
                "label": userInput.prompt[:80] if userInput.prompt else "CodeEditor Session",
            })
            workflowId = workflow.get("id") if isinstance(workflow, dict) else workflow.id

        # Per-workflow event queue the processor will push into.
        queue = eventManager.create_queue(workflowId)

        # Echo the user's message to the stream before processing starts.
        userMessage = {
            "role": "user",
            "content": userInput.prompt,
            "selectedFiles": userInput.listFileId or []
        }
        await eventManager.emit_event(workflowId, "chatdata", {
            "type": "message", "item": userMessage
        })

        selectedFileIds = userInput.listFileId or []

        agentMode = mode.lower() == "agent"

        # Fire-and-forget: the processor streams its results via eventManager.
        asyncio.create_task(
            codeEditorProcessor.processMessage(
                workflowId=workflowId,
                userPrompt=userInput.prompt,
                selectedFileIds=selectedFileIds,
                dbManagement=dbManagement,
                interfaceAi=aiObjects,
                chatInterface=chatInterface,
                eventManager=eventManager,
                agentMode=agentMode
            )
        )

        async def _eventStream():
            # Abort the stream after 300s without any event.
            streamTimeout = 300
            lastActivity = asyncio.get_event_loop().time()

            while True:
                now = asyncio.get_event_loop().time()
                if now - lastActivity > streamTimeout:
                    yield f"data: {json.dumps({'type': 'error', 'error': 'Stream timeout'})}\n\n"
                    break

                if await request.is_disconnected():
                    logger.info(f"Client disconnected for workflow {workflowId}")
                    break

                try:
                    # Poll the queue in 1s slices so disconnect/timeout checks run.
                    event = await asyncio.wait_for(queue.get(), timeout=1.0)
                    lastActivity = asyncio.get_event_loop().time()

                    eventType = event.get("type", "")

                    if eventType == "chatdata":
                        yield f"data: {json.dumps(event.get('data', {}))}\n\n"
                    elif eventType in ("complete", "stopped", "error"):
                        # Terminal events end the stream.
                        yield f"data: {json.dumps({'type': eventType, **event.get('data', {})})}\n\n"
                        break
                    else:
                        yield f"data: {json.dumps(event)}\n\n"

                except asyncio.TimeoutError:
                    # SSE comment keeps the connection alive through proxies.
                    yield ": keepalive\n\n"

            # Tear down the event queue once streaming ends for any reason.
            await eventManager.cleanup(workflowId)

        return StreamingResponse(
            _eventStream(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                # Disable nginx response buffering so events flush immediately.
                "X-Accel-Buffering": "no"
            }
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error in streamCodeeditorStart: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/{instanceId}/{workflowId}/stop")
@limiter.limit("120/minute")
async def stopWorkflow(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: str = Path(..., description="Workflow ID"),
    context: RequestContext = Depends(getRequestContext)
):
    """Stop a running CodeEditor workflow by emitting a 'stopped' event."""
    try:
        _validateInstanceAccess(instanceId, context)
        payload = {"workflowId": workflowId}
        await get_event_manager().emit_event(
            workflowId,
            "stopped",
            payload,
            event_category="workflow",
            message="Workflow stopped by user"
        )
        return {"status": "stopped", "workflowId": workflowId}
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error stopping workflow: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/{instanceId}/{workflowId}/chatData")
@limiter.limit("120/minute")
def getWorkflowChatData(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: str = Path(..., description="Workflow ID"),
    afterTimestamp: Optional[float] = Query(None, description="Unix timestamp for incremental fetch"),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """Fetch chat data for a workflow — the polling fallback to SSE."""
    try:
        _validateInstanceAccess(instanceId, context)
        chatInterface = _getServiceChat(context, featureInstanceId=instanceId)
        if not chatInterface.getWorkflow(workflowId):
            raise HTTPException(status_code=404, detail=f"Workflow {workflowId} not found")
        return chatInterface.getUnifiedChatData(workflowId, afterTimestamp)
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting chat data: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/{instanceId}/workflows")
@limiter.limit("120/minute")
def getWorkflows(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    page: int = Query(1, ge=1),
    pageSize: int = Query(20, ge=1, le=100),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """List this feature instance's workflows, paginated."""
    try:
        _validateInstanceAccess(instanceId, context)
        chatInterface = _getServiceChat(context, featureInstanceId=instanceId)
        from modules.datamodels.datamodelPagination import PaginationParams
        return chatInterface.getWorkflows(
            pagination=PaginationParams(page=page, pageSize=pageSize)
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting workflows: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/{instanceId}/files")
@limiter.limit("120/minute")
def getFiles(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """List the text files available to the caller (metadata only, no content)."""
    try:
        _validateInstanceAccess(instanceId, context)
        dbManagement = _getDbManagement(context, featureInstanceId=instanceId)
        textFiles = fileContextManager.listTextFiles(dbManagement)
        fileList = [f.model_dump(exclude={"content"}) for f in textFiles]
        return {"files": fileList, "count": len(fileList)}
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error listing files: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/{instanceId}/files/{fileId}/content")
@limiter.limit("120/minute")
def getFileContent(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    fileId: str = Path(..., description="File ID"),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """Return a single file's UTF-8 text content plus basic metadata.

    Responds 404 when the file record or its stored bytes are missing,
    400 when the bytes do not decode as UTF-8, 500 on unexpected errors.
    """
    try:
        _validateInstanceAccess(instanceId, context)
        dbManagement = _getDbManagement(context, featureInstanceId=instanceId)

        fileRecord = dbManagement.getFile(fileId)
        if not fileRecord:
            raise HTTPException(status_code=404, detail=f"File {fileId} not found")

        rawBytes = dbManagement.getFileData(fileId)
        if not rawBytes:
            raise HTTPException(status_code=404, detail=f"No data for file {fileId}")

        try:
            text = rawBytes.decode("utf-8")
        except UnicodeDecodeError:
            raise HTTPException(status_code=400, detail="File is not valid UTF-8 text")

        return {
            "fileId": fileId,
            "fileName": fileRecord.fileName,
            "mimeType": fileRecord.mimeType,
            "content": text
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting file content: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/{instanceId}/{workflowId}/apply")
@limiter.limit("60/minute")
async def applyEdit(
    request: Request,
    instanceId: str = Path(..., description="Feature instance ID"),
    workflowId: str = Path(..., description="Workflow ID"),
    proposalData: Dict[str, Any] = Body(...),
    context: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
    """Accept a file edit proposal. Updates existing file or creates new one.

    Expects ``proposalData`` with:
      - ``newContent`` (str, required): full replacement text for the file.
      - ``fileId`` (str, optional): target file; an empty value or an
        ``unknown-`` placeholder id means a new file is created instead.
      - ``fileName`` (str, optional): name for new files (also used to guess
        the MIME type), or an override name echoed back for updates.

    Emits a ``file_version`` event on the workflow's ``chatdata`` channel and
    returns the accepted file's id/name plus whether it was newly created.

    Raises 400 on missing or non-string payload fields, 404 when the target
    file does not exist, 500 on storage failures or unexpected errors.
    """
    try:
        _validateInstanceAccess(instanceId, context)
        dbManagement = _getDbManagement(context, featureInstanceId=instanceId)

        fileId = proposalData.get("fileId", "")
        newContent = proposalData.get("newContent")
        fileName = proposalData.get("fileName", "")

        if newContent is None:
            raise HTTPException(status_code=400, detail="newContent is required")
        # Validate types of untrusted body fields: .encode()/.startswith()
        # below would otherwise raise AttributeError and surface as a 500.
        if not isinstance(newContent, str):
            raise HTTPException(status_code=400, detail="newContent must be a string")
        if not isinstance(fileId, str) or not isinstance(fileName, str):
            raise HTTPException(status_code=400, detail="fileId and fileName must be strings")

        contentBytes = newContent.encode("utf-8")
        # Proposals without a real target id ("unknown-" is a client-side
        # placeholder) create a new file instead of updating one.
        isNewFile = not fileId or fileId.startswith("unknown-")

        if isNewFile:
            mimeType = _guessMimeType(fileName)
            fileItem = dbManagement.createFile(fileName, mimeType, contentBytes)
            resultFileId = fileItem.id
            resultFileName = fileItem.fileName
        else:
            fileItem = dbManagement.getFile(fileId)
            if not fileItem:
                raise HTTPException(status_code=404, detail=f"File {fileId} not found")
            success = dbManagement.createFileData(fileId, contentBytes)
            if not success:
                raise HTTPException(status_code=500, detail="Failed to store updated file content")
            resultFileId = fileId
            resultFileName = fileName or fileItem.fileName

        # Notify listeners (e.g. the editor UI) that a file version was accepted.
        eventManager = get_event_manager()
        await eventManager.emit_event(workflowId, "chatdata", {
            "type": "file_version",
            "item": {
                "fileId": resultFileId,
                "fileName": resultFileName,
                "status": "accepted",
                "isNew": isNewFile
            }
        })

        return {
            "status": "accepted",
            "fileId": resultFileId,
            "fileName": resultFileName,
            "isNew": isNewFile
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error applying edit: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
|
||||||
|
|
||||||
|
|
||||||
|
_MIME_MAP = {
|
||||||
|
".md": "text/markdown", ".txt": "text/plain", ".json": "application/json",
|
||||||
|
".yaml": "application/yaml", ".yml": "application/yaml", ".xml": "application/xml",
|
||||||
|
".csv": "text/csv", ".py": "text/x-python", ".js": "text/javascript",
|
||||||
|
".ts": "text/x-typescript", ".html": "text/html", ".css": "text/css",
|
||||||
|
".sql": "text/x-sql", ".sh": "text/x-shellscript",
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _guessMimeType(fileName: str) -> str:
|
||||||
|
"""Guess MIME type from file extension."""
|
||||||
|
if not fileName or "." not in fileName:
|
||||||
|
return "text/plain"
|
||||||
|
ext = "." + fileName.rsplit(".", 1)[-1].lower()
|
||||||
|
return _MIME_MAP.get(ext, "text/plain")
|
||||||
157
modules/features/codeeditor/toolRegistry.py
Normal file
157
modules/features/codeeditor/toolRegistry.py
Normal file
|
|
@ -0,0 +1,157 @@
|
||||||
|
# Copyright (c) 2025 Patrick Motsch
|
||||||
|
# All rights reserved.
|
||||||
|
"""Tool registry and dispatcher for the CodeEditor agent loop.
|
||||||
|
Defines available tools and executes them against the file context manager."""
|
||||||
|
|
||||||
|
import fnmatch
import logging
import time
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
|
from modules.features.codeeditor.datamodelCodeeditor import ToolResult
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Catalog of tools the CodeEditor agent may invoke. Each entry carries the
# tool's name, a natural-language description shown to the model, and a
# human-readable parameter spec (free-form strings, not a JSON schema).
TOOL_DEFINITIONS = [
    {
        "name": "read_file",
        "description": "Read the full content of a single file by its fileId.",
        "parameters": {"fileId": "string (required)"}
    },
    {
        "name": "list_files",
        "description": "List all available text files with metadata (name, size, mimeType). Optionally filter by glob pattern.",
        "parameters": {"filter": "string (optional, glob pattern e.g. '*.py')"}
    },
    {
        "name": "search_files",
        "description": "Search all file contents for a text query. Returns matching lines with file name and line number.",
        "parameters": {"query": "string (required)", "fileType": "string (optional, extension e.g. 'py')"}
    },
]
|
||||||
|
|
||||||
|
|
||||||
|
async def dispatch(toolName: str, toolArgs: Dict[str, Any], dbManagement) -> ToolResult:
    """Execute the named tool and wrap its outcome in a ToolResult.

    Unknown tool names and raised exceptions both yield success=False;
    executionTime always records the wall-clock duration of the attempt.
    """
    startTime = time.time()
    try:
        if toolName == "read_file":
            output = await _toolReadFile(toolArgs, dbManagement)
        elif toolName == "list_files":
            output = _toolListFiles(toolArgs, dbManagement)
        elif toolName == "search_files":
            output = await _toolSearchFiles(toolArgs, dbManagement)
        else:
            return ToolResult(
                toolName=toolName,
                result=f"Unknown tool: {toolName}",
                success=False,
                executionTime=time.time() - startTime,
            )

        return ToolResult(
            toolName=toolName,
            result=output,
            success=True,
            executionTime=time.time() - startTime,
        )
    except Exception as e:
        logger.error(f"Tool {toolName} failed: {e}", exc_info=True)
        return ToolResult(
            toolName=toolName,
            result=f"Error: {str(e)}",
            success=False,
            executionTime=time.time() - startTime,
        )
|
||||||
|
|
||||||
|
|
||||||
|
async def _toolReadFile(args: Dict[str, Any], dbManagement) -> str:
|
||||||
|
"""Read a single file's content."""
|
||||||
|
fileId = args.get("fileId", "")
|
||||||
|
if not fileId:
|
||||||
|
return "Error: fileId is required"
|
||||||
|
|
||||||
|
fileItem = dbManagement.getFile(fileId)
|
||||||
|
if not fileItem:
|
||||||
|
return f"Error: File {fileId} not found"
|
||||||
|
|
||||||
|
fileData = dbManagement.getFileData(fileId)
|
||||||
|
if not fileData:
|
||||||
|
return f"Error: No data for file {fileId}"
|
||||||
|
|
||||||
|
try:
|
||||||
|
content = fileData.decode("utf-8")
|
||||||
|
except UnicodeDecodeError:
|
||||||
|
return f"Error: File {fileItem.fileName} is not valid UTF-8"
|
||||||
|
|
||||||
|
lines = content.split("\n")
|
||||||
|
numbered = "\n".join([f"{i + 1}|{line}" for i, line in enumerate(lines)])
|
||||||
|
return f"--- FILE: {fileItem.fileName} (id: {fileId}) ---\n{numbered}\n--- END FILE ---"
|
||||||
|
|
||||||
|
|
||||||
|
def _toolListFiles(args: Dict[str, Any], dbManagement) -> str:
    """Render a bullet list of text files, optionally narrowed by a glob pattern."""
    from modules.features.codeeditor.datamodelCodeeditor import isTextFile

    filterPattern = args.get("filter", "")
    allFiles = dbManagement.getAllFiles()
    if not allFiles:
        return "No files found."

    # Keep only text files; apply the glob filter when one was provided.
    entries = [
        f"- {f.fileName} (id: {f.id}, size: {f.fileSize}B, type: {f.mimeType})"
        for f in allFiles
        if isTextFile(f.mimeType, f.fileName)
        and (not filterPattern or fnmatch.fnmatch(f.fileName, filterPattern))
    ]

    if not entries:
        return "No matching text files found."
    return f"Available files ({len(entries)}):\n" + "\n".join(entries)
|
||||||
|
|
||||||
|
|
||||||
|
async def _toolSearchFiles(args: Dict[str, Any], dbManagement) -> str:
    """Case-insensitively search decodable text files for a query string.

    Results are "fileName:lineNumber: line" entries, capped at 50 matches;
    a truncation note is appended when the cap is reached. Binary and
    non-UTF-8 files are skipped silently.
    """
    from modules.features.codeeditor.datamodelCodeeditor import isTextFile

    query = args.get("query", "")
    if not query:
        return "Error: query is required"

    fileType = args.get("fileType", "")
    allFiles = dbManagement.getAllFiles()
    if not allFiles:
        return "No files to search."

    maxHits = 50
    needle = query.lower()
    hits: List[str] = []

    for fileItem in allFiles:
        if len(hits) >= maxHits:
            break
        if not isTextFile(fileItem.mimeType, fileItem.fileName):
            continue
        # Optional extension filter, e.g. fileType="py" matches "*.py".
        if fileType and not fileItem.fileName.endswith(f".{fileType}"):
            continue

        rawData = dbManagement.getFileData(fileItem.id)
        if not rawData:
            continue
        try:
            text = rawData.decode("utf-8")
        except UnicodeDecodeError:
            continue

        for lineNumber, line in enumerate(text.split("\n"), 1):
            if needle in line.lower():
                hits.append(f"{fileItem.fileName}:{lineNumber}: {line.strip()}")
                if len(hits) >= maxHits:
                    break

    if not hits:
        return f"No matches found for '{query}'."
    result = f"Search results for '{query}' ({len(hits)} matches):\n" + "\n".join(hits)
    if len(hits) >= maxHits:
        result += f"\n... (truncated at {maxHits} matches)"
    return result
|
||||||
|
|
||||||
|
|
||||||
|
def formatToolDefinitions(tools: Optional[List[Dict[str, Any]]] = None) -> str:
    """Format tool definitions as a markdown bullet list for the system prompt.

    Args:
        tools: Tool definition dicts, each with ``name``, ``description`` and
            a ``parameters`` mapping. Defaults to this module's
            TOOL_DEFINITIONS, so existing callers are unaffected.

    Returns:
        One "- **name**: description" bullet per tool with its parameters
        rendered inline, joined by newlines (empty string for no tools).
    """
    if tools is None:
        tools = TOOL_DEFINITIONS
    parts = []
    for tool in tools:
        params = ", ".join(f"{k}: {v}" for k, v in tool["parameters"].items())
        parts.append(f"- **{tool['name']}**: {tool['description']}\n Parameters: {{{params}}}")
    return "\n".join(parts)
|
||||||
|
|
@ -401,14 +401,22 @@ class TeamsbotService:
|
||||||
if len(audioBytes) < 1000:
|
if len(audioBytes) < 1000:
|
||||||
return
|
return
|
||||||
|
|
||||||
|
# Detect silent/all-zeros audio early to avoid expensive STT calls
|
||||||
|
if len(set(audioBytes[100:min(500, len(audioBytes))])) < 3:
|
||||||
|
logger.debug(f"[AudioChunk] Skipping silent audio ({len(audioBytes)} bytes, low byte variation)")
|
||||||
|
return
|
||||||
|
|
||||||
if not voiceInterface:
|
if not voiceInterface:
|
||||||
logger.warning(f"[AudioChunk] No voice interface available for session {sessionId}")
|
logger.warning(f"[AudioChunk] No voice interface available for session {sessionId}")
|
||||||
return
|
return
|
||||||
|
|
||||||
|
# Treat sampleRate=0 as unknown (triggers auto-detection)
|
||||||
|
effectiveSampleRate = sampleRate if sampleRate and sampleRate > 0 else None
|
||||||
|
|
||||||
sttResult = await voiceInterface.speechToText(
|
sttResult = await voiceInterface.speechToText(
|
||||||
audioContent=audioBytes,
|
audioContent=audioBytes,
|
||||||
language=self.config.language or "de-DE",
|
language=self.config.language or "de-DE",
|
||||||
sampleRate=sampleRate,
|
sampleRate=effectiveSampleRate,
|
||||||
)
|
)
|
||||||
|
|
||||||
if sttResult and sttResult.get("success") and sttResult.get("text"):
|
if sttResult and sttResult.get("success") and sttResult.get("text"):
|
||||||
|
|
|
||||||
|
|
@ -1932,6 +1932,7 @@ def _createStoreResourceRules(db: DatabaseConnector) -> None:
|
||||||
storeResources = [
|
storeResources = [
|
||||||
"resource.store.automation",
|
"resource.store.automation",
|
||||||
"resource.store.chatplayground",
|
"resource.store.chatplayground",
|
||||||
|
"resource.store.codeeditor",
|
||||||
"resource.store.teamsbot",
|
"resource.store.teamsbot",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -105,6 +105,9 @@ def _getFeatureUiObjects(featureCode: str) -> List[Dict[str, Any]]:
|
||||||
elif featureCode == "chatplayground":
|
elif featureCode == "chatplayground":
|
||||||
from modules.features.chatplayground.mainChatplayground import UI_OBJECTS
|
from modules.features.chatplayground.mainChatplayground import UI_OBJECTS
|
||||||
return UI_OBJECTS
|
return UI_OBJECTS
|
||||||
|
elif featureCode == "codeeditor":
|
||||||
|
from modules.features.codeeditor.mainCodeeditor import UI_OBJECTS
|
||||||
|
return UI_OBJECTS
|
||||||
elif featureCode == "automation":
|
elif featureCode == "automation":
|
||||||
from modules.features.automation.mainAutomation import UI_OBJECTS
|
from modules.features.automation.mainAutomation import UI_OBJECTS
|
||||||
return UI_OBJECTS
|
return UI_OBJECTS
|
||||||
|
|
|
||||||
|
|
@ -442,6 +442,11 @@ RESOURCE_OBJECTS = [
|
||||||
"label": {"en": "Store: Chat Playground", "de": "Store: Chat Playground", "fr": "Store: Chat Playground"},
|
"label": {"en": "Store: Chat Playground", "de": "Store: Chat Playground", "fr": "Store: Chat Playground"},
|
||||||
"meta": {"category": "store", "featureCode": "chatplayground"}
|
"meta": {"category": "store", "featureCode": "chatplayground"}
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"objectKey": "resource.store.codeeditor",
|
||||||
|
"label": {"en": "Store: Code Editor", "de": "Store: Code Editor", "fr": "Store: Code Editor"},
|
||||||
|
"meta": {"category": "store", "featureCode": "codeeditor"}
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"objectKey": "resource.store.teamsbot",
|
"objectKey": "resource.store.teamsbot",
|
||||||
"label": {"en": "Store: Teams Bot", "de": "Store: Teams Bot", "fr": "Store: Teams Bot"},
|
"label": {"en": "Store: Teams Bot", "de": "Store: Teams Bot", "fr": "Store: Teams Bot"},
|
||||||
|
|
|
||||||
Loading…
Reference in a new issue