From e24ef426174e1759b4a513d620cf94b649cc4aa8 Mon Sep 17 00:00:00 2001
From: patrick-motsch
Date: Sun, 15 Feb 2026 10:08:50 +0100
Subject: [PATCH] feat(teamsbot): AI voice test, config save, camelCase
mapping, default voices
- Voice test endpoint generates sample text dynamically via AI in selected language
- Fixed config save: added "config" to allowed update fields in interfaceFeatures
- Clean camelCase mapping in interfaceVoiceObjects (audio_content -> audioContent)
- Default TTS voices for common languages in connectorVoiceGoogle
- Fixed updateFeatureInstanceConfig -> updateFeatureInstance with config field
Co-authored-by: Cursor Agent <cursoragent@cursor.com>
---
modules/connectors/connectorVoiceGoogle.py | 12 +++++--
.../features/teamsbot/routeFeatureTeamsbot.py | 34 ++++++++++++++++---
modules/interfaces/interfaceFeatures.py | 2 +-
modules/interfaces/interfaceVoiceObjects.py | 11 ++++--
4 files changed, 50 insertions(+), 9 deletions(-)
diff --git a/modules/connectors/connectorVoiceGoogle.py b/modules/connectors/connectorVoiceGoogle.py
index 85a19bf7..42efffcf 100644
--- a/modules/connectors/connectorVoiceGoogle.py
+++ b/modules/connectors/connectorVoiceGoogle.py
@@ -778,9 +778,17 @@ class ConnectorGoogleSpeech:
def _getDefaultVoice(self, languageCode: str) -> str:
"""
Get default voice name for a language code.
- Returns None - no defaults, let the frontend handle voice selection.
+ Falls back to a Wavenet voice for common languages.
"""
- return None
+ _defaults = {
+ "de-DE": "de-DE-Wavenet-A",
+ "de-CH": "de-DE-Wavenet-A",
+ "en-US": "en-US-Wavenet-C",
+ "en-GB": "en-GB-Wavenet-A",
+ "fr-FR": "fr-FR-Wavenet-A",
+ "it-IT": "it-IT-Wavenet-A",
+ }
+ return _defaults.get(languageCode)
async def getAvailableLanguages(self) -> Dict[str, Any]:
"""
diff --git a/modules/features/teamsbot/routeFeatureTeamsbot.py b/modules/features/teamsbot/routeFeatureTeamsbot.py
index 91cd4cec..2abd2cf7 100644
--- a/modules/features/teamsbot/routeFeatureTeamsbot.py
+++ b/modules/features/teamsbot/routeFeatureTeamsbot.py
@@ -391,7 +391,7 @@ async def updateConfig(
# Save to FeatureInstance.config
rootInterface = getRootInterface()
featureInterface = getFeatureInterface(rootInterface.db)
- featureInterface.updateFeatureInstanceConfig(instanceId, mergedConfig.model_dump())
+ featureInterface.updateFeatureInstance(instanceId, {"config": mergedConfig.model_dump()})
logger.info(f"Teamsbot config updated for instance {instanceId}: {list(updateDict.keys())}")
return {"config": mergedConfig.model_dump()}
@@ -408,20 +408,45 @@ async def testVoice(
instanceId: str,
context: RequestContext = Depends(getRequestContext),
):
- """Test TTS voice with a sample text. Returns base64-encoded audio."""
+ """Test TTS voice with AI-generated sample text in the correct language."""
from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
+ from modules.services.serviceAi.mainServiceAi import AiService
+ from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum
mandateId = _validateInstanceAccess(instanceId, context)
body = await request.json()
- text = body.get("text", "Hallo, ich bin der AI-Assistent. Wie kann ich helfen?")
language = body.get("language", "de-DE")
voiceId = body.get("voiceId")
+ botName = body.get("botName", "AI Assistant")
try:
+ # Generate test text dynamically via AI in the correct language
+ serviceContext = type('Ctx', (), {
+ 'user': context.user, 'mandateId': mandateId,
+ 'featureInstanceId': instanceId, 'featureCode': 'teamsbot'
+ })()
+ aiService = AiService(serviceCenter=serviceContext)
+ await aiService.ensureAiObjectsInitialized()
+
+ aiRequest = AiCallRequest(
+ prompt=f"Generate a short, friendly introduction sentence (max 2 sentences) for a meeting bot named '{botName}'. "
+ f"Write ONLY in the language '{language}'. No quotes, no explanation, just the text to speak.",
+ context="",
+ options=AiCallOptions(
+ operationType=OperationTypeEnum.DATA_ANALYSE,
+ priority=PriorityEnum.SPEED,
+ )
+ )
+ aiResponse = await aiService.callAi(aiRequest)
+ testText = aiResponse.content.strip().strip('"').strip("'") if aiResponse and aiResponse.errorCount == 0 else f"Hello, I am {botName}."
+
+ logger.info(f"Voice test: generated text in {language}: '{testText[:60]}...'")
+
+ # Convert to speech
voiceInterface = getVoiceInterface(context.user, mandateId)
result = await voiceInterface.textToSpeech(
- text=text,
+ text=testText,
languageCode=language,
voiceName=voiceId,
)
@@ -439,6 +464,7 @@ async def testVoice(
"format": "mp3",
"language": language,
"voiceId": voiceId,
+ "text": testText,
}
return {"success": False, "error": "TTS returned no audio"}
diff --git a/modules/interfaces/interfaceFeatures.py b/modules/interfaces/interfaceFeatures.py
index 4866a0d6..b261e76e 100644
--- a/modules/interfaces/interfaceFeatures.py
+++ b/modules/interfaces/interfaceFeatures.py
@@ -406,7 +406,7 @@ class FeatureInterface:
return None
# Only allow updating specific fields
- allowedFields = {"label", "enabled"}
+ allowedFields = {"label", "enabled", "config"}
filteredData = {k: v for k, v in updateData.items() if k in allowedFields}
if not filteredData:
diff --git a/modules/interfaces/interfaceVoiceObjects.py b/modules/interfaces/interfaceVoiceObjects.py
index cccebce4..6efe51cf 100644
--- a/modules/interfaces/interfaceVoiceObjects.py
+++ b/modules/interfaces/interfaceVoiceObjects.py
@@ -271,10 +271,17 @@ class VoiceObjects:
if result["success"]:
logger.info(f"✅ Text-to-Speech successful: {len(result['audio_content'])} bytes")
+ # Map connector snake_case keys to camelCase for consistent API
+ return {
+ "success": True,
+ "audioContent": result["audio_content"],
+ "audioFormat": result.get("audio_format", "mp3"),
+ "languageCode": result.get("language_code", languageCode),
+ "voiceName": result.get("voice_name", voiceName),
+ }
else:
logger.warning(f"⚠️ Text-to-Speech failed: {result.get('error', 'Unknown error')}")
-
- return result
+ return result
except Exception as e:
logger.error(f"❌ Text-to-Speech error: {e}")