commit
923b05b410
35 changed files with 846 additions and 64 deletions
9
app.py
9
app.py
|
|
@ -154,10 +154,10 @@ async def lifespan(app: FastAPI):
|
|||
scheduler = AsyncIOScheduler(timezone=ZoneInfo("Europe/Zurich"))
|
||||
try:
|
||||
from modules.services.serviceDeltaSync import perform_sync_jira_delta_group
|
||||
# Schedule hourly sync at minute 0
|
||||
# Schedule sync every 20 minutes (at minutes 00, 20, 40)
|
||||
scheduler.add_job(
|
||||
perform_sync_jira_delta_group,
|
||||
CronTrigger(minute="0"),
|
||||
CronTrigger(minute="0,20,40"),
|
||||
id="jira_delta_group_sync",
|
||||
replace_existing=True,
|
||||
coalesce=True,
|
||||
|
|
@ -165,7 +165,7 @@ async def lifespan(app: FastAPI):
|
|||
misfire_grace_time=1800,
|
||||
)
|
||||
scheduler.start()
|
||||
logger.info("APScheduler started (jira_delta_group_sync hourly)")
|
||||
logger.info("APScheduler started (jira_delta_group_sync every 20 minutes at 00, 20, 40)")
|
||||
|
||||
# Run initial sync on startup (non-blocking failure)
|
||||
try:
|
||||
|
|
@ -248,3 +248,6 @@ app.include_router(msftRouter)
|
|||
|
||||
from modules.routes.routeSecurityGoogle import router as googleRouter
|
||||
app.include_router(googleRouter)
|
||||
|
||||
from modules.routes.routeVoiceGoogle import router as voiceGoogleRouter
|
||||
app.include_router(voiceGoogleRouter)
|
||||
|
|
@ -48,6 +48,9 @@ Service_GOOGLE_CLIENT_SECRET = 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
|
|||
# Tavily Web Search configuration
|
||||
Connector_WebTavily_API_KEY = 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
|
||||
|
||||
# Google Cloud Speech Services configuration
|
||||
# Set GOOGLE_APPLICATION_CREDENTIALS environment variable or place credentials file in project directory
|
||||
|
||||
# Web Search configuration
|
||||
Web_Search_MAX_QUERY_LENGTH = 400
|
||||
Web_Search_MAX_RESULTS = 20
|
||||
|
|
|
|||
BIN
debug_audio/audio_20250913_223438.wav
Normal file
BIN
debug_audio/audio_20250913_223438.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_223658.wav
Normal file
BIN
debug_audio/audio_20250913_223658.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_224003.wav
Normal file
BIN
debug_audio/audio_20250913_224003.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_224258.wav
Normal file
BIN
debug_audio/audio_20250913_224258.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_224524.wav
Normal file
BIN
debug_audio/audio_20250913_224524.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_224801.wav
Normal file
BIN
debug_audio/audio_20250913_224801.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_230817.wav
Normal file
BIN
debug_audio/audio_20250913_230817.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_230927.wav
Normal file
BIN
debug_audio/audio_20250913_230927.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_231253.wav
Normal file
BIN
debug_audio/audio_20250913_231253.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_231321.wav
Normal file
BIN
debug_audio/audio_20250913_231321.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_231611.wav
Normal file
BIN
debug_audio/audio_20250913_231611.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_231935.wav
Normal file
BIN
debug_audio/audio_20250913_231935.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_232141.wav
Normal file
BIN
debug_audio/audio_20250913_232141.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_232309.wav
Normal file
BIN
debug_audio/audio_20250913_232309.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_232518.wav
Normal file
BIN
debug_audio/audio_20250913_232518.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_232659.wav
Normal file
BIN
debug_audio/audio_20250913_232659.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_232941.wav
Normal file
BIN
debug_audio/audio_20250913_232941.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_233053.wav
Normal file
BIN
debug_audio/audio_20250913_233053.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_233155.wav
Normal file
BIN
debug_audio/audio_20250913_233155.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_233607.wav
Normal file
BIN
debug_audio/audio_20250913_233607.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_234106.wav
Normal file
BIN
debug_audio/audio_20250913_234106.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_234245.wav
Normal file
BIN
debug_audio/audio_20250913_234245.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_234843.wav
Normal file
BIN
debug_audio/audio_20250913_234843.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_235136.wav
Normal file
BIN
debug_audio/audio_20250913_235136.wav
Normal file
Binary file not shown.
BIN
debug_audio/audio_20250913_235409.wav
Normal file
BIN
debug_audio/audio_20250913_235409.wav
Normal file
Binary file not shown.
|
|
@ -1,44 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
Script to force Microsoft re-authentication for SharePoint access.
|
||||
This will disconnect the existing connection and provide a new login URL.
|
||||
"""
|
||||
|
||||
import requests
|
||||
import json
|
||||
|
||||
# Configuration
|
||||
BASE_URL = "http://localhost:8000" # Adjust if your server runs on different port
|
||||
CONNECTION_ID = "cc62583d-3a68-44b6-8283-726725916a7e" # From the logs
|
||||
|
||||
def force_reauth():
|
||||
"""Force Microsoft re-authentication by disconnecting and providing new login URL."""
|
||||
|
||||
print("🔄 Forcing Microsoft re-authentication for SharePoint access...")
|
||||
|
||||
# Step 1: Disconnect existing connection
|
||||
print(f"1. Disconnecting connection {CONNECTION_ID}...")
|
||||
disconnect_url = f"{BASE_URL}/api/connections/{CONNECTION_ID}/disconnect"
|
||||
|
||||
try:
|
||||
response = requests.post(disconnect_url)
|
||||
if response.status_code == 200:
|
||||
print("✅ Connection disconnected successfully")
|
||||
else:
|
||||
print(f"❌ Failed to disconnect: {response.status_code} - {response.text}")
|
||||
return
|
||||
except Exception as e:
|
||||
print(f"❌ Error disconnecting: {e}")
|
||||
return
|
||||
|
||||
# Step 2: Get new login URL
|
||||
print("2. Getting new Microsoft login URL...")
|
||||
login_url = f"{BASE_URL}/api/msft/login?state=connection&connectionId={CONNECTION_ID}"
|
||||
|
||||
print(f"\n🔗 Please visit this URL to re-authenticate with SharePoint permissions:")
|
||||
print(f" {login_url}")
|
||||
print("\nAfter re-authentication, the JIRA sync should work with SharePoint access.")
|
||||
print("\nNote: The new token will include Sites.ReadWrite.All and Files.ReadWrite.All scopes.")
|
||||
|
||||
if __name__ == "__main__":
|
||||
force_reauth()
|
||||
285
modules/connectors/connectorGoogleSpeech.py
Normal file
285
modules/connectors/connectorGoogleSpeech.py
Normal file
|
|
@ -0,0 +1,285 @@
|
|||
"""
|
||||
Google Cloud Speech-to-Text and Translation Connector
|
||||
Replaces Azure Speech Services with Google Cloud APIs
|
||||
"""
|
||||
|
||||
import os
|
||||
import io
|
||||
import logging
|
||||
import asyncio
|
||||
from typing import Dict, Optional, Any
|
||||
from google.cloud import speech
|
||||
from google.cloud import translate_v2 as translate
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class ConnectorGoogleSpeech:
|
||||
"""
|
||||
Google Cloud Speech-to-Text and Translation connector.
|
||||
Handles audio processing, speech recognition, and translation.
|
||||
"""
|
||||
|
||||
def __init__(self, credentials_path: Optional[str] = None):
|
||||
"""
|
||||
Initialize Google Cloud Speech and Translation clients.
|
||||
|
||||
Args:
|
||||
credentials_path: Path to Google Cloud service account JSON file
|
||||
"""
|
||||
try:
|
||||
# Set up authentication
|
||||
if credentials_path:
|
||||
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentials_path
|
||||
|
||||
# Initialize clients
|
||||
self.speech_client = speech.SpeechClient()
|
||||
self.translate_client = translate.Client()
|
||||
|
||||
logger.info("✅ Google Cloud Speech and Translation clients initialized successfully")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Failed to initialize Google Cloud clients: {e}")
|
||||
raise
|
||||
|
||||
async def speech_to_text(self, audio_content: bytes, language: str = "de-DE",
|
||||
sample_rate: int = 16000, channels: int = 1) -> Dict:
|
||||
"""
|
||||
Convert speech to text using Google Cloud Speech-to-Text API.
|
||||
|
||||
Args:
|
||||
audio_content: Raw audio data (PCM format)
|
||||
language: Language code (e.g., 'de-DE', 'en-US')
|
||||
sample_rate: Audio sample rate (default: 16000 Hz)
|
||||
channels: Number of audio channels (default: 1)
|
||||
|
||||
Returns:
|
||||
Dict containing transcribed text, confidence, and metadata
|
||||
"""
|
||||
try:
|
||||
logger.info(f"🎤 Processing audio with Google Cloud Speech-to-Text")
|
||||
logger.info(f"📊 Audio: {len(audio_content)} bytes, {sample_rate}Hz, {channels}ch")
|
||||
|
||||
# Configure audio settings
|
||||
audio = speech.RecognitionAudio(content=audio_content)
|
||||
config = speech.RecognitionConfig(
|
||||
encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
|
||||
sample_rate_hertz=sample_rate,
|
||||
audio_channel_count=channels,
|
||||
language_code=language,
|
||||
enable_automatic_punctuation=True,
|
||||
model="latest_long" # Use the latest model
|
||||
)
|
||||
|
||||
# Perform speech recognition
|
||||
logger.info("🔄 Sending audio to Google Cloud Speech-to-Text...")
|
||||
response = self.speech_client.recognize(config=config, audio=audio)
|
||||
|
||||
# Process results
|
||||
if response.results:
|
||||
result = response.results[0]
|
||||
if result.alternatives:
|
||||
alternative = result.alternatives[0]
|
||||
transcribed_text = alternative.transcript
|
||||
confidence = alternative.confidence
|
||||
|
||||
logger.info(f"✅ Transcription successful: '{transcribed_text}' (confidence: {confidence:.2f})")
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"text": transcribed_text,
|
||||
"confidence": confidence,
|
||||
"language": language,
|
||||
"raw_result": {
|
||||
"transcript": transcribed_text,
|
||||
"confidence": confidence,
|
||||
"language_code": language
|
||||
}
|
||||
}
|
||||
else:
|
||||
logger.warning("⚠️ No transcription alternatives found")
|
||||
return {
|
||||
"success": False,
|
||||
"text": "",
|
||||
"confidence": 0.0,
|
||||
"error": "No transcription alternatives found"
|
||||
}
|
||||
else:
|
||||
logger.warning("⚠️ No recognition results from Google Cloud")
|
||||
return {
|
||||
"success": False,
|
||||
"text": "",
|
||||
"confidence": 0.0,
|
||||
"error": "No recognition results"
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Google Cloud Speech-to-Text error: {e}")
|
||||
return {
|
||||
"success": False,
|
||||
"text": "",
|
||||
"confidence": 0.0,
|
||||
"error": str(e)
|
||||
}
|
||||
|
||||
async def translate_text(self, text: str, target_language: str = "en",
|
||||
source_language: str = "de") -> Dict:
|
||||
"""
|
||||
Translate text using Google Cloud Translation API.
|
||||
|
||||
Args:
|
||||
text: Text to translate
|
||||
target_language: Target language code (e.g., 'en', 'de')
|
||||
source_language: Source language code (e.g., 'de', 'en')
|
||||
|
||||
Returns:
|
||||
Dict containing translated text and metadata
|
||||
"""
|
||||
try:
|
||||
if not text.strip():
|
||||
logger.warning("⚠️ Empty text provided for translation")
|
||||
return {
|
||||
"success": False,
|
||||
"translated_text": "",
|
||||
"error": "Empty text provided"
|
||||
}
|
||||
|
||||
logger.info(f"🌐 Translating: '{text}' ({source_language} -> {target_language})")
|
||||
|
||||
# Perform translation
|
||||
result = self.translate_client.translate(
|
||||
text,
|
||||
source_language=source_language,
|
||||
target_language=target_language
|
||||
)
|
||||
|
||||
translated_text = result['translatedText']
|
||||
detected_language = result.get('detectedSourceLanguage', source_language)
|
||||
|
||||
logger.info(f"✅ Translation successful: '{translated_text}'")
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"translated_text": translated_text,
|
||||
"source_language": detected_language,
|
||||
"target_language": target_language,
|
||||
"original_text": text
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Google Cloud Translation error: {e}")
|
||||
return {
|
||||
"success": False,
|
||||
"translated_text": "",
|
||||
"error": str(e)
|
||||
}
|
||||
|
||||
async def speech_to_translated_text(self, audio_content: bytes,
|
||||
from_language: str = "de-DE",
|
||||
to_language: str = "en") -> Dict:
|
||||
"""
|
||||
Complete pipeline: Speech-to-Text + Translation.
|
||||
|
||||
Args:
|
||||
audio_content: Raw audio data
|
||||
from_language: Source language for speech recognition
|
||||
to_language: Target language for translation
|
||||
|
||||
Returns:
|
||||
Dict containing original text, translated text, and metadata
|
||||
"""
|
||||
try:
|
||||
logger.info(f"🔄 Starting speech-to-translation pipeline: {from_language} -> {to_language}")
|
||||
|
||||
# Step 1: Speech-to-Text
|
||||
speech_result = await self.speech_to_text(
|
||||
audio_content=audio_content,
|
||||
language=from_language
|
||||
)
|
||||
|
||||
if not speech_result["success"]:
|
||||
return {
|
||||
"success": False,
|
||||
"original_text": "",
|
||||
"translated_text": "",
|
||||
"error": f"Speech recognition failed: {speech_result.get('error', 'Unknown error')}"
|
||||
}
|
||||
|
||||
original_text = speech_result["text"]
|
||||
|
||||
# Step 2: Translation
|
||||
translation_result = await self.translate_text(
|
||||
text=original_text,
|
||||
source_language=from_language.split('-')[0], # Convert 'de-DE' to 'de'
|
||||
target_language=to_language.split('-')[0] # Convert 'en-US' to 'en'
|
||||
)
|
||||
|
||||
if not translation_result["success"]:
|
||||
return {
|
||||
"success": False,
|
||||
"original_text": original_text,
|
||||
"translated_text": "",
|
||||
"error": f"Translation failed: {translation_result.get('error', 'Unknown error')}"
|
||||
}
|
||||
|
||||
translated_text = translation_result["translated_text"]
|
||||
|
||||
logger.info(f"✅ Complete pipeline successful:")
|
||||
logger.info(f" Original: '{original_text}'")
|
||||
logger.info(f" Translated: '{translated_text}'")
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"original_text": original_text,
|
||||
"translated_text": translated_text,
|
||||
"confidence": speech_result["confidence"],
|
||||
"source_language": from_language,
|
||||
"target_language": to_language
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Speech-to-translation pipeline error: {e}")
|
||||
return {
|
||||
"success": False,
|
||||
"original_text": "",
|
||||
"translated_text": "",
|
||||
"error": str(e)
|
||||
}
|
||||
|
||||
def validate_audio_format(self, audio_content: bytes) -> Dict:
|
||||
"""
|
||||
Validate audio format for Google Cloud Speech-to-Text.
|
||||
|
||||
Args:
|
||||
audio_content: Raw audio data
|
||||
|
||||
Returns:
|
||||
Dict containing validation results
|
||||
"""
|
||||
try:
|
||||
# Google Cloud Speech-to-Text supports various formats
|
||||
# We'll do basic validation
|
||||
if len(audio_content) < 100:
|
||||
return {
|
||||
"valid": False,
|
||||
"error": "Audio too short (less than 100 bytes)"
|
||||
}
|
||||
|
||||
# Check if it looks like PCM audio (basic check)
|
||||
if len(audio_content) % 2 != 0:
|
||||
return {
|
||||
"valid": False,
|
||||
"error": "Audio data length is odd (not 16-bit PCM)"
|
||||
}
|
||||
|
||||
return {
|
||||
"valid": True,
|
||||
"format": "pcm",
|
||||
"size": len(audio_content),
|
||||
"estimated_duration": len(audio_content) / (16000 * 2) # Rough estimate for 16kHz, 16-bit
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
return {
|
||||
"valid": False,
|
||||
"error": f"Validation error: {e}"
|
||||
}
|
||||
|
|
@ -6,7 +6,7 @@ Handles user access management and permission checks.
|
|||
import logging
|
||||
from typing import Dict, Any, List, Optional
|
||||
from modules.interfaces.interfaceAppModel import User, UserInDB
|
||||
from modules.interfaces.interfaceComponentModel import Prompt, FileItem, FileData
|
||||
from modules.interfaces.interfaceComponentModel import Prompt, FileItem, FileData, VoiceSettings
|
||||
from modules.interfaces.interfaceChatModel import ChatWorkflow, ChatMessage, ChatLog
|
||||
|
||||
# Configure logger
|
||||
|
|
@ -81,6 +81,9 @@ class ComponentAccess:
|
|||
elif table_name == "UserInDB":
|
||||
# For users table, users can only see their own record
|
||||
filtered_records = [r for r in recordset if r.get("id") == self.userId]
|
||||
elif table_name == "VoiceSettings":
|
||||
# For voice settings, users can only see their own settings
|
||||
filtered_records = [r for r in recordset if r.get("userId") == self.userId]
|
||||
else:
|
||||
# Users see only their records for other tables
|
||||
filtered_records = [
|
||||
|
|
@ -128,6 +131,11 @@ class ComponentAccess:
|
|||
for conn in record["connections"]:
|
||||
conn["_hideEdit"] = record_id != self.userId
|
||||
conn["_hideDelete"] = record_id != self.userId
|
||||
elif table_name == "VoiceSettings":
|
||||
# For voice settings, users can only access their own settings
|
||||
record["_hideView"] = False
|
||||
record["_hideEdit"] = record.get("userId") != self.userId
|
||||
record["_hideDelete"] = record.get("userId") != self.userId
|
||||
else:
|
||||
# Default access control for other tables
|
||||
record["_hideView"] = False
|
||||
|
|
@ -168,6 +176,12 @@ class ComponentAccess:
|
|||
return True
|
||||
return False
|
||||
|
||||
# Special case for voice settings - users can modify their own settings
|
||||
if model_class.__name__ == "VoiceSettings":
|
||||
if record.get("userId") == self.userId:
|
||||
return True
|
||||
return False
|
||||
|
||||
# Admins can modify anything in their mandate, if mandate is specified for a record
|
||||
if userPrivilege == "admin" and record.get("mandateId","-") == self.mandateId:
|
||||
return True
|
||||
|
|
|
|||
|
|
@ -169,3 +169,96 @@ register_model_labels(
|
|||
}
|
||||
)
|
||||
|
||||
class VoiceSettings(BaseModel, ModelMixin):
|
||||
"""Data model for voice service settings per user"""
|
||||
id: str = Field(
|
||||
default_factory=lambda: str(uuid.uuid4()),
|
||||
description="Primary key",
|
||||
frontend_type="text",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
userId: str = Field(
|
||||
description="ID of the user these settings belong to",
|
||||
frontend_type="text",
|
||||
frontend_readonly=True,
|
||||
frontend_required=True
|
||||
)
|
||||
mandateId: str = Field(
|
||||
description="ID of the mandate these settings belong to",
|
||||
frontend_type="text",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
sttLanguage: str = Field(
|
||||
default="de-DE",
|
||||
description="Speech-to-Text language",
|
||||
frontend_type="select",
|
||||
frontend_readonly=False,
|
||||
frontend_required=True
|
||||
)
|
||||
ttsLanguage: str = Field(
|
||||
default="de-DE",
|
||||
description="Text-to-Speech language",
|
||||
frontend_type="select",
|
||||
frontend_readonly=False,
|
||||
frontend_required=True
|
||||
)
|
||||
ttsVoice: str = Field(
|
||||
default="de-DE-KatjaNeural",
|
||||
description="Text-to-Speech voice",
|
||||
frontend_type="select",
|
||||
frontend_readonly=False,
|
||||
frontend_required=True
|
||||
)
|
||||
translationEnabled: bool = Field(
|
||||
default=True,
|
||||
description="Whether translation is enabled",
|
||||
frontend_type="checkbox",
|
||||
frontend_readonly=False,
|
||||
frontend_required=False
|
||||
)
|
||||
targetLanguage: str = Field(
|
||||
default="en-US",
|
||||
description="Target language for translation",
|
||||
frontend_type="select",
|
||||
frontend_readonly=False,
|
||||
frontend_required=False
|
||||
)
|
||||
creationDate: float = Field(
|
||||
default_factory=get_utc_timestamp,
|
||||
description="Date when the settings were created (UTC timestamp in seconds)",
|
||||
frontend_type="timestamp",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
lastModified: float = Field(
|
||||
default_factory=get_utc_timestamp,
|
||||
description="Date when the settings were last modified (UTC timestamp in seconds)",
|
||||
frontend_type="timestamp",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert model to dictionary"""
|
||||
return super().to_dict()
|
||||
|
||||
# Register labels for VoiceSettings
|
||||
register_model_labels(
|
||||
"VoiceSettings",
|
||||
{"en": "Voice Settings", "fr": "Paramètres vocaux"},
|
||||
{
|
||||
"id": {"en": "ID", "fr": "ID"},
|
||||
"userId": {"en": "User ID", "fr": "ID utilisateur"},
|
||||
"mandateId": {"en": "Mandate ID", "fr": "ID du mandat"},
|
||||
"sttLanguage": {"en": "STT Language", "fr": "Langue STT"},
|
||||
"ttsLanguage": {"en": "TTS Language", "fr": "Langue TTS"},
|
||||
"ttsVoice": {"en": "TTS Voice", "fr": "Voix TTS"},
|
||||
"translationEnabled": {"en": "Translation Enabled", "fr": "Traduction activée"},
|
||||
"targetLanguage": {"en": "Target Language", "fr": "Langue cible"},
|
||||
"creationDate": {"en": "Creation Date", "fr": "Date de création"},
|
||||
"lastModified": {"en": "Last Modified", "fr": "Dernière modification"}
|
||||
}
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ import hashlib
|
|||
|
||||
from modules.interfaces.interfaceComponentAccess import ComponentAccess
|
||||
from modules.interfaces.interfaceComponentModel import (
|
||||
FilePreview, Prompt, FileItem, FileData
|
||||
FilePreview, Prompt, FileItem, FileData, VoiceSettings
|
||||
)
|
||||
from modules.interfaces.interfaceAppModel import User, Mandate
|
||||
|
||||
|
|
@ -869,6 +869,155 @@ class ComponentObjects:
|
|||
logger.error(f"Error in saveUploadedFile for {fileName}: {str(e)}", exc_info=True)
|
||||
raise FileStorageError(f"Error saving file: {str(e)}")
|
||||
|
||||
# VoiceSettings methods
|
||||
|
||||
def getVoiceSettings(self, userId: Optional[str] = None) -> Optional[VoiceSettings]:
|
||||
"""Returns voice settings for a user if user has access."""
|
||||
try:
|
||||
targetUserId = userId or self.userId
|
||||
if not targetUserId:
|
||||
logger.error("No user ID provided for voice settings")
|
||||
return None
|
||||
|
||||
# Get voice settings for the user
|
||||
settings = self.db.getRecordset(VoiceSettings, recordFilter={"userId": targetUserId})
|
||||
if not settings:
|
||||
logger.debug(f"No voice settings found for user {targetUserId}")
|
||||
return None
|
||||
|
||||
# Apply access control
|
||||
filteredSettings = self._uam(VoiceSettings, settings)
|
||||
if not filteredSettings:
|
||||
logger.warning(f"No access to voice settings for user {targetUserId}")
|
||||
return None
|
||||
|
||||
# Ensure timestamps are set for validation
|
||||
settings_data = filteredSettings[0]
|
||||
if not settings_data.get("creationDate"):
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
settings_data["creationDate"] = get_utc_timestamp()
|
||||
if not settings_data.get("lastModified"):
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
settings_data["lastModified"] = get_utc_timestamp()
|
||||
|
||||
return VoiceSettings.from_dict(settings_data)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting voice settings: {str(e)}")
|
||||
return None
|
||||
|
||||
def createVoiceSettings(self, settingsData: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Creates voice settings for a user if user has permission."""
|
||||
try:
|
||||
if not self._canModify(VoiceSettings):
|
||||
raise PermissionError("No permission to create voice settings")
|
||||
|
||||
# Ensure userId is set
|
||||
if "userId" not in settingsData:
|
||||
settingsData["userId"] = self.userId
|
||||
|
||||
# Ensure mandateId is set
|
||||
if "mandateId" not in settingsData:
|
||||
settingsData["mandateId"] = self.currentUser.mandateId if self.currentUser else "default"
|
||||
|
||||
# Check if settings already exist for this user
|
||||
existingSettings = self.getVoiceSettings(settingsData["userId"])
|
||||
if existingSettings:
|
||||
raise ValueError(f"Voice settings already exist for user {settingsData['userId']}")
|
||||
|
||||
# Create voice settings record
|
||||
createdRecord = self.db.recordCreate(VoiceSettings, settingsData)
|
||||
if not createdRecord or not createdRecord.get("id"):
|
||||
raise ValueError("Failed to create voice settings record")
|
||||
|
||||
logger.info(f"Created voice settings for user {settingsData['userId']}")
|
||||
return createdRecord
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error creating voice settings: {str(e)}")
|
||||
raise
|
||||
|
||||
def updateVoiceSettings(self, userId: str, updateData: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Updates voice settings for a user if user has access."""
|
||||
try:
|
||||
# Get existing settings
|
||||
existingSettings = self.getVoiceSettings(userId)
|
||||
if not existingSettings:
|
||||
raise ValueError(f"Voice settings not found for user {userId}")
|
||||
|
||||
# Update lastModified timestamp
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
updateData["lastModified"] = get_utc_timestamp()
|
||||
|
||||
# Update voice settings record
|
||||
success = self.db.recordModify(VoiceSettings, existingSettings.id, updateData)
|
||||
if not success:
|
||||
raise ValueError("Failed to update voice settings record")
|
||||
|
||||
# Get updated settings
|
||||
updatedSettings = self.getVoiceSettings(userId)
|
||||
if not updatedSettings:
|
||||
raise ValueError("Failed to retrieve updated voice settings")
|
||||
|
||||
logger.info(f"Updated voice settings for user {userId}")
|
||||
return updatedSettings.to_dict()
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error updating voice settings: {str(e)}")
|
||||
raise
|
||||
|
||||
def deleteVoiceSettings(self, userId: str) -> bool:
|
||||
"""Deletes voice settings for a user if user has access."""
|
||||
try:
|
||||
# Get existing settings
|
||||
existingSettings = self.getVoiceSettings(userId)
|
||||
if not existingSettings:
|
||||
logger.warning(f"Voice settings not found for user {userId}")
|
||||
return False
|
||||
|
||||
# Delete voice settings
|
||||
success = self.db.recordDelete(VoiceSettings, existingSettings.id)
|
||||
if success:
|
||||
logger.info(f"Deleted voice settings for user {userId}")
|
||||
else:
|
||||
logger.error(f"Failed to delete voice settings for user {userId}")
|
||||
|
||||
return success
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error deleting voice settings: {str(e)}")
|
||||
return False
|
||||
|
||||
def getOrCreateVoiceSettings(self, userId: Optional[str] = None) -> VoiceSettings:
|
||||
"""Gets existing voice settings or creates default ones for a user."""
|
||||
try:
|
||||
targetUserId = userId or self.userId
|
||||
if not targetUserId:
|
||||
raise ValueError("No user ID provided for voice settings")
|
||||
|
||||
# Try to get existing settings
|
||||
existingSettings = self.getVoiceSettings(targetUserId)
|
||||
if existingSettings:
|
||||
return existingSettings
|
||||
|
||||
# Create default settings
|
||||
defaultSettings = {
|
||||
"userId": targetUserId,
|
||||
"mandateId": self.currentUser.mandateId if self.currentUser else "default",
|
||||
"sttLanguage": "de-DE",
|
||||
"ttsLanguage": "de-DE",
|
||||
"ttsVoice": "de-DE-KatjaNeural",
|
||||
"translationEnabled": True,
|
||||
"targetLanguage": "en-US"
|
||||
}
|
||||
|
||||
createdRecord = self.createVoiceSettings(defaultSettings)
|
||||
return VoiceSettings.from_dict(createdRecord)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting or creating voice settings: {str(e)}")
|
||||
raise
|
||||
|
||||
|
||||
def getInterface(currentUser: Optional[User] = None) -> 'ComponentObjects':
|
||||
"""
|
||||
|
|
|
|||
268
modules/routes/routeVoiceGoogle.py
Normal file
268
modules/routes/routeVoiceGoogle.py
Normal file
|
|
@ -0,0 +1,268 @@
|
|||
"""
|
||||
Google Cloud Voice Services Routes
|
||||
Replaces Azure voice services with Google Cloud Speech-to-Text and Translation
|
||||
"""
|
||||
|
||||
import os
|
||||
import logging
|
||||
from fastapi import APIRouter, File, Form, UploadFile, Depends, HTTPException
|
||||
from typing import Optional
|
||||
from modules.connectors.connectorGoogleSpeech import ConnectorGoogleSpeech
|
||||
from modules.security.auth import getCurrentUser
|
||||
from modules.interfaces.interfaceAppModel import User
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
router = APIRouter(prefix="/voice-google", tags=["voice-google"])
|
||||
|
||||
# Global connector instance
|
||||
_google_speech_connector = None
|
||||
|
||||
def get_google_speech_connector() -> ConnectorGoogleSpeech:
|
||||
"""Get or create Google Cloud Speech connector instance."""
|
||||
global _google_speech_connector
|
||||
|
||||
if _google_speech_connector is None:
|
||||
try:
|
||||
# Get credentials path from environment or config
|
||||
credentials_path = os.getenv("GOOGLE_APPLICATION_CREDENTIALS")
|
||||
if not credentials_path:
|
||||
# Try to find credentials in common locations
|
||||
possible_paths = [
|
||||
"credentials/google-service-account.json",
|
||||
"config/google-credentials.json",
|
||||
"google-credentials.json"
|
||||
]
|
||||
|
||||
for path in possible_paths:
|
||||
if os.path.exists(path):
|
||||
credentials_path = path
|
||||
break
|
||||
|
||||
if not credentials_path:
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Google Cloud credentials not found. Please set GOOGLE_APPLICATION_CREDENTIALS environment variable or place credentials file in project directory."
|
||||
)
|
||||
|
||||
_google_speech_connector = ConnectorGoogleSpeech(credentials_path)
|
||||
logger.info("✅ Google Cloud Speech connector initialized")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Failed to initialize Google Cloud Speech connector: {e}")
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to initialize Google Cloud Speech connector: {str(e)}"
|
||||
)
|
||||
|
||||
return _google_speech_connector
|
||||
|
||||
@router.post("/speech-to-text")
|
||||
async def speech_to_text(
|
||||
audio_file: UploadFile = File(...),
|
||||
language: str = Form("de-DE"),
|
||||
current_user: User = Depends(getCurrentUser)
|
||||
):
|
||||
"""Convert speech to text using Google Cloud Speech-to-Text API."""
|
||||
try:
|
||||
logger.info(f"🎤 Speech-to-text request: {audio_file.filename}, language: {language}")
|
||||
|
||||
# Read audio file
|
||||
audio_content = await audio_file.read()
|
||||
logger.info(f"📊 Audio file size: {len(audio_content)} bytes")
|
||||
|
||||
# Validate audio format
|
||||
connector = get_google_speech_connector()
|
||||
validation = connector.validate_audio_format(audio_content)
|
||||
|
||||
if not validation["valid"]:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Invalid audio format: {validation.get('error', 'Unknown error')}"
|
||||
)
|
||||
|
||||
# Perform speech recognition
|
||||
result = await connector.speech_to_text(
|
||||
audio_content=audio_content,
|
||||
language=language
|
||||
)
|
||||
|
||||
if result["success"]:
|
||||
return {
|
||||
"success": True,
|
||||
"text": result["text"],
|
||||
"confidence": result["confidence"],
|
||||
"language": result["language"],
|
||||
"audio_info": {
|
||||
"size": len(audio_content),
|
||||
"format": validation["format"],
|
||||
"estimated_duration": validation.get("estimated_duration", 0)
|
||||
}
|
||||
}
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Speech recognition failed: {result.get('error', 'Unknown error')}"
|
||||
)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Speech-to-text error: {e}")
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Speech-to-text processing failed: {str(e)}"
|
||||
)
|
||||
|
||||
@router.post("/translate")
async def translate_text(
    text: str = Form(...),
    source_language: str = Form("de"),
    target_language: str = Form("en"),
    current_user: User = Depends(getCurrentUser)
):
    """Translate text using Google Cloud Translation API."""
    try:
        logger.info(f"🌐 Translation request: '{text}' ({source_language} -> {target_language})")

        # Guard clause: refuse whitespace-only input up front.
        if not text.strip():
            raise HTTPException(
                status_code=400,
                detail="Empty text provided for translation"
            )

        # Delegate the actual translation to the shared connector.
        connector = get_google_speech_connector()
        outcome = await connector.translate_text(
            text=text,
            source_language=source_language,
            target_language=target_language
        )

        if not outcome["success"]:
            raise HTTPException(
                status_code=400,
                detail=f"Translation failed: {outcome.get('error', 'Unknown error')}"
            )

        return {
            "success": True,
            "original_text": outcome["original_text"],
            "translated_text": outcome["translated_text"],
            "source_language": outcome["source_language"],
            "target_language": outcome["target_language"]
        }

    except HTTPException:
        # Re-raise our own HTTP errors untouched.
        raise
    except Exception as e:
        logger.error(f"❌ Translation error: {e}")
        raise HTTPException(
            status_code=500,
            detail=f"Translation processing failed: {str(e)}"
        )
|
||||
|
||||
@router.post("/realtime-interpreter")
async def realtime_interpreter(
    audio_file: UploadFile = File(...),
    from_language: str = Form("de-DE"),
    to_language: str = Form("en-US"),
    connection_id: str = Form(None),
    current_user: User = Depends(getCurrentUser)
):
    """Real-time interpreter: speech to translated text using Google Cloud APIs.

    Args:
        audio_file: Uploaded audio clip to transcribe and translate.
        from_language: BCP-47 source speech language (default "de-DE").
        to_language: BCP-47 target language (default "en-US").
        connection_id: Optional client correlation id (currently unused here).
        current_user: Authenticated user (injected by dependency).

    Returns:
        dict with original/translated text, confidence, languages, and audio info.

    Raises:
        HTTPException: 400 on invalid audio or failed pipeline, 500 on
            unexpected errors.
    """
    try:
        logger.info(f"🔄 Real-time interpreter request: {audio_file.filename}")
        logger.info(f"   From: {from_language} -> To: {to_language}")

        # Read audio file
        audio_content = await audio_file.read()
        logger.info(f"📊 Audio file size: {len(audio_content)} bytes")

        # Save audio file for debugging.
        # SECURITY FIX: the client-controlled filename must not contribute
        # path components — basename() strips any "../" traversal attempt.
        safe_name = os.path.basename(audio_file.filename or "upload")
        debug_filename = f"debug_audio/audio_google_{safe_name}"
        os.makedirs("debug_audio", exist_ok=True)
        with open(debug_filename, "wb") as f:
            f.write(audio_content)
        logger.info(f"💾 Saved audio file for debugging: {debug_filename}")

        # Validate audio format before invoking the cloud APIs.
        connector = get_google_speech_connector()
        validation = connector.validate_audio_format(audio_content)
        if not validation["valid"]:
            raise HTTPException(
                status_code=400,
                detail=f"Invalid audio format: {validation.get('error', 'Unknown error')}"
            )

        # Perform complete pipeline: Speech-to-Text + Translation
        result = await connector.speech_to_translated_text(
            audio_content=audio_content,
            from_language=from_language,
            to_language=to_language
        )

        if result["success"]:
            logger.info(f"✅ Real-time interpreter successful:")
            logger.info(f"   Original: '{result['original_text']}'")
            logger.info(f"   Translated: '{result['translated_text']}'")

            return {
                "success": True,
                "original_text": result["original_text"],
                "translated_text": result["translated_text"],
                "confidence": result["confidence"],
                "source_language": result["source_language"],
                "target_language": result["target_language"],
                "audio_info": {
                    "size": len(audio_content),
                    "format": validation["format"],
                    "estimated_duration": validation.get("estimated_duration", 0)
                }
            }
        else:
            raise HTTPException(
                status_code=400,
                detail=f"Real-time interpreter failed: {result.get('error', 'Unknown error')}"
            )

    except HTTPException:
        # Propagate deliberate HTTP errors unchanged.
        raise
    except Exception as e:
        logger.error(f"❌ Real-time interpreter error: {e}")
        raise HTTPException(
            status_code=500,
            detail=f"Real-time interpreter processing failed: {str(e)}"
        )
|
||||
|
||||
@router.get("/health")
async def health_check(current_user: User = Depends(getCurrentUser)):
    """Health check for Google Cloud voice services."""
    try:
        connector = get_google_speech_connector()

        # Probe the service with a trivial translation round-trip.
        probe = await connector.translate_text(
            text="Hello",
            source_language="en",
            target_language="de"
        )

        if not probe["success"]:
            return {
                "status": "unhealthy",
                "error": probe.get("error", "Unknown error")
            }

        return {
            "status": "healthy",
            "service": "Google Cloud Speech-to-Text & Translation",
            "test_translation": probe["translated_text"]
        }

    except Exception as e:
        # Report failures as unhealthy rather than raising — this is a probe.
        logger.error(f"❌ Health check failed: {e}")
        return {
            "status": "unhealthy",
            "error": str(e)
        }
|
||||
|
|
@ -57,20 +57,25 @@ class ManagerSyncDelta:
|
|||
JIRA_ISSUE_TYPE = "Task"
|
||||
|
||||
# Task sync definition for field mapping (like original synchronizer)
|
||||
TASK_SYNC_DEFINITION = {
|
||||
"ID": ["get", ["key"]],
|
||||
"Summary": ["get", ["fields", "summary"]],
|
||||
"Status": ["get", ["fields", "status", "name"]],
|
||||
"Assignee": ["get", ["fields", "assignee", "displayName"]],
|
||||
"Reporter": ["get", ["fields", "reporter", "displayName"]],
|
||||
"Created": ["get", ["fields", "created"]],
|
||||
"Updated": ["get", ["fields", "updated"]],
|
||||
"Priority": ["get", ["fields", "priority", "name"]],
|
||||
"IssueType": ["get", ["fields", "issuetype", "name"]],
|
||||
"Project": ["get", ["fields", "project", "name"]],
|
||||
"Description": ["get", ["fields", "description"]],
|
||||
}
|
||||
|
||||
task_sync_definition={
|
||||
#key=excel-header, [get:jira>excel | put: excel>jira, jira-xml-field-list]
|
||||
'ID': ['get', ['key']],
|
||||
'Module Category': ['get', ['fields', 'customfield_10058', 'value']],
|
||||
'Summary': ['get', ['fields', 'summary']],
|
||||
'Description': ['get', ['fields', 'description']],
|
||||
'References': ['get', ['fields', 'customfield_10066']],
|
||||
'Priority': ['get', ['fields', 'priority', 'name']],
|
||||
'Issue Status': ['get', ['fields', 'customfield_10062']],
|
||||
'Assignee': ['get', ['fields', 'assignee', 'displayName']],
|
||||
'Issue Created': ['get', ['fields', 'created']],
|
||||
'Due Date': ['get', ['fields', 'duedate']],
|
||||
'DELTA Comments': ['get', ['fields', 'customfield_10060']],
|
||||
'SELISE Ticket References': ['put', ['fields', 'customfield_10067']],
|
||||
'SELISE Status Values': ['put', ['fields', 'customfield_10065']],
|
||||
'SELISE Comments': ['put', ['fields', 'customfield_10064']],
|
||||
}
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize the sync manager with hardcoded Delta Group credentials."""
|
||||
self.root_interface = getRootInterface()
|
||||
|
|
@ -249,8 +254,7 @@ async def perform_sync_jira_delta_group() -> bool:
|
|||
bool: True if synchronization was successful, False otherwise
|
||||
"""
|
||||
try:
|
||||
#TODO: ADAPT to prod
|
||||
if APP_ENV_TYPE != "dev" and APP_ENV_TYPE != "prod":
|
||||
if APP_ENV_TYPE != "prod":
|
||||
logger.info("JIRA to SharePoint synchronization: TASK to run only in PROD")
|
||||
return True
|
||||
|
||||
|
|
|
|||
|
|
@ -48,6 +48,9 @@ Office365-REST-Python-Client==2.6.2 # Easy Sharepoint integration
|
|||
## Image Processing
|
||||
Pillow>=10.0.0 # Für Bildverarbeitung (als PIL importiert)
|
||||
|
||||
## Audio Processing
|
||||
# Audio format conversion handled by pure Python implementation
|
||||
|
||||
## Utilities & Timezone Support
|
||||
python-dateutil==2.8.2
|
||||
python-dotenv==1.0.0
|
||||
|
|
@ -56,6 +59,10 @@ pytz>=2023.3 # For timezone handling and UTC operations
|
|||
## Dependencies for trio (used by httpx)
|
||||
sortedcontainers>=2.4.0 # Required by trio
|
||||
|
||||
## Google Cloud Integration
|
||||
google-cloud-speech==2.21.0
|
||||
google-cloud-translate==3.11.1
|
||||
|
||||
## MSFT Integration
|
||||
msal==1.24.1
|
||||
|
||||
|
|
|
|||
Loading…
Reference in a new issue