diff --git a/app.py b/app.py index 8268377a..f29436cc 100644 --- a/app.py +++ b/app.py @@ -21,6 +21,7 @@ from datetime import datetime from modules.shared.configuration import APP_CONFIG from modules.shared.eventManagement import eventManager from modules.workflows.automation import subAutomationSchedule +from modules.workflows.automation2 import subAutomation2Schedule from modules.features.automation2.emailPoller import start as startAutomation2EmailPoller from modules.features.automation2.emailPoller import stop as stopAutomation2EmailPoller from modules.interfaces.interfaceDbApp import getRootInterface @@ -355,7 +356,15 @@ async def lifespan(app: FastAPI): logger.warning(f"Could not initialize feature containers: {e}") # --- Init Managers --- + import asyncio + try: + main_loop = asyncio.get_running_loop() + eventManager.set_event_loop(main_loop) + subAutomation2Schedule.set_main_loop(main_loop) + except RuntimeError: + pass subAutomationSchedule.start(eventUser) # Automation scheduler + subAutomation2Schedule.start(eventUser) # Automation2 schedule trigger (cron) # Automation2 email poller: started on-demand when a run pauses for email.checkEmail eventManager.start() @@ -374,7 +383,7 @@ async def lifespan(app: FastAPI): if settingsCreated > 0: logger.info(f"Billing startup: Created {settingsCreated} missing mandate billing settings") - # Step 2: Ensure all users have billing accounts (for PREPAY_USER mandates) + # Step 2: Ensure all users have billing audit accounts accountsCreated = billingInterface.ensureAllUserAccountsExist() if accountsCreated > 0: logger.info(f"Billing startup: Created {accountsCreated} missing user accounts") @@ -386,6 +395,7 @@ async def lifespan(app: FastAPI): # --- Stop Managers --- stopAutomation2EmailPoller(eventUser) # Automation2 email poller (no-op if not running) + subAutomation2Schedule.stop(eventUser) # Automation2 schedule eventManager.stop() subAutomationSchedule.stop(eventUser) # Automation scheduler @@ -479,18 +489,6 @@ def 
getAllowedOrigins(): CORS_ORIGIN_REGEX = r"https://.*\.(poweron\.swiss|poweron-center\.net)" -# CORS configuration using environment variables -app.add_middleware( - CORSMiddleware, - allow_origins=getAllowedOrigins(), - allow_origin_regex=CORS_ORIGIN_REGEX, - allow_credentials=True, - allow_methods=["GET", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"], - allow_headers=["*"], - expose_headers=["*"], - max_age=86400, # Increased caching for preflight requests -) - # SlowAPI rate limiter initialization from modules.auth import limiter from slowapi.errors import RateLimitExceeded @@ -500,7 +498,7 @@ app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler) async def _insufficientBalanceHandler(request: Request, exc: Exception): - """HTTP 402 with structured billing hint (PREPAY_USER vs PREPAY_MANDATE).""" + """HTTP 402 with structured billing hint.""" payload = exc.toClientDict() if hasattr(exc, "toClientDict") else {"error": "INSUFFICIENT_BALANCE", "message": str(exc)} return JSONResponse(status_code=402, content={"detail": payload}) @@ -528,6 +526,19 @@ app.add_middleware( ProactiveTokenRefreshMiddleware, enabled=True, check_interval_minutes=5 ) +# CORS must be registered LAST so it wraps the whole stack: every response (errors, CSRF 403, +# rate limits) still gets Access-Control-Allow-Origin for browser cross-origin calls. 
+app.add_middleware( + CORSMiddleware, + allow_origins=getAllowedOrigins(), + allow_origin_regex=CORS_ORIGIN_REGEX, + allow_credentials=True, + allow_methods=["GET", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"], + allow_headers=["*"], + expose_headers=["*"], + max_age=86400, +) + # Include all routers from modules.routes.routeAdmin import router as generalRouter @@ -545,6 +556,9 @@ app.include_router(userRouter) from modules.routes.routeDataFiles import router as fileRouter app.include_router(fileRouter) +from modules.routes.routeDataSources import router as dataSourceRouter +app.include_router(dataSourceRouter) + from modules.routes.routeDataPrompts import router as promptRouter app.include_router(promptRouter) @@ -560,9 +574,18 @@ app.include_router(msftRouter) from modules.routes.routeSecurityGoogle import router as googleRouter app.include_router(googleRouter) +from modules.routes.routeSecurityClickup import router as clickupRouter +app.include_router(clickupRouter) + +from modules.routes.routeClickup import router as clickupApiRouter +app.include_router(clickupApiRouter) + from modules.routes.routeVoiceGoogle import router as voiceGoogleRouter app.include_router(voiceGoogleRouter) +from modules.routes.routeVoiceUser import router as voiceUserRouter +app.include_router(voiceUserRouter) + from modules.routes.routeSecurityAdmin import router as adminSecurityRouter app.include_router(adminSecurityRouter) diff --git a/env_dev.env b/env_dev.env index e6643ca9..30ffd079 100644 --- a/env_dev.env +++ b/env_dev.env @@ -46,15 +46,20 @@ Service_GOOGLE_DATA_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.ap Service_GOOGLE_DATA_CLIENT_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpETDJhbGVQMHlFQzNPVFI1ZzBMa3pNMGlQUHhaQm10eVl1bFlSeTBybzlTOWE2MURXQ0hkRlo0NlNGbHQxWEl1OVkxQnVKYlhhOXR1cUF4T3k0WDdscktkY1oyYllRTmdDTWpfbUdwWGtSd1JvNlYxeTBJdEtaaS1vYnItcW0yaFM= Service_GOOGLE_DATA_REDIRECT_URI = http://localhost:8000/api/google/auth/connect/callback +# ClickUp OAuth (Verbindungen / 
automation). Create an app in ClickUp: Settings → Apps → API; set redirect URL to Service_CLICKUP_OAUTH_REDIRECT_URI exactly. +Service_CLICKUP_CLIENT_ID = O3FX3H602A30MQN4I4SBNGJLIDBD5SL4 +Service_CLICKUP_CLIENT_SECRET = DEV_ENC:Z0FBQUFBQnB5dkd4ZWVBeHVtRnpIT0VBN0tSZDhLRmFmN05DOVBOelJtLWhkVnJDRVBqUkh3bDFTZFRWaWQ1cWowdGNLUk5IQzlGN1J6RFVCaW8zRnBwLVBnclJfdWgxV3pVRzFEV2lwcW5Rc19Xa1ROWXNJcUF0ajZaYUxOUXk0WHRsRmJLM25FaHV5T2IxdV92ZW1nRjhzaGpwU0l2Wm9FTkRnY2lJVjhuNHUwT29salAxYV8wPQ== +Service_CLICKUP_OAUTH_REDIRECT_URI = http://localhost:8000/api/clickup/auth/connect/callback + # Stripe Billing (both end with _SECRET for encryption script) -STRIPE_SECRET_KEY_SECRET = DEV_ENC:Z0FBQUFBQnBudkpGWDkxSldfM0NCZ3dmbHY5cS1nQlI3UWZ4ZWRrNVdUdEFKa25RckRiQWY0c1E5MjVsZzlfRkZEU0VFU2tNQ01qZnRNQ0pZVU9hVFN6OEU0RXhwdTl3algzLWJlSXRhYmZlMHltSC1XejlGWEU5TDF1LUlYNEh1aG9tRFI4YmlCYzUyei02U1dabWoyb0N2dVFSb1RhWTNnQjBCZkFjV0FfOWdYdDVpX1k5R2pYM1R6SHRiaE10V1l1dnQybjVHWDRiQUJLM0UxRDZnczhJZGFsc3JhOU82QT09 -STRIPE_WEBHOOK_SECRET = DEV_ENC:Z0FBQUFBQnBudkpGcHNWTWpBWkFHRExtdU01N3RyZzNsMjhUS3NiVTNCZmMwN2NEcFZ6UkQ1a2I0aUkyNU4wR2dUdHJXYmtkaEFRUnFpcThObHBEQmJkdEFnT1FXeUxOTlU3UDFNRzl6LWdpRFpYdExvY3FTTG9MTkswdEhrVkNKQVFucnBjSnhLNm4= +STRIPE_SECRET_KEY_SECRET = DEV_ENC:Z0FBQUFBQnB5dkd5aHNGejgzQmpTdmprdzQxR19KZkh3MlhYUTNseFN3WnlaWjh2SDZyalN6aU9xSktkbUQwUnZrVnlvbGVRQm4yZFdiRU5aSEk5WVJuUnR4VUwtTm9OVk1WWmJQeU5QaDdib0hfVWV5U1BfYTFXRmdoOWdnOWxkb3JFQmF3bm45UjFUVUxmWGtGRkFKUGd6bmhpQlFnaVI3Q2lLdDlsY1VESk1vOEM0ZFBJNW1qcVZ0N2tPYmRLNmVKajZ2M3o3S05lWnRRVG5LdkRseW4wQ3VjNHNQZTZUdz09 +STRIPE_WEBHOOK_SECRET = DEV_ENC:Z0FBQUFBQnB5dkd5dDJMSHBrVk8wTzJhU2xzTTZCZWdvWmU2NGI2WklfRXRJZVUzaVYyOU9GLUZsalUwa2lPdEgtUHo0dVVvRDU1cy1saHJyU0Rxa2xQZjBuakExQzk3bmxBcU9WbEIxUEtpR1JoUFMxZG9ISGRZUXFhdFpSMGxvQUV3a0VLQllfUUtCOHZwTGdteV9rYTFOazBfSlN3ekNWblFpakJlZVlCTmNkWWQ4Sm01a1RCWTlnTlFHWVA0MkZYMlprUExrWFN2V0NVU1BTd1NKczFJbVo3VHpLdlc4UT09 STRIPE_API_VERSION = 2026-01-28.clover # AI configuration Connector_AiOpenai_API_SECRET = 
DEV_ENC:Z0FBQUFBQnBaSnM4TWFRRmxVQmNQblVIYmc1Y0Q3aW9zZUtDWlNWdGZjbFpncGp2NHN2QjkxMWxibUJnZDBId252MWk5TXN3Yk14ajFIdi1CTkx2ZWx2QzF5OFR6LUx5azQ3dnNLaXJBOHNxc0tlWmtZcTFVelF4eXBSM2JkbHd2eTM0VHNXdHNtVUprZWtPVzctNlJsZHNmM20tU1N6Q1Q2cHFYSi1tNlhZNDNabTVuaEVGWmIydEhadTcyMlBURmw2aUJxOF9GTzR0dTZiNGZfOFlHaVpPZ1A1LXhhOEFtN1J5TEVNNWtMcGpyNkMzSl8xRnZsaTF1WTZrOUZmb0cxVURjSGFLS2dIYTQyZEJtTm90bEYxVWxNNXVPdTVjaVhYbXhxT3JsVDM5VjZMVFZKSE1tZnM9 Connector_AiAnthropic_API_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpENmFBWG16STFQUVZxNzZZRzRLYTA4X3lRanF1VkF4cU45OExNMzlsQmdISGFxTUxud1dXODBKcFhMVG9KNjdWVnlTTFFROVc3NDlsdlNHLUJXeG41NDBHaXhHR0VHVWl5UW9RNkVWbmlhakRKVW5pM0R4VHk0LUw0TV9LdkljNHdBLXJua21NQkl2b3l4UkVkMGN1YjBrMmJEeWtMay1jbmxrYWJNbUV0aktCXzU1djR2d2RSQXZORTNwcG92ZUVvVGMtQzQzTTVncEZTRGRtZUFIZWQ0dz09 -Connector_AiPerplexity_API_SECRET = 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 +Connector_AiPerplexity_API_SECRET = DEV_ENC:Z0FBQUFBQnB5dkd5ZmdDZ3hrSElrMnQzNFAtel9wX191VjVzN2g1LWZoa0V1YklubEdmMEJDdEZiR1RWeVZrM3V3enBHX3p6WUtTS0kwYkFyVEF0Nm8zX05CelVQcFJUc0lwVW5iNFczc1p1WWJ2WFBmd0lpLUxxWndEeUh0b2hGUHVpN19vb19nMTBnV1A1VmNpWERVX05lQ29VS20wTjZ3PT0= Connector_AiTavily_API_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEQTdnUHMwd2pIaXNtMmtCTFREd0pyQXRKb1F5eGtHSnkyOGZiUnlBOFc0b3Vzcndrc3ViRm1nMDJIOEZKYWxqdWNkZGh5N0Z4R0JlQmxXSG5pVnJUR2VYckZhMWNMZ1FNeXJ3enJLVlpiblhOZTNleUg3ZzZyUzRZanFSeDlVMkI= Connector_AiPrivateLlm_API_SECRET = DEV_ENC:Z0FBQUFBQnBudkpGRHM5eFdUVmVZU1R1cHBwN1RlMUx4T0NlLTJLUFFVX3J2OElDWFpuZmJHVmp4Z3BNNWMwZUVVZUd2TFhRSjVmVkVlcFlVRWtybXh0ZHloZ01ZcnVvX195YjdlWVdEcjZSWFFTTlNBWUlaTlNoLWhqVFBIb0thVlBiaWhjYjFQOFY= Connector_AiMistral_API_SECRET = DEV_ENC:Z0FBQUFBQnBudkpGeEQxYUIxOHhia0JlQWpWQ2dWQWZzY3l6SWwyUnJoR1hRQWloX2lxb2lGNkc4UnA4U2tWNjJaYzB1d1hvNG9fWUp1N3V4OW9FMGhaWVhjSlVwWEc1X2loVDBSZDEtdHdfcTA5QkcxQTR4OHc4RkRzclJrU2d1RFZpNDJkRDRURlE= diff --git a/env_int.env b/env_int.env index d7105469..fc9c0efd 100644 --- a/env_int.env +++ b/env_int.env @@ -46,15 +46,20 @@ Service_GOOGLE_DATA_CLIENT_ID = 
354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.ap Service_GOOGLE_DATA_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjNThGeVRNd3hacThtRnE0bzlDa0JPUWQyaEd6QjlFckdsMGZjRlRfUks2bXV3aDdVRTF3LVRlZVY5WjVzSXV4ZGNnX002RDl3dkNYdGFzZkxVUW01My1wTHRCanVCLUozZEx4TlduQlB5MnpvNTR2SGlvbFl1YkhzTEtsSi1SOEo= Service_GOOGLE_DATA_REDIRECT_URI = https://gateway-int.poweron-center.net/api/google/auth/connect/callback +# ClickUp OAuth (Verbindungen / automation). Create an app in ClickUp: Settings → Apps → API; set redirect URL to Service_CLICKUP_OAUTH_REDIRECT_URI exactly. +Service_CLICKUP_CLIENT_ID = O3FX3H602A30MQN4I4SBNGJLIDBD5SL4 +Service_CLICKUP_CLIENT_SECRET = INT_ENC:Z0FBQUFBQnB5dkd5SE1uVURMNVE3NkM4cHBKa2R2TjBnLWdpSXI5dHpKWGExZVFiUF95TFNnZ1NwLWFLdmh6eWFZTHVHYTBzU2FGRUpLYkVyM1NvZjZkWDZHN21qUER5ZVNOaGpCc3NrUGd3VnFTclF3OW1nUlVuWXQ1UVhDLVpyb1BwRExOeFpDeVhtbEhDVnd4TVdpbzNBNk5QQWFPdjdza0xBWGxFY1E3WFpCSUlNa1l4RDlBPQ== +Service_CLICKUP_OAUTH_REDIRECT_URI = https://gateway-int.poweron-center.net/api/clickup/auth/connect/callback + # Stripe Billing (both end with _SECRET for encryption script) -STRIPE_SECRET_KEY_SECRET = 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 -STRIPE_WEBHOOK_SECRET = INT_ENC:Z0FBQUFBQnBudkpGamJBNW91VUdEaThWRTFiTWpyb3NqSDJJcGtjNkhUVVZqVElxUWExY05KcllSYVk1SkRuS1NjYWpZUk1uU29nb2pzdXUxRzBsOEgyRWtmUEw3dUF4ejFIXzNwTVZRM1R1bVVhTUs4ZHJMT0V4Xy1pcHVfWlBaQV9wVXo5MGlQYXA= +STRIPE_SECRET_KEY_SECRET = INT_ENC:Z0FBQUFBQnB5dkd5ekdBaGNGVUlOQUpncTlzLWlTV0V5OWZzQkpDczhCUGw4U1JpTHZ0d3pfYlFNWElLRlNiNlNsaDRYTGZUTkg2OUFrTW1GZXpOUjBVbmRQWjN6ekhHd2ZSQ195OHlaeWh1TmxrUm10V2R3YmdncmFLbFMzVjdqcWJMSUJPR2xuSEozclNoZG1rZVBTaWg3OFQ1Qzdxb0wyQ2RKazc2dG1aZXBUTXlvbDZqLS1KOVI5M3BGc3NQZkZRbnFpRjIwWmh2ZHlVNlpxZVo2dWNmMjQ5eW02QmtzUT09 +STRIPE_WEBHOOK_SECRET = whsec_2agCQEbDPSOn2C40EJcwoPCqlvaPLF7M STRIPE_API_VERSION = 2026-01-28.clover # AI configuration Connector_AiOpenai_API_SECRET =
INT_ENC:Z0FBQUFBQnBaSnM4MENkQ2xJVmE5WFZKUkh2SHJFby1YVXN3ZmVxRkptS3ZWRmlwdU93ZEJjSjlMV2NGbU5mS3NCdmFfcmFYTEJNZXFIQ3ozTWE4ZC1pemlQNk9wbjU1d3BPS0ZCTTZfOF8yWmVXMWx0TU1DamlJLVFhSTJXclZsY3hMVWlPcXVqQWtMdER4T252NHZUWEhUOTdIN1VGR3ltazEweXFqQ0lvb0hYWmxQQnpxb0JwcFNhRDNGWXdoRTVJWm9FalZpTUF5b1RqZlRaYnVKYkp0NWR5Vko1WWJ0Wmg2VWJzYXZ0Z3Q4UkpsTldDX2dsekhKMmM4YjRoa2RwemMwYVQwM2cyMFlvaU5mOTVTWGlROU8xY2ZVRXlxZzJqWkxURWlGZGI2STZNb0NpdEtWUnM9 Connector_AiAnthropic_API_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjT1ZlRWVJdVZMT3ljSFJDcFdxRFBRVkZhS204NnN5RDBlQ0tpenhTM0FFVktuWW9mWHNwRWx2dHB0eDBSZ0JFQnZKWlp6c01pVGREWHd1eGpERnU0Q2xhaks1clQ1ZXVsdnd2ZzhpNXNQS1BhY3FjSkdkVEhHalNaRGR4emhpakZncnpDQUVxOHVXQzVUWmtQc0FsYmFwTF9TSG5FOUFtWk5Ick1NcHFvY2s1T1c2WXlRUFFJZnh6TWhuaVpMYmppcDR0QUx0a0R6RXlwbGRYb1R4dzJkUT09 -Connector_AiPerplexity_API_SECRET = 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 +Connector_AiPerplexity_API_SECRET = INT_ENC:Z0FBQUFBQnB5dkd6UkhtU3lhYmZMSlo0bklQZ2s3UTFBSkprZTNwWkg5Q2lVa0wtenhxWXpva21xVDVMRjdKSmhpTmxWS05IUTRoRHdCbktSRVVjcVFnY1RfV0N2S2dyV0dTMlhxQlRFVm41RkFTWVQzQThuVkZwdlNuVC05QlVRVXB6Qjk3akNpYmY1MFR6R1ByMzlIMllRZlRRYVVRN2ZBPT0= Connector_AiTavily_API_SECRET = INT_ENC:Z0FBQUFBQm8xSVRkdkJMTDY0akhXNzZDWHVYSEt1cDZoOWEzSktneHZEV2JndTNmWlNSMV9KbFNIZmQzeVlrNE5qUEIwcUlBSGM1a0hOZ3J6djIyOVhnZzI3M1dIUkdicl9FVXF3RGktMmlEYmhnaHJfWTdGUkktSXVUSGdQMC1vSEV6VE8zR2F1SVk= Connector_AiPrivateLlm_API_SECRET = INT_ENC:Z0FBQUFBQnBudkpGSjZ1NWh0aWc1R3Z4MHNaeS1HamtUbndhcUZFZDlqUDhjSmg5eHFfdlVkU0RsVkJ2UVRaMWs3aWhraG5jSlc0YkxNWHVmR2JoSW5ENFFCdkJBM0VienlKSnhzNnBKbTJOUTFKczRfWlQ3bWpmUkRTT1I1OGNUSTlQdExacGRpeXg= Connector_AiMistral_API_SECRET = INT_ENC:Z0FBQUFBQnBudkpGZTNtZ1E4TWIxSEU1OUlreUpxZkJIR0Vxcm9xRHRUbnBxbTQ1cXlkbnltWkJVdTdMYWZ4c3Fsam42TERWUTVhNzZFMU9xVjdyRGFCYml6bmZsZFd2YmJzemlrSWN6Q3o3X0NXX2xXNUQteTNONHdKYzJ5YVpLLWdhU2JhSTJQZnI= diff --git a/env_prod.env b/env_prod.env index f10b996e..093b6509 100644 --- a/env_prod.env +++ b/env_prod.env @@ -46,15 +46,20 @@ Service_GOOGLE_DATA_CLIENT_ID = 
354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.ap Service_GOOGLE_DATA_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3eWFwSEZ4YnRJcjU1OW5kcXZKdkt1Z3gzWDFhVW5Eelh3VnpnNlppcWxweHY5UUQzeDIyVk83cW1XNVE4bllVWnR2MjlSQzFrV1UyUVV6OUt5b3Vqa3QzMUIwNFBqc2FVSXRxTlQ1OHVJZVFibnhBQ2puXzBwSXp5NUZhZjM1d1o= Service_GOOGLE_DATA_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/google/auth/connect/callback +# ClickUp OAuth (Verbindungen / automation). Create an app in ClickUp: Settings → Apps → API; set redirect URL to Service_CLICKUP_OAUTH_REDIRECT_URI exactly. +Service_CLICKUP_CLIENT_ID = O3FX3H602A30MQN4I4SBNGJLIDBD5SL4 +Service_CLICKUP_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQnB5dkd6VGw5WDdhdDRsVENSalhSSUV0OFFxbEx0V1l6aktNV0E5Y18xU3JHLUlqMWVJdmxyajAydVZRaDJkZzJOVXhxRV9ROFRZbWxlRjh4c3NtQnRFMmRtZWpzTWVsdngtWldlNXRKTURHQjJCOEt6alMwQlkwOFYyVVJWNURJUGJIZDIxYVlfNnBrMU54M0Q3TVdVbFZqRkJKTUtqa05wUkV4eGZvbXNsVi1nNVdBPQ== +Service_CLICKUP_OAUTH_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/clickup/auth/connect/callback + # Stripe Billing (both end with _SECRET for encryption script) -STRIPE_SECRET_KEY_SECRET = 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 +STRIPE_SECRET_KEY_SECRET = PROD_ENC:Z0FBQUFBQnB5dkd6aVA3R3VRS3VHMUgzUEVjYkR4eUZKWFhPUzFTTVlHNnBvT3FienNQaUlBWVpPLXJyVGpGMWk4LXktMXphX0J6ZTVESkJxdjNNa3ZJbF9wX2ppYzdjYlF0cmdVamlEWWJDSmJYYkJseHctTlh4dnNoQWs4SG5haVl2TTNDdXpuaFpqeDBtNkFCbUxMa0RaWG14dmxyOEdILTNrZ2licmNpbXVkN2lFSWoxZW1BODNpV0ZTQ0VaeXRmR1d4RjExMlVFS3MtQU9zZXZlZE1mTmY3OWctUXJHdz09 STRIPE_WEBHOOK_SECRET = PROD_ENC:Z0FBQUFBQnBudkpGNUpTWldsakYydFhFelBrR1lSaWxYT3kyMENOMUljZTJUZHBWcEhhdWVCMzYxZXQ5b3VlTFVRalFiTVdsbGxrdUx0RDFwSEpsOC1sTDJRTEJNQlA3S3ZaQzBtV1h6bWp5VnlMZUgwUlF3cXYxcnljZVE5SWdzLVg3V0syOWRYS08= STRIPE_API_VERSION = 2026-01-28.clover # AI configuration Connector_AiOpenai_API_SECRET =
PROD_ENC:Z0FBQUFBQnBaSnM4TWJOVm4xVkx6azRlNDdxN3UxLUdwY2hhdGYxRGp4VFJqYXZIcmkxM1ZyOWV2M0Z4MHdFNkVYQ0ROb1d6LUZFUEdvMHhLMEtXYVBCRzM5TlYyY3ROYWtJRk41cDZxd0tYYi00MjVqMTh4QVcyTXl0bmVocEFHbXQwREpwNi1vODdBNmwzazE5bkpNelE2WXpvblIzWlQwbGdEelI2WXFqT1RibXVHcjNWbVhwYzBOM25XTzNmTDAwUjRvYk4yNjIyZHc5c2RSZzREQUFCdUwyb0ZuOXN1dzI2c2FKdXI4NGxEbk92czZWamJXU3ZSbUlLejZjRklRRk4tLV9aVUFZekI2bTU4OHYxNTUybDg3RVo0ZTh6dXNKRW5GNXVackZvcm9laGI0X3R6V3M9 Connector_AiAnthropic_API_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3TnhYdlhSLW5RbXJyMHFXX0V0bHhuTDlTaFJsRDl2dTdIUTFtVFAwTE8tY3hLbzNSMnVTLXd3RUZualN3MGNzc1kwOTIxVUN2WW1rYi1TendFRVVBSVNqRFVjckEzNExyTGNaUkJLMmozazUwemI1cnhrcEtZVXJrWkdaVFFramp3MWZ6RmY2aGlRMXVEYjM2M3ZlbmxMdnNCRDM1QWR0Wmd6MWVnS1I1c01nV3hRLXg3d2NTZXVfTi1Wdm16UnRyNGsyRTZ0bG9TQ1g1OFB5Z002bmQ3QT09 -Connector_AiPerplexity_API_SECRET = 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 +Connector_AiPerplexity_API_SECRET = PROD_ENC:Z0FBQUFBQnB5dkd6NG5CTm9QOFZRV1BIVC0tV2RKTGtCQWFOUXlpRnhEdjN1U2x3VUdDamtIZV9CQzQ5ZmRmcUh3ZUVUa0NxbGhlenVVdWtaYjdpcnhvUlNFLXZfOWh2dWFZai0xUGU5cWpuYmpnRVRWakh0RVNUUTFyX0w5V0NXVWFrQlZuOTd5TkI0eVRoQ0ZBSm9HYUlYamoyY1FCMmlBPT0= Connector_AiTavily_API_SECRET = PROD_ENC:Z0FBQUFBQnBDM1Z3NmItcDh6V0JpcE5Jc0NlUWZqcmllRHB5eDlNZmVnUlNVenhNTm5xWExzbjJqdE1GZ0hTSUYtb2dvdWNhTnlQNmVWQ2NGVDgwZ0MwMWZBMlNKWEhzdlF3TlZzTXhCZWM4Z1Uwb18tSTRoU1JBVTVkSkJHOTJwX291b3dPaVphVFg= Connector_AiPrivateLlm_API_SECRET = PROD_ENC:Z0FBQUFBQnBudkpGanZ6U3pzZWkwXzVPWGtIQ040XzFrTXc5QWRnazdEeEktaUJ0akJmNnEzbWUzNHczLTJfc2dIdzBDY0FTaXZYcDhxNFdNbTNtbEJTb2VRZ0ZYd05hdlNLR1h6SUFzVml2Z1FLY1BjTl90UWozUGxtak1URnhhZmNDRWFTb0dKVUo= Connector_AiMistral_API_SECRET = PROD_ENC:Z0FBQUFBQnBudkpGc2tQc2lvMk1YZk01Q1dob1U5cnR0dG03WWE3WkpoOWo0SEpvLU9Rc2lCNDExdy1wZExaN3lpT2FEQkxnaHRmWmZUUUZUUUJmblZreGlpaFpOdnFhbzlEd1RsVVJtX216cmhxTm5BcTN2eUZ2T054cDE5bmlEamJ3NGR6MVpFQnA= diff --git a/modules/aicore/aicoreBase.py b/modules/aicore/aicoreBase.py index 70dd67c4..e107beb3 100644 --- a/modules/aicore/aicoreBase.py +++ 
b/modules/aicore/aicoreBase.py @@ -18,7 +18,9 @@ from typing import List, Dict, Any, Optional, AsyncGenerator, Union from modules.datamodels.datamodelAi import AiModel, AiModelCall, AiModelResponse -_RETRY_AFTER_PATTERN = _re.compile(r"try again in (\d+(?:\.\d+)?)\s*s", _re.IGNORECASE) +_RETRY_AFTER_PATTERN = _re.compile( + r"(?:try again in|retry after)\s+(\d+(?:\.\d+)?)\s*s", _re.IGNORECASE +) def _parseRetryAfterSeconds(message: str) -> float: diff --git a/modules/aicore/aicorePluginPrivateLlm.py b/modules/aicore/aicorePluginPrivateLlm.py index 718c5905..79853652 100644 --- a/modules/aicore/aicorePluginPrivateLlm.py +++ b/modules/aicore/aicorePluginPrivateLlm.py @@ -7,9 +7,9 @@ Connects to the private-llm service running on-premise with Ollama backend. Provides OCR and Vision capabilities via local AI models. Models: -- poweron-ocr-general: Text extraction and OCR (deepseek backend) -- poweron-vision-general: General vision tasks (qwen2.5vl backend) -- poweron-vision-deep: Deep vision analysis (granite3.2 backend) +- poweron-text-general: Text (qwen2.5); NEUTRALIZATION_TEXT + data/plan ops +- poweron-vision-general: Vision (qwen2.5vl); IMAGE_ANALYSE + NEUTRALIZATION_IMAGE +- poweron-vision-deep: Vision (granite3.2); IMAGE_ANALYSE + NEUTRALIZATION_IMAGE Pricing (CHF per call): - Text models: CHF 0.010 @@ -22,7 +22,7 @@ import time from typing import List, Optional, Dict, Any from fastapi import HTTPException from modules.shared.configuration import APP_CONFIG -from .aicoreBase import BaseConnectorAi +from .aicoreBase import BaseConnectorAi, RateLimitExceededException from modules.datamodels.datamodelAi import ( AiModel, PriorityEnum, @@ -245,6 +245,7 @@ class AiPrivateLlm(BaseConnectorAi): (OperationTypeEnum.DATA_ANALYSE, 8), (OperationTypeEnum.DATA_GENERATE, 8), (OperationTypeEnum.DATA_EXTRACT, 8), + (OperationTypeEnum.NEUTRALIZATION_TEXT, 9), ), version="qwen2.5:7b", calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: PRICE_TEXT_PER_CALL @@ 
-270,6 +271,7 @@ class AiPrivateLlm(BaseConnectorAi): processingMode=ProcessingModeEnum.ADVANCED, operationTypes=createOperationTypeRatings( (OperationTypeEnum.IMAGE_ANALYSE, 9), + (OperationTypeEnum.NEUTRALIZATION_IMAGE, 9), ), version="qwen2.5vl:7b", calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: PRICE_VISION_PER_CALL @@ -295,6 +297,7 @@ class AiPrivateLlm(BaseConnectorAi): processingMode=ProcessingModeEnum.DETAILED, operationTypes=createOperationTypeRatings( (OperationTypeEnum.IMAGE_ANALYSE, 9), + (OperationTypeEnum.NEUTRALIZATION_IMAGE, 9), ), version="granite3.2-vision", calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: PRICE_VISION_PER_CALL @@ -367,6 +370,9 @@ class AiPrivateLlm(BaseConnectorAi): if response.status_code != 200: errorMessage = f"Private-LLM API error: {response.status_code} - {response.text}" + if response.status_code == 429: + logger.warning(errorMessage) + raise RateLimitExceededException(errorMessage) logger.error(errorMessage) raise HTTPException(status_code=500, detail=errorMessage) @@ -458,6 +464,9 @@ class AiPrivateLlm(BaseConnectorAi): if response.status_code != 200: errorMessage = f"Private-LLM API error: {response.status_code} - {response.text}" + if response.status_code == 429: + logger.warning(errorMessage) + raise RateLimitExceededException(errorMessage) logger.error(errorMessage) raise HTTPException(status_code=500, detail=errorMessage) diff --git a/modules/auth/csrf.py b/modules/auth/csrf.py index ba21435b..bac4b0c3 100644 --- a/modules/auth/csrf.py +++ b/modules/auth/csrf.py @@ -35,6 +35,8 @@ class CSRFMiddleware(BaseHTTPMiddleware): "/api/google/auth/login/callback", "/api/google/auth/connect", "/api/google/auth/connect/callback", + "/api/clickup/auth/connect", + "/api/clickup/auth/connect/callback", "/api/billing/webhook/stripe", # Stripe webhook (auth via Stripe-Signature) } @@ -86,12 +88,15 @@ class CSRFMiddleware(BaseHTTPMiddleware): content={"detail": "Invalid CSRF token format"} ) - # 
Additional CSRF validation could be added here: - # - Check token against session - # - Validate token expiration - # - Verify token origin - - return await call_next(request) + try: + return await call_next(request) + except Exception as exc: + logger.error("Unhandled exception in %s %s: %s", request.method, request.url.path, exc) + from fastapi.responses import JSONResponse + return JSONResponse( + status_code=500, + content={"detail": "Internal server error"}, + ) def _is_valid_csrf_token(self, token: str) -> bool: """ diff --git a/modules/auth/tokenManager.py b/modules/auth/tokenManager.py index 5740a2ac..940de055 100644 --- a/modules/auth/tokenManager.py +++ b/modules/auth/tokenManager.py @@ -181,7 +181,7 @@ class TokenManager: # Only allow a new refresh if at least 10 minutes passed since the token was created/refreshed try: nowTs = getUtcTimestamp() - createdTs = parseTimestamp(oldToken.createdAt, default=0.0) + createdTs = parseTimestamp(oldToken.sysCreatedAt, default=0.0) secondsSinceLastRefresh = nowTs - createdTs if secondsSinceLastRefresh < 10 * 60: logger.info( diff --git a/modules/connectors/connectorDbPostgre.py b/modules/connectors/connectorDbPostgre.py index 67cceb45..6bd661b4 100644 --- a/modules/connectors/connectorDbPostgre.py +++ b/modules/connectors/connectorDbPostgre.py @@ -5,13 +5,14 @@ import re import psycopg2 import psycopg2.extras import logging -from typing import List, Dict, Any, Optional, Union, get_origin, get_args, Type +from typing import List, Dict, Any, Optional, Union, get_origin, get_args, Type, Set, Tuple import uuid from pydantic import BaseModel, Field import threading from modules.shared.timeUtils import getUtcTimestamp from modules.shared.configuration import APP_CONFIG +from modules.datamodels.datamodelBase import PowerOnModel from modules.datamodels.datamodelUam import User, AccessLevel, UserPermissions from modules.datamodels.datamodelRbac import AccessRule, AccessRuleContext @@ -20,7 +21,7 @@ logger = 
logging.getLogger(__name__) # No mapping needed - table name = Pydantic model name exactly -class SystemTable(BaseModel): +class SystemTable(PowerOnModel): """Data model for system table entries""" table_name: str = Field( @@ -157,6 +158,88 @@ def _parseRecordFields(record: Dict[str, Any], fields: Dict[str, str], context: logger.warning(f"Could not parse JSONB field {fieldName}, keeping as string ({context})") +# Legacy column names (historical _* internal names and old camelCase audit fields) -> PowerOn sys* columns. +# Order matters: more specific / underscore names first; first successful copy wins per cell via IS NULL on target. +_LEGACY_FIELD_TO_SYS: Tuple[Tuple[str, str], ...] = ( + ("_createdAt", "sysCreatedAt"), + ("_createdBy", "sysCreatedBy"), + ("_modifiedAt", "sysModifiedAt"), + ("_modifiedBy", "sysModifiedBy"), + ("createdAt", "sysCreatedAt"), + ("creationDate", "sysCreatedAt"), + ("updatedAt", "sysModifiedAt"), + ("lastModified", "sysModifiedAt"), +) + + +def _quotePgIdent(name: str) -> str: + return '"' + str(name).replace('"', '""') + '"' + + +def _resolveColumnCaseInsensitive(cols: Set[str], logicalName: str) -> Optional[str]: + """Match information_schema column_name to logical CamelCase (PG folds unquoted legacy names to lowercase).""" + if not logicalName or not cols: + return None + for c in cols: + if c.lower() == logicalName.lower(): + return c + return None + + +def _pgColumnDataType(cursor, tablePg: str, colPg: str) -> Optional[str]: + cursor.execute( + """ + SELECT data_type FROM information_schema.columns + WHERE table_schema = 'public' AND table_name = %s AND column_name = %s + """, + (tablePg, colPg), + ) + row = cursor.fetchone() + return row["data_type"] if row else None + + +def _legacySourceToSysSqlExpr(srcIdent: str, srcType: Optional[str], tgtType: Optional[str]) -> str: + """Build RHS for UPDATE sys* = expr from legacy _* column (handles text/timestamp -> double precision).""" + s = _quotePgIdent(srcIdent) + sl = (srcType or 
"").lower() + tl = (tgtType or "").lower() + if "double" in tl or tl == "real" or tl == "numeric": + if any(x in sl for x in ("double precision", "real", "numeric", "integer", "bigint", "smallint")): + return f"{s}::double precision" + if "timestamp" in sl or sl == "date": + return f"EXTRACT(EPOCH FROM {s}::timestamptz)" + if "text" in sl or "character" in sl or sl == "uuid": + return ( + f"CASE WHEN trim({s}::text) ~ '^[+-]?[0-9]+(\\.[0-9]*)?([eE][+-]?[0-9]+)?$' " + f"THEN trim({s}::text)::double precision " + f"ELSE EXTRACT(EPOCH FROM trim({s}::text)::timestamptz) END" + ) + return s + return s + + +def _listPublicBaseTableNames(cursor) -> List[str]: + cursor.execute( + """ + SELECT table_name FROM information_schema.tables + WHERE table_schema = 'public' AND table_type = 'BASE TABLE' + ORDER BY table_name + """ + ) + return [row["table_name"] for row in cursor.fetchall()] + + +def _listTableColumnNames(cursor, tableName: str) -> Set[str]: + cursor.execute( + """ + SELECT column_name FROM information_schema.columns + WHERE table_schema = 'public' AND table_name = %s + """, + (tableName,), + ) + return {row["column_name"] for row in cursor.fetchall()} + + # Cache connectors by (host, database, port) to avoid duplicate inits for same database. # Thread safety: _connector_cache_lock protects cache access. userId is request-scoped via # contextvars to avoid races when concurrent requests share the same connector. @@ -178,7 +261,7 @@ def _get_cached_connector( userId: str = None, ) -> "DatabaseConnector": """Return cached DatabaseConnector for same (host, database, port) to avoid duplicate PostgreSQL inits. - Uses contextvars for userId so concurrent requests sharing the same connector get correct _createdBy/_modifiedBy. + Uses contextvars for userId so concurrent requests sharing the same connector get correct sysCreatedBy/sysModifiedBy. 
""" port = int(dbPort) if dbPort is not None else 5432 key = (dbHost, dbDatabase, port) @@ -327,8 +410,10 @@ class DatabaseConnector: id SERIAL PRIMARY KEY, table_name VARCHAR(255) UNIQUE NOT NULL, initial_id VARCHAR(255) NOT NULL, - _createdAt DOUBLE PRECISION, - _modifiedAt DOUBLE PRECISION + "sysCreatedAt" DOUBLE PRECISION, + "sysCreatedBy" VARCHAR(255), + "sysModifiedAt" DOUBLE PRECISION, + "sysModifiedBy" VARCHAR(255) ) """) conn.close() @@ -371,6 +456,63 @@ class DatabaseConnector: logger.warning(f"Connection lost, reconnecting: {e}") self._connect() + def migrateLegacyUnderscoreSysColumns(self) -> int: + """ + Scan all public base tables on this connection's database. Where both a legacy + source column (any case: _createdAt, createdAt, creationDate, …) and the matching + sys* column exist, UPDATE sys* from legacy where sys* IS NULL AND legacy IS NOT NULL. + Idempotent; run after schema adds sys* columns (see _ensureTableExists). + """ + self._ensure_connection() + total = 0 + try: + with self.connection.cursor() as cursor: + tableNames = _listPublicBaseTableNames(cursor) + for table in tableNames: + with self.connection.cursor() as cursor: + cols = _listTableColumnNames(cursor, table) + for legacyLogical, sysLogical in _LEGACY_FIELD_TO_SYS: + src = _resolveColumnCaseInsensitive(cols, legacyLogical) + tgt = _resolveColumnCaseInsensitive(cols, sysLogical) + if not src or not tgt or src == tgt: + continue + try: + with self.connection.cursor() as cursor: + srcType = _pgColumnDataType(cursor, table, src) + tgtType = _pgColumnDataType(cursor, table, tgt) + expr = _legacySourceToSysSqlExpr(src, srcType, tgtType) + tq = _quotePgIdent(table) + tr = _quotePgIdent(tgt) + sr = _quotePgIdent(src) + sql = ( + f"UPDATE {tq} SET {tr} = {expr} " + f"WHERE {tr} IS NULL AND {sr} IS NOT NULL" + ) + cursor.execute(sql) + n = cursor.rowcount + self.connection.commit() + total += n + except Exception as e: + try: + self.connection.rollback() + except Exception: + pass + 
logger.debug( + f"migrateLegacyUnderscoreSysColumns skip {self.dbDatabase}.{table} " + f"{src}->{tgt}: {e}" + ) + except Exception as e: + logger.error(f"migrateLegacyUnderscoreSysColumns failed on {self.dbDatabase}: {e}") + try: + self.connection.rollback() + except Exception: + pass + if total: + logger.info( + f"migrateLegacyUnderscoreSysColumns: {total} cell(s) in {self.dbDatabase}" + ) + return total + def _initializeSystemTable(self): """Initializes the system table if it doesn't exist yet.""" try: @@ -416,7 +558,7 @@ class DatabaseConnector: for table_name, initial_id in data.items(): cursor.execute( """ - INSERT INTO "_system" ("table_name", "initial_id", "_modifiedAt") + INSERT INTO "_system" ("table_name", "initial_id", "sysModifiedAt") VALUES (%s, %s, %s) """, (table_name, initial_id, getUtcTimestamp()), @@ -448,8 +590,10 @@ class DatabaseConnector: CREATE TABLE "{self._systemTableName}" ( "table_name" VARCHAR(255) PRIMARY KEY, "initial_id" VARCHAR(255), - "_createdAt" DOUBLE PRECISION, - "_modifiedAt" DOUBLE PRECISION + "sysCreatedAt" DOUBLE PRECISION, + "sysCreatedBy" VARCHAR(255), + "sysModifiedAt" DOUBLE PRECISION, + "sysModifiedBy" VARCHAR(255) ) """) logger.info("System table created successfully") @@ -464,10 +608,16 @@ class DatabaseConnector: ) existing_columns = [row["column_name"] for row in cursor.fetchall()] - if "_modifiedAt" not in existing_columns: - cursor.execute( - f'ALTER TABLE "{self._systemTableName}" ADD COLUMN "_modifiedAt" DOUBLE PRECISION' - ) + for sys_col, sys_sql in [ + ("sysCreatedAt", "DOUBLE PRECISION"), + ("sysCreatedBy", "VARCHAR(255)"), + ("sysModifiedAt", "DOUBLE PRECISION"), + ("sysModifiedBy", "VARCHAR(255)"), + ]: + if sys_col not in existing_columns: + cursor.execute( + f'ALTER TABLE "{self._systemTableName}" ADD COLUMN "{sys_col}" {sys_sql}' + ) return True except Exception as e: @@ -484,6 +634,7 @@ class DatabaseConnector: try: self._ensure_connection() + schemaTouched = False with self.connection.cursor() as 
cursor: # Check if table exists by querying information_schema with case-insensitive search @@ -502,6 +653,7 @@ class DatabaseConnector: logger.info( f"Created table '{table}' with columns from Pydantic model" ) + schemaTouched = True else: # Table exists: ensure all columns from model are present (simple additive migration) try: @@ -518,11 +670,7 @@ class DatabaseConnector: # Desired columns based on model model_fields = _get_model_fields(model_class) - desired_columns = ( - set(["id"]) - | set(model_fields.keys()) - | {"_createdAt", "_modifiedAt", "_createdBy", "_modifiedBy"} - ) + desired_columns = set(["id"]) | set(model_fields.keys()) # Add missing columns for col in sorted(desired_columns - existing_columns): @@ -530,12 +678,6 @@ class DatabaseConnector: if col in ["id"]: continue # primary key exists already sql_type = model_fields.get(col) - if col in ["_createdAt"]: - sql_type = "DOUBLE PRECISION" - elif col in ["_modifiedAt"]: - sql_type = "DOUBLE PRECISION" - elif col in ["_createdBy", "_modifiedBy"]: - sql_type = "VARCHAR(255)" if not sql_type: sql_type = "TEXT" try: @@ -545,6 +687,7 @@ class DatabaseConnector: logger.info( f"Added missing column '{col}' ({sql_type}) to '{table}'" ) + schemaTouched = True except Exception as add_err: logger.warning( f"Could not add column '{col}' to '{table}': {add_err}" @@ -555,6 +698,23 @@ class DatabaseConnector: ) self.connection.commit() + if schemaTouched: + try: + n = self.migrateLegacyUnderscoreSysColumns() + if n: + logger.info( + "After schema change on %s.%s: legacy -> sys* migration wrote %s cell(s)", + self.dbDatabase, + table, + n, + ) + except Exception as mig_err: + logger.error( + "migrateLegacyUnderscoreSysColumns failed after schema change %s.%s: %s", + self.dbDatabase, + table, + mig_err, + ) return True except Exception as e: logger.error(f"Error ensuring table {table} exists: {e}") @@ -594,16 +754,6 @@ class DatabaseConnector: if field_name != "id": # Skip id, already defined 
columns.append(f'"{field_name}" {sql_type}') - # Add metadata columns - columns.extend( - [ - '"_createdAt" DOUBLE PRECISION', - '"_modifiedAt" DOUBLE PRECISION', - '"_createdBy" VARCHAR(255)', - '"_modifiedBy" VARCHAR(255)', - ] - ) - # Create table sql = f'CREATE TABLE IF NOT EXISTS "{table}" ({", ".join(columns)})' cursor.execute(sql) @@ -626,11 +776,7 @@ class DatabaseConnector: """Save record to normalized table with explicit columns.""" # Get columns from Pydantic model instead of database schema fields = _get_model_fields(model_class) - columns = ( - ["id"] - + [field for field in fields.keys() if field != "id"] - + ["_createdAt", "_createdBy", "_modifiedAt", "_modifiedBy"] - ) + columns = ["id"] + [field for field in fields.keys() if field != "id"] if not columns: logger.error(f"No columns found for table {table}") @@ -648,7 +794,7 @@ class DatabaseConnector: value = filtered_record.get(col) # Handle timestamp fields - store as Unix timestamps (floats) for consistency - if col in ["_createdAt", "_modifiedAt"] and value is not None: + if col in ["sysCreatedAt", "sysModifiedAt"] and value is not None: if isinstance(value, str): # Try to parse string as timestamp try: @@ -690,7 +836,7 @@ class DatabaseConnector: [ f'"{col}" = EXCLUDED."{col}"' for col in columns[1:] - if col not in ["_createdAt", "_createdBy"] + if col not in ["sysCreatedAt", "sysCreatedBy"] ] ) @@ -723,6 +869,10 @@ class DatabaseConnector: logger.error(f"Error loading record {recordId} from table {table}: {e}") return None + def getRecord(self, model_class: type, recordId: str) -> Optional[Dict[str, Any]]: + """Load one row by primary key (routes / services; wraps _loadRecord).""" + return self._loadRecord(model_class, str(recordId)) + def _saveRecord( self, model_class: type, recordId: str, record: Dict[str, Any] ) -> bool: @@ -742,17 +892,19 @@ class DatabaseConnector: if effective_user_id is None: effective_user_id = self.userId currentTime = getUtcTimestamp() - # Set _createdAt and 
_createdBy if this is a new record (record doesn't have _createdAt) - if "_createdAt" not in record: - record["_createdAt"] = currentTime + # Set sysCreatedAt/sysCreatedBy on first persist; always refresh modified fields. + # Treat None and 0 as unset (legacy rows / bad defaults); model_dump often has sysCreatedAt=None. + createdTs = record.get("sysCreatedAt") + if createdTs is None or createdTs == 0 or createdTs == 0.0: + record["sysCreatedAt"] = currentTime if effective_user_id: - record["_createdBy"] = effective_user_id - elif "_createdBy" not in record or not record.get("_createdBy"): + record["sysCreatedBy"] = effective_user_id + elif not record.get("sysCreatedBy"): if effective_user_id: - record["_createdBy"] = effective_user_id - record["_modifiedAt"] = currentTime + record["sysCreatedBy"] = effective_user_id + record["sysModifiedAt"] = currentTime if effective_user_id: - record["_modifiedBy"] = effective_user_id + record["sysModifiedBy"] = effective_user_id with self.connection.cursor() as cursor: self._save_record(cursor, table, recordId, record, model_class) @@ -840,6 +992,26 @@ class DatabaseConnector: logger.error(f"Error removing initial ID for table {table}: {e}") return False + def buildRbacWhereClause( + self, + permissions: UserPermissions, + currentUser: User, + table: str, + mandateId: Optional[str] = None, + featureInstanceId: Optional[str] = None, + ) -> Optional[Dict[str, Any]]: + """Delegate to interfaceRbac.buildRbacWhereClause (tests and call sites use connector as entry).""" + from modules.interfaces.interfaceRbac import buildRbacWhereClause as _buildRbacWhereClause + + return _buildRbacWhereClause( + permissions, + currentUser, + table, + self, + mandateId=mandateId, + featureInstanceId=featureInstanceId, + ) + def updateContext(self, userId: str) -> None: """Updates the context of the database connector. Sets both instance userId and contextvar for request-scoped use when connector is shared. 
@@ -992,10 +1164,6 @@ class DatabaseConnector: Returns (where_clause, order_clause, limit_clause, values, count_values). """ fields = _get_model_fields(model_class) - fields["_createdAt"] = "DOUBLE PRECISION" - fields["_modifiedAt"] = "DOUBLE PRECISION" - fields["_createdBy"] = "TEXT" - fields["_modifiedBy"] = "TEXT" validColumns = set(fields.keys()) where_parts: List[str] = [] values: List[Any] = [] @@ -1026,6 +1194,9 @@ class DatabaseConnector: continue colType = fields.get(key, "TEXT") logger.debug(f"_buildPaginationClauses: filter key='{key}' val={val!r} type(val)={type(val).__name__} colType={colType}") + if val is None: + where_parts.append(f'"{key}" IS NULL') + continue if isinstance(val, dict): op = val.get("operator", "equals") v = val.get("value", "") @@ -1190,10 +1361,6 @@ class DatabaseConnector: """ table = model_class.__name__ fields = _get_model_fields(model_class) - fields["_createdAt"] = "DOUBLE PRECISION" - fields["_modifiedAt"] = "DOUBLE PRECISION" - fields["_createdBy"] = "TEXT" - fields["_modifiedBy"] = "TEXT" if column not in fields: return [] diff --git a/modules/connectors/connectorResolver.py b/modules/connectors/connectorResolver.py index 4304378e..8ffdd73f 100644 --- a/modules/connectors/connectorResolver.py +++ b/modules/connectors/connectorResolver.py @@ -52,6 +52,12 @@ class ConnectorResolver: except ImportError: logger.debug("FtpConnector not available (stub)") + try: + from modules.connectors.providerClickup.connectorClickup import ClickupConnector + ConnectorResolver._providerRegistry["clickup"] = ClickupConnector + except ImportError: + logger.warning("ClickupConnector not available") + async def resolve(self, connectionId: str) -> ProviderConnector: """Resolve connectionId to a ProviderConnector with a fresh access token.""" connection = await self._loadConnection(connectionId) diff --git a/modules/connectors/connectorTicketsClickup.py b/modules/connectors/connectorTicketsClickup.py index 37480aa9..af02b44a 100644 --- 
a/modules/connectors/connectorTicketsClickup.py +++ b/modules/connectors/connectorTicketsClickup.py @@ -9,6 +9,7 @@ from typing import Optional import logging import aiohttp from modules.datamodels.datamodelTickets import TicketBase, TicketFieldAttribute +from modules.serviceCenter.services.serviceClickup.mainServiceClickup import clickup_authorization_header logger = logging.getLogger(__name__) @@ -30,7 +31,7 @@ class ConnectorTicketClickup(TicketBase): def _headers(self) -> dict: return { - "Authorization": self.apiToken, + "Authorization": clickup_authorization_header(self.apiToken), "Content-Type": "application/json", } diff --git a/modules/connectors/connectorVoiceGoogle.py b/modules/connectors/connectorVoiceGoogle.py index ddb0d864..0dbb46a5 100644 --- a/modules/connectors/connectorVoiceGoogle.py +++ b/modules/connectors/connectorVoiceGoogle.py @@ -18,6 +18,11 @@ from modules.shared.configuration import APP_CONFIG logger = logging.getLogger(__name__) +# Gemini-TTS speaker IDs from voices.list use short names (e.g. "Kore") and require model_name + prompt. +_GEMINI_TTS_DEFAULT_MODEL = "gemini-2.5-flash-tts" +_GEMINI_TTS_NEUTRAL_PROMPT = "Say the following" + + class ConnectorGoogleSpeech: """ Google Cloud Speech-to-Text and Translation connector. @@ -902,6 +907,13 @@ class ConnectorGoogleSpeech: "error": f"Validation error: {e}" } + def _isGeminiTtsSpeakerVoiceName(self, voiceName: str) -> bool: + """True when voice name is a Gemini-TTS speaker id (no BCP-47 prefix like en-US-...).""" + if not voiceName or not isinstance(voiceName, str): + return False + stripped = voiceName.strip() + return bool(stripped) and "-" not in stripped + async def textToSpeech(self, text: str, languageCode: str = "de-DE", voiceName: str = None) -> Dict[str, Any]: """ Convert text to speech using Google Cloud Text-to-Speech. @@ -917,9 +929,6 @@ class ConnectorGoogleSpeech: try: logger.info(f"Converting text to speech: '{text[:50]}...' 
in {languageCode}") - # Set up the synthesis input - synthesisInput = texttospeech.SynthesisInput(text=text) - # Build the voice request selectedVoice = voiceName or self._getDefaultVoice(languageCode) @@ -931,11 +940,24 @@ class ConnectorGoogleSpeech: logger.info(f"Using TTS voice: {selectedVoice} for language: {languageCode}") - voice = texttospeech.VoiceSelectionParams( - language_code=languageCode, - name=selectedVoice, - ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL - ) + if self._isGeminiTtsSpeakerVoiceName(selectedVoice): + synthesisInput = texttospeech.SynthesisInput( + text=text, + prompt=_GEMINI_TTS_NEUTRAL_PROMPT, + ) + voice = texttospeech.VoiceSelectionParams( + language_code=languageCode, + name=selectedVoice, + model_name=_GEMINI_TTS_DEFAULT_MODEL, + ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL, + ) + else: + synthesisInput = texttospeech.SynthesisInput(text=text) + voice = texttospeech.VoiceSelectionParams( + language_code=languageCode, + name=selectedVoice, + ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL, + ) # Select the type of audio file to return audioConfig = texttospeech.AudioConfig( @@ -1059,7 +1081,8 @@ class ConnectorGoogleSpeech: "language_codes": list(voice.language_codes) if voice.language_codes else [], "gender": gender, "ssml_gender": voice.ssml_gender.name if voice.ssml_gender else "NEUTRAL", - "natural_sample_rate_hertz": voice.natural_sample_rate_hertz + "natural_sample_rate_hertz": voice.natural_sample_rate_hertz, + "geminiTts": self._isGeminiTtsSpeakerVoiceName(voice.name or ""), } # Include any additional fields if available from Google API diff --git a/modules/connectors/providerClickup/__init__.py b/modules/connectors/providerClickup/__init__.py new file mode 100644 index 00000000..12439593 --- /dev/null +++ b/modules/connectors/providerClickup/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. 
+"""ClickUp provider connector.""" + +from .connectorClickup import ClickupConnector + +__all__ = ["ClickupConnector"] diff --git a/modules/connectors/providerClickup/connectorClickup.py b/modules/connectors/providerClickup/connectorClickup.py new file mode 100644 index 00000000..cd49570e --- /dev/null +++ b/modules/connectors/providerClickup/connectorClickup.py @@ -0,0 +1,268 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +"""ClickUp ProviderConnector — virtual paths for teams → lists → tasks (table rows). + +Path convention (leading slash, no trailing slash except root): + / — authorized workspaces (teams) + /team/{teamId} — spaces in the workspace + /team/{teamId}/space/{spaceId} — folders + folderless lists + /team/{teamId}/space/{spaceId}/folder/{folderId} — lists in folder + /team/{teamId}/list/{listId} — tasks in list (rows) + /team/{teamId}/list/{listId}/task/{taskId} — single task (download = JSON) +""" + +from __future__ import annotations + +import json +import logging +import re +from typing import Any, Dict, List, Optional + +from modules.connectors.connectorProviderBase import ( + ProviderConnector, + ServiceAdapter, + DownloadResult, +) +from modules.datamodels.datamodelDataSource import ExternalEntry +from modules.serviceCenter.services.serviceClickup.mainServiceClickup import ClickupService + +logger = logging.getLogger(__name__) + +# type metadata for ExternalEntry.metadata["cuType"] +_CU_TEAM = "team" +_CU_SPACE = "space" +_CU_FOLDER = "folder" +_CU_LIST = "list" +_CU_TASK = "task" + + +def _norm(path: str) -> str: + p = (path or "").strip() or "/" + if not p.startswith("/"): + p = "/" + p + if p != "/" and p.endswith("/"): + p = p.rstrip("/") + return p + + +class ClickupListsAdapter(ServiceAdapter): + """Maps ClickUp hierarchy + list tasks to browse/download/upload/search.""" + + def __init__(self, access_token: str): + self._token = access_token + # Minimal service instance for API calls (no ServiceCenter context) + self._svc 
= ClickupService(context=None, get_service=lambda _: None) + self._svc.setAccessToken(access_token) + + async def browse(self, path: str, filter: Optional[str] = None) -> List[ExternalEntry]: + p = _norm(path) + out: List[ExternalEntry] = [] + + if p == "/": + data = await self._svc.getAuthorizedTeams() + if isinstance(data, dict) and data.get("error"): + logger.warning(f"ClickUp browse root: {data.get('error')}") + return [] + teams = data.get("teams", []) if isinstance(data, dict) else [] + for t in teams: + tid = str(t.get("id", "")) + name = t.get("name") or tid + out.append( + ExternalEntry( + name=name, + path=f"/team/{tid}", + isFolder=True, + metadata={"cuType": _CU_TEAM, "id": tid, "raw": t}, + ) + ) + return out + + m = re.match(r"^/team/([^/]+)$", p) + if m: + team_id = m.group(1) + data = await self._svc.getSpaces(team_id) + if isinstance(data, dict) and data.get("error"): + return [] + spaces = data.get("spaces", []) if isinstance(data, dict) else [] + for s in spaces: + sid = str(s.get("id", "")) + name = s.get("name") or sid + out.append( + ExternalEntry( + name=name, + path=f"/team/{team_id}/space/{sid}", + isFolder=True, + metadata={"cuType": _CU_SPACE, "id": sid, "raw": s}, + ) + ) + return out + + m = re.match(r"^/team/([^/]+)/space/([^/]+)$", p) + if m: + team_id, space_id = m.group(1), m.group(2) + folders_r = await self._svc.getFolders(space_id) + lists_r = await self._svc.getFolderlessLists(space_id) + if isinstance(folders_r, dict) and not folders_r.get("error"): + for f in folders_r.get("folders", []) or []: + fid = str(f.get("id", "")) + name = f.get("name") or fid + out.append( + ExternalEntry( + name=name, + path=f"/team/{team_id}/space/{space_id}/folder/{fid}", + isFolder=True, + metadata={"cuType": _CU_FOLDER, "id": fid, "raw": f}, + ) + ) + if isinstance(lists_r, dict) and not lists_r.get("error"): + for lst in lists_r.get("lists", []) or []: + lid = str(lst.get("id", "")) + name = lst.get("name") or lid + out.append( + ExternalEntry( 
+ name=name, + path=f"/team/{team_id}/list/{lid}", + isFolder=True, + metadata={"cuType": _CU_LIST, "id": lid, "raw": lst}, + ) + ) + return out + + m = re.match(r"^/team/([^/]+)/space/([^/]+)/folder/([^/]+)$", p) + if m: + team_id, _space_id, folder_id = m.group(1), m.group(2), m.group(3) + data = await self._svc.getListsInFolder(folder_id) + if isinstance(data, dict) and data.get("error"): + return [] + for lst in data.get("lists", []) or []: + lid = str(lst.get("id", "")) + name = lst.get("name") or lid + out.append( + ExternalEntry( + name=name, + path=f"/team/{team_id}/list/{lid}", + isFolder=True, + metadata={"cuType": _CU_LIST, "id": lid, "raw": lst}, + ) + ) + return out + + m = re.match(r"^/team/([^/]+)/list/([^/]+)$", p) + if m: + team_id, list_id = m.group(1), m.group(2) + page = 0 + while True: + data = await self._svc.getTasksInList(list_id, page=page) + if isinstance(data, dict) and data.get("error"): + break + tasks = data.get("tasks", []) if isinstance(data, dict) else [] + for task in tasks: + tid = str(task.get("id", "")) + name = task.get("name") or tid + out.append( + ExternalEntry( + name=name, + path=f"/team/{team_id}/list/{list_id}/task/{tid}", + isFolder=False, + metadata={ + "cuType": _CU_TASK, + "id": tid, + "task": task, + }, + ) + ) + if len(tasks) < 100: + break + page += 1 + return out + + m = re.match(r"^/team/([^/]+)/list/([^/]+)/task/([^/]+)$", p) + if m: + team_id, list_id, task_id = m.group(1), m.group(2), m.group(3) + out.append( + ExternalEntry( + name=f"task-{task_id}.json", + path=p, + isFolder=False, + metadata={"cuType": _CU_TASK, "id": task_id, "listId": list_id, "teamId": team_id}, + ) + ) + return out + + logger.warning(f"ClickUp browse: unsupported path {p}") + return [] + + async def download(self, path: str) -> Any: + p = _norm(path) + m = re.match(r"^/team/([^/]+)/list/([^/]+)/task/([^/]+)$", p) + if not m: + return b"" + task_id = m.group(3) + data = await self._svc.getTask(task_id) + if isinstance(data, dict) and 
data.get("error"): + return json.dumps(data).encode("utf-8") + payload = json.dumps(data, indent=2).encode("utf-8") + return DownloadResult(data=payload, fileName=f"task-{task_id}.json", mimeType="application/json") + + async def upload(self, path: str, data: bytes, fileName: str) -> dict: + """Upload attachment to a task. Path must be .../list/{listId}/task/{taskId}.""" + p = _norm(path) + m = re.match(r"^/team/([^/]+)/list/([^/]+)/task/([^/]+)$", p) + if not m: + return {"error": "Path must be /team/{teamId}/list/{listId}/task/{taskId} for upload"} + task_id = m.group(3) + return await self._svc.uploadTaskAttachment(task_id, data, fileName) + + async def search(self, query: str, path: Optional[str] = None) -> List[ExternalEntry]: + base = _norm(path or "/") + team_id: Optional[str] = None + mt = re.match(r"^/team/([^/]+)", base) + if mt: + team_id = mt.group(1) + if not team_id: + teams = await self._svc.getAuthorizedTeams() + if not isinstance(teams, dict) or teams.get("error"): + return [] + tl = teams.get("teams") or [] + if not tl: + return [] + team_id = str(tl[0].get("id", "")) + + out: List[ExternalEntry] = [] + page = 0 + while True: + data = await self._svc.searchTeamTasks(team_id, query=query, page=page) + if isinstance(data, dict) and data.get("error"): + break + tasks = data.get("tasks", []) if isinstance(data, dict) else [] + for task in tasks: + tid = str(task.get("id", "")) + name = task.get("name") or tid + list_obj = task.get("list") or {} + lid = str(list_obj.get("id", "")) if list_obj else "" + if not lid: + continue + out.append( + ExternalEntry( + name=name, + path=f"/team/{team_id}/list/{lid}/task/{tid}", + isFolder=False, + metadata={"cuType": _CU_TASK, "id": tid, "task": task}, + ) + ) + if len(tasks) < 25: + break + page += 1 + return out + + +class ClickupConnector(ProviderConnector): + """One ClickUp connection → clickup virtual file service.""" + + def getAvailableServices(self) -> List[str]: + return ["clickup"] + + def 
getServiceAdapter(self, service: str) -> ServiceAdapter: + if service != "clickup": + raise ValueError(f"ClickUp only supports 'clickup' service, got '{service}'") + return ClickupListsAdapter(self.accessToken) diff --git a/modules/datamodels/datamodelAi.py b/modules/datamodels/datamodelAi.py index 296500aa..662eded2 100644 --- a/modules/datamodels/datamodelAi.py +++ b/modules/datamodels/datamodelAi.py @@ -22,6 +22,10 @@ class OperationTypeEnum(str, Enum): IMAGE_ANALYSE = "imageAnalyse" IMAGE_GENERATE = "imageGenerate" + # Neutralization (dedicated model selection; text vs vision backends) + NEUTRALIZATION_TEXT = "neutralizationText" + NEUTRALIZATION_IMAGE = "neutralizationImage" + # Web Operations WEB_SEARCH_DATA = "webSearch" # Returns list of URLs only WEB_CRAWL = "webCrawl" # Web crawl for a given URL @@ -168,6 +172,8 @@ class AiCallRequest(BaseModel): contentParts: Optional[List['ContentPart']] = None # Content parts for model-aware chunking messages: Optional[List[Dict[str, Any]]] = Field(default=None, description="OpenAI-style messages for multi-turn agent conversations") tools: Optional[List[Dict[str, Any]]] = Field(default=None, description="Tool definitions for native function calling") + toolChoice: Optional[Any] = Field(default=None, description="Tool choice: 'auto', 'none', or specific tool (passed through to model call)") + requireNeutralization: Optional[bool] = Field(default=None, description="Per-request neutralization override: True=force, False=skip, None=use config") class AiCallResponse(BaseModel): diff --git a/modules/datamodels/datamodelBase.py b/modules/datamodels/datamodelBase.py new file mode 100644 index 00000000..862f177b --- /dev/null +++ b/modules/datamodels/datamodelBase.py @@ -0,0 +1,68 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. 
+"""Base Pydantic model with system-managed fields (DB + API + UI metadata).""" + +from typing import Optional + +from pydantic import BaseModel, Field + +from modules.shared.attributeUtils import registerModelLabels + + +class PowerOnModel(BaseModel): + sysCreatedAt: Optional[float] = Field( + default=None, + description="Record creation timestamp (UTC, set by system)", + json_schema_extra={ + "frontend_type": "timestamp", + "frontend_readonly": True, + "frontend_required": False, + "frontend_visible": False, + "system": True, + }, + ) + sysCreatedBy: Optional[str] = Field( + default=None, + description="User ID who created this record (set by system)", + json_schema_extra={ + "frontend_type": "text", + "frontend_readonly": True, + "frontend_required": False, + "frontend_visible": False, + "system": True, + }, + ) + sysModifiedAt: Optional[float] = Field( + default=None, + description="Record last modification timestamp (UTC, set by system)", + json_schema_extra={ + "frontend_type": "timestamp", + "frontend_readonly": True, + "frontend_required": False, + "frontend_visible": False, + "system": True, + }, + ) + sysModifiedBy: Optional[str] = Field( + default=None, + description="User ID who last modified this record (set by system)", + json_schema_extra={ + "frontend_type": "text", + "frontend_readonly": True, + "frontend_required": False, + "frontend_visible": False, + "system": True, + }, + ) + + +registerModelLabels( + "PowerOnModel", + {"en": "Base Record", "de": "Basisdatensatz"}, + { + "sysCreatedAt": {"en": "Created At", "de": "Erstellt am", "fr": "Cree le"}, + "sysCreatedBy": {"en": "Created By", "de": "Erstellt von", "fr": "Cree par"}, + "sysModifiedAt": {"en": "Modified At", "de": "Geaendert am", "fr": "Modifie le"}, + "sysModifiedBy": {"en": "Modified By", "de": "Geaendert von", "fr": "Modifie par"}, + }, +) diff --git a/modules/datamodels/datamodelBilling.py b/modules/datamodels/datamodelBilling.py index 995ac75d..ccf1f4a1 100644 --- 
a/modules/datamodels/datamodelBilling.py +++ b/modules/datamodels/datamodelBilling.py @@ -6,24 +6,12 @@ from typing import List, Dict, Any, Optional from enum import Enum from datetime import date, datetime, timezone from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels import uuid - -class BillingModelEnum(str, Enum): - """Billing model types (prepaid only; legacy UNLIMITED in DB maps to PREPAY_MANDATE).""" - PREPAY_MANDATE = "PREPAY_MANDATE" # Prepaid budget shared by all users in mandate - PREPAY_USER = "PREPAY_USER" # Prepaid budget per user within mandate - - -# Nur fuer initRootMandateBilling (Root-Mandant PREPAY_USER + Startguthaben in Settings). -DEFAULT_USER_CREDIT_CHF = 5.0 - - -class AccountTypeEnum(str, Enum): - """Account type for billing accounts.""" - MANDATE = "MANDATE" # Account for entire mandate - USER = "USER" # Account for specific user within mandate +# End-customer price for storage above plan-included volume (CHF per GB per month). 
+STORAGE_PRICE_PER_GB_CHF = 0.50 class TransactionTypeEnum(str, Enum): @@ -39,6 +27,8 @@ class ReferenceTypeEnum(str, Enum): PAYMENT = "PAYMENT" # Payment/top-up ADMIN = "ADMIN" # Admin adjustment SYSTEM = "SYSTEM" # System credit (e.g., initial credit) + STORAGE = "STORAGE" # Metered storage overage (prepay pool) + SUBSCRIPTION = "SUBSCRIPTION" # AI budget credit from subscription plan class PeriodTypeEnum(str, Enum): @@ -48,14 +38,13 @@ class PeriodTypeEnum(str, Enum): YEAR = "YEAR" -class BillingAccount(BaseModel): +class BillingAccount(PowerOnModel): """Billing account for mandate or user-mandate combination.""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), description="Primary key" ) mandateId: str = Field(..., description="Foreign key to Mandate") - userId: Optional[str] = Field(None, description="Foreign key to User (only for PREPAY_USER)") - accountType: AccountTypeEnum = Field(..., description="Account type: MANDATE or USER") + userId: Optional[str] = Field(None, description="Foreign key to User (None = mandate pool account, set = user audit account)") balance: float = Field(default=0.0, description="Current balance in CHF") warningThreshold: float = Field(default=0.0, description="Warning threshold in CHF") lastWarningAt: Optional[datetime] = Field(None, description="Last warning sent timestamp") @@ -69,7 +58,6 @@ registerModelLabels( "id": {"en": "ID", "de": "ID"}, "mandateId": {"en": "Mandate ID", "de": "Mandanten-ID"}, "userId": {"en": "User ID", "de": "Benutzer-ID"}, - "accountType": {"en": "Account Type", "de": "Kontotyp"}, "balance": {"en": "Balance (CHF)", "de": "Guthaben (CHF)"}, "warningThreshold": {"en": "Warning Threshold (CHF)", "de": "Warnschwelle (CHF)"}, "lastWarningAt": {"en": "Last Warning", "de": "Letzte Warnung"}, @@ -78,7 +66,7 @@ registerModelLabels( ) -class BillingTransaction(BaseModel): +class BillingTransaction(PowerOnModel): """Single billing transaction (credit, debit, adjustment).""" id: str = Field( 
default_factory=lambda: str(uuid.uuid4()), description="Primary key" @@ -129,30 +117,43 @@ registerModelLabels( class BillingSettings(BaseModel): - """Billing settings per mandate.""" + """Billing settings per mandate. Only PREPAY_MANDATE model.""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), description="Primary key" ) mandateId: str = Field(..., description="Foreign key to Mandate (UNIQUE)") - billingModel: BillingModelEnum = Field(..., description="Billing model") - - # Configuration - defaultUserCredit: float = Field( - default=0.0, - description="Automatic initial credit (CHF) for PREPAY_USER only when a user is newly added to the root mandate; other mandates use 0 on join.", - ) + warningThresholdPercent: float = Field(default=10.0, description="Warning threshold as percentage") # Stripe stripeCustomerId: Optional[str] = Field(None, description="Stripe Customer ID (cus_xxx) — one per mandate") - # Notifications (e.g. mandate owner / finance — also used when PREPAY_MANDATE pool is exhausted) + # Auto-Recharge for AI budget + autoRechargeEnabled: bool = Field(default=False, description="Auto-buy AI budget when low") + rechargeAmountCHF: float = Field(default=10.0, description="Amount per auto-recharge (CHF, prepaid via Stripe)") + rechargeMaxPerMonth: int = Field(default=3, description="Max auto-recharges per month") + rechargesThisMonth: int = Field(default=0, description="Counter: auto-recharges used this month") + monthResetAt: Optional[datetime] = Field(None, description="When rechargesThisMonth was last reset") + + # Notifications notifyEmails: List[str] = Field( default_factory=list, - description="Email addresses for billing alerts (mandate pool exhausted, warnings, etc.)", + description="Email addresses for billing alerts (pool exhausted, warnings, etc.)", ) notifyOnWarning: bool = Field(default=True, description="Send email when warning threshold is reached") + # Storage overage (high-watermark within subscription period; resets on new 
period) + storageHighWatermarkMB: float = Field( + default=0.0, description="Peak indexed data volume MB this billing period" + ) + storagePeriodStartAt: Optional[datetime] = Field( + None, description="Subscription billing period start used for storage reset" + ) + storageBilledUpToMB: float = Field( + default=0.0, + description="Overage MB already debited this period (above plan-included volume)", + ) + registerModelLabels( "BillingSettings", @@ -160,18 +161,22 @@ registerModelLabels( { "id": {"en": "ID", "de": "ID"}, "mandateId": {"en": "Mandate ID", "de": "Mandanten-ID"}, - "billingModel": {"en": "Billing Model", "de": "Abrechnungsmodell"}, - "defaultUserCredit": { - "en": "Root start credit (CHF)", - "de": "Startguthaben nur Root-Mandant (CHF)", - }, "warningThresholdPercent": {"en": "Warning Threshold (%)", "de": "Warnschwelle (%)"}, "stripeCustomerId": {"en": "Stripe Customer ID", "de": "Stripe-Kunden-ID"}, + "autoRechargeEnabled": {"en": "Auto-Recharge", "de": "Auto-Nachladung"}, + "rechargeAmountCHF": {"en": "Recharge Amount (CHF)", "de": "Nachladebetrag (CHF)"}, + "rechargeMaxPerMonth": {"en": "Max Recharges/Month", "de": "Max. 
Nachladungen/Monat"}, "notifyEmails": { "en": "Billing notification emails (owner / admin)", - "de": "E-Mails für Billing-Alerts (Inhaber/Admin)", + "de": "E-Mails fuer Billing-Alerts (Inhaber/Admin)", }, "notifyOnWarning": {"en": "Notify on Warning", "de": "Bei Warnung benachrichtigen"}, + "storageHighWatermarkMB": {"en": "Storage peak (MB)", "de": "Speicher-Peak (MB)"}, + "storagePeriodStartAt": {"en": "Storage period start", "de": "Speicher-Periodenbeginn"}, + "storageBilledUpToMB": { + "en": "Storage billed overage (MB)", + "de": "Speicher abgerechneter Überhang (MB)", + }, }, ) @@ -238,7 +243,6 @@ class BillingBalanceResponse(BaseModel): """Response model for balance endpoint.""" mandateId: str mandateName: str - billingModel: BillingModelEnum balance: float currency: str = "CHF" warningThreshold: float @@ -269,20 +273,8 @@ class BillingCheckResult(BaseModel): reason: Optional[str] = None currentBalance: Optional[float] = None requiredAmount: Optional[float] = None - billingModel: Optional[BillingModelEnum] = None upgradeRequired: Optional[bool] = None subscriptionUiPath: Optional[str] = None userAction: Optional[str] = None -def parseBillingModelFromStoredValue(raw: Optional[str]) -> BillingModelEnum: - """Map DB string to enum. 
Legacy UNLIMITED / unknown values become PREPAY_MANDATE.""" - if raw is None or (isinstance(raw, str) and raw.strip() == ""): - return BillingModelEnum.PREPAY_MANDATE - s = str(raw).strip().upper() - if s == "UNLIMITED": - return BillingModelEnum.PREPAY_MANDATE - try: - return BillingModelEnum(raw) - except ValueError: - return BillingModelEnum.PREPAY_MANDATE diff --git a/modules/datamodels/datamodelChat.py b/modules/datamodels/datamodelChat.py index 7002187a..7154e57e 100644 --- a/modules/datamodels/datamodelChat.py +++ b/modules/datamodels/datamodelChat.py @@ -5,12 +5,13 @@ from typing import List, Dict, Any, Optional from enum import Enum from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels from modules.shared.timeUtils import getUtcTimestamp import uuid -class ChatLog(BaseModel): +class ChatLog(PowerOnModel): """Log entries for chat workflows. User-owned, no mandate context.""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), description="Primary key" @@ -56,7 +57,7 @@ registerModelLabels( ) -class ChatDocument(BaseModel): +class ChatDocument(PowerOnModel): """Documents attached to chat messages. User-owned, no mandate context.""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), description="Primary key" @@ -163,7 +164,7 @@ registerModelLabels( ) -class ChatMessage(BaseModel): +class ChatMessage(PowerOnModel): """Messages in chat workflows. User-owned, no mandate context.""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), description="Primary key" @@ -260,7 +261,7 @@ registerModelLabels( ) -class ChatWorkflow(BaseModel): +class ChatWorkflow(PowerOnModel): """Chat workflow container. 
User-owned, no mandate context.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) featureInstanceId: Optional[str] = Field(None, description="Feature instance ID for multi-tenancy isolation", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) diff --git a/modules/datamodels/datamodelDataSource.py b/modules/datamodels/datamodelDataSource.py index f8238fab..1d432041 100644 --- a/modules/datamodels/datamodelDataSource.py +++ b/modules/datamodels/datamodelDataSource.py @@ -8,16 +8,18 @@ Google Drive folder, FTP directory, etc.) for agent-accessible data containers. from typing import Dict, Any, Optional from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels -from modules.shared.timeUtils import getUtcTimestamp import uuid -class DataSource(BaseModel): +class DataSource(PowerOnModel): """Configured external data source linked to a UserConnection.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") connectionId: str = Field(description="FK to UserConnection") - sourceType: str = Field(description="sharepointFolder, googleDriveFolder, outlookFolder, ftpFolder") + sourceType: str = Field( + description="sharepointFolder, googleDriveFolder, outlookFolder, ftpFolder, clickupList (path under /team/...)" + ) path: str = Field(description="External path (e.g. 
'/sites/MySite/Documents/Reports')") label: str = Field(description="User-visible label (often the last path segment)") displayPath: Optional[str] = Field( @@ -29,7 +31,21 @@ class DataSource(BaseModel): userId: str = Field(default="", description="Owner user ID") autoSync: bool = Field(default=False, description="Automatically sync on schedule") lastSynced: Optional[float] = Field(default=None, description="Last sync timestamp") - createdAt: float = Field(default_factory=getUtcTimestamp, description="Creation timestamp") + scope: str = Field( + default="personal", + description="Data visibility scope: personal, featureInstance, mandate, global", + json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [ + {"value": "personal", "label": {"en": "Personal", "de": "Persönlich"}}, + {"value": "featureInstance", "label": {"en": "Feature Instance", "de": "Feature-Instanz"}}, + {"value": "mandate", "label": {"en": "Mandate", "de": "Mandant"}}, + {"value": "global", "label": {"en": "Global", "de": "Global"}}, + ]} + ) + neutralize: bool = Field( + default=False, + description="Whether this data source should be neutralized before AI processing", + json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False} + ) registerModelLabels( @@ -47,7 +63,8 @@ registerModelLabels( "userId": {"en": "User ID", "de": "Benutzer-ID", "fr": "ID utilisateur"}, "autoSync": {"en": "Auto Sync", "de": "Auto-Sync", "fr": "Synchro auto"}, "lastSynced": {"en": "Last Synced", "de": "Letzter Sync", "fr": "Dernier sync"}, - "createdAt": {"en": "Created At", "de": "Erstellt am", "fr": "Créé le"}, + "scope": {"en": "Scope", "de": "Sichtbarkeit"}, + "neutralize": {"en": "Neutralize", "de": "Neutralisieren"}, }, ) diff --git a/modules/datamodels/datamodelFeatureDataSource.py b/modules/datamodels/datamodelFeatureDataSource.py index 89b8b372..02de0a67 100644 --- 
a/modules/datamodels/datamodelFeatureDataSource.py +++ b/modules/datamodels/datamodelFeatureDataSource.py @@ -6,14 +6,14 @@ A FeatureDataSource links a FeatureInstance table (DATA_OBJECT) to a workspace so the agent can query structured feature data (e.g. TrusteePosition rows). """ -from typing import Optional +from typing import Dict, Optional from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels -from modules.shared.timeUtils import getUtcTimestamp import uuid -class FeatureDataSource(BaseModel): +class FeatureDataSource(PowerOnModel): """A feature-instance table attached as data source in the AI workspace.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") featureInstanceId: str = Field(description="FK to FeatureInstance") @@ -24,7 +24,25 @@ class FeatureDataSource(BaseModel): mandateId: str = Field(default="", description="Mandate scope") userId: str = Field(default="", description="Owner user ID") workspaceInstanceId: str = Field(description="Workspace instance where this source is used") - createdAt: float = Field(default_factory=getUtcTimestamp, description="Creation timestamp") + scope: str = Field( + default="personal", + description="Data visibility scope: personal, featureInstance, mandate, global", + json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [ + {"value": "personal", "label": {"en": "Personal", "de": "Persönlich"}}, + {"value": "featureInstance", "label": {"en": "Feature Instance", "de": "Feature-Instanz"}}, + {"value": "mandate", "label": {"en": "Mandate", "de": "Mandant"}}, + {"value": "global", "label": {"en": "Global", "de": "Global"}}, + ]} + ) + neutralize: bool = Field( + default=False, + description="Whether this data source should be neutralized before AI processing", + json_schema_extra={"frontend_type": "checkbox", 
"frontend_readonly": False, "frontend_required": False} + ) + recordFilter: Optional[Dict[str, str]] = Field( + default=None, + description="Record-level filter applied when querying this table, e.g. {'sessionId': 'abc-123'}", + ) registerModelLabels( @@ -40,6 +58,5 @@ registerModelLabels( "mandateId": {"en": "Mandate", "de": "Mandant", "fr": "Mandat"}, "userId": {"en": "User", "de": "Benutzer", "fr": "Utilisateur"}, "workspaceInstanceId": {"en": "Workspace", "de": "Workspace", "fr": "Espace de travail"}, - "createdAt": {"en": "Created At", "de": "Erstellt am", "fr": "Créé le"}, }, ) diff --git a/modules/datamodels/datamodelFeatures.py b/modules/datamodels/datamodelFeatures.py index 0a5dc441..3134a18e 100644 --- a/modules/datamodels/datamodelFeatures.py +++ b/modules/datamodels/datamodelFeatures.py @@ -5,11 +5,12 @@ import uuid from typing import Optional, Dict, Any from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels from modules.datamodels.datamodelUtils import TextMultilingual -class Feature(BaseModel): +class Feature(PowerOnModel): """ Feature-Definition (global, z.B. 'trustee', 'chatbot'). Features sind die verfügbaren Funktionalitäten der Plattform. @@ -40,7 +41,7 @@ registerModelLabels( ) -class FeatureInstance(BaseModel): +class FeatureInstance(PowerOnModel): """ Instanz eines Features in einem Mandanten. Ein Mandant kann mehrere Instanzen desselben Features haben. 
diff --git a/modules/datamodels/datamodelFileFolder.py b/modules/datamodels/datamodelFileFolder.py index b7a19915..23cd197b 100644 --- a/modules/datamodels/datamodelFileFolder.py +++ b/modules/datamodels/datamodelFileFolder.py @@ -4,18 +4,17 @@ from typing import Optional from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels -from modules.shared.timeUtils import getUtcTimestamp import uuid -class FileFolder(BaseModel): +class FileFolder(PowerOnModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) name: str = Field(description="Folder name", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True}) parentId: Optional[str] = Field(default=None, description="Parent folder ID (null = root)", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False}) mandateId: Optional[str] = Field(default=None, description="Mandate context", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) featureInstanceId: Optional[str] = Field(default=None, description="Feature instance context", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) - createdAt: float = Field(default_factory=getUtcTimestamp, description="Creation timestamp", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}) registerModelLabels( @@ -27,6 +26,5 @@ registerModelLabels( "parentId": {"en": "Parent Folder", "fr": "Dossier parent"}, "mandateId": {"en": "Mandate ID", "fr": "ID du mandat"}, "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance"}, - "createdAt": {"en": "Created At", "fr": "Créé le"}, }, ) diff --git 
a/modules/datamodels/datamodelFiles.py b/modules/datamodels/datamodelFiles.py index afaad996..b8a44d2c 100644 --- a/modules/datamodels/datamodelFiles.py +++ b/modules/datamodels/datamodelFiles.py @@ -3,15 +3,14 @@ """File-related datamodels: FileItem, FilePreview, FileData.""" from typing import Dict, Any, List, Optional, Union -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels -from modules.shared.timeUtils import getUtcTimestamp import uuid import base64 -class FileItem(BaseModel): - model_config = ConfigDict(extra='allow') # Preserve system fields (_createdBy, _createdAt, etc.) +class FileItem(PowerOnModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) mandateId: Optional[str] = Field(default="", description="ID of the mandate this file belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) featureInstanceId: Optional[str] = Field(default="", description="ID of the feature instance this file belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "frontend_fk_source": "/api/features/instances", "frontend_fk_display_field": "label"}) @@ -19,11 +18,25 @@ class FileItem(BaseModel): mimeType: str = Field(description="MIME type of the file", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) fileHash: str = Field(description="Hash of the file", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) fileSize: int = Field(description="Size of the file in bytes", json_schema_extra={"frontend_type": "integer", "frontend_readonly": True, "frontend_required": False}) - 
creationDate: float = Field(default_factory=getUtcTimestamp, description="Date when the file was created (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}) tags: Optional[List[str]] = Field(default=None, description="Tags for categorization and search", json_schema_extra={"frontend_type": "tags", "frontend_readonly": False, "frontend_required": False}) folderId: Optional[str] = Field(default=None, description="ID of the parent folder", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False}) description: Optional[str] = Field(default=None, description="User-provided description of the file", json_schema_extra={"frontend_type": "textarea", "frontend_readonly": False, "frontend_required": False}) status: Optional[str] = Field(default=None, description="Processing status: pending, extracted, embedding, indexed, failed", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) + scope: str = Field( + default="personal", + description="Data visibility scope: personal, featureInstance, mandate, global", + json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [ + {"value": "personal", "label": {"en": "Personal", "de": "Persönlich"}}, + {"value": "featureInstance", "label": {"en": "Feature Instance", "de": "Feature-Instanz"}}, + {"value": "mandate", "label": {"en": "Mandate", "de": "Mandant"}}, + {"value": "global", "label": {"en": "Global", "de": "Global"}}, + ]} + ) + neutralize: bool = Field( + default=False, + description="Whether this file should be neutralized before AI processing", + json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False} + ) registerModelLabels( "FileItem", @@ -36,11 +49,12 @@ registerModelLabels( "mimeType": {"en": "MIME Type", "fr": "Type MIME"}, "fileHash": {"en": 
"File Hash", "fr": "Hash du fichier"}, "fileSize": {"en": "File Size", "fr": "Taille du fichier"}, - "creationDate": {"en": "Creation Date", "fr": "Date de création"}, "tags": {"en": "Tags", "fr": "Tags"}, "folderId": {"en": "Folder ID", "fr": "ID du dossier"}, "description": {"en": "Description", "fr": "Description"}, "status": {"en": "Status", "fr": "Statut"}, + "scope": {"en": "Scope", "de": "Sichtbarkeit"}, + "neutralize": {"en": "Neutralize", "de": "Neutralisieren"}, }, ) @@ -71,7 +85,7 @@ registerModelLabels( }, ) -class FileData(BaseModel): +class FileData(PowerOnModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") data: str = Field(description="File data content") base64Encoded: bool = Field(description="Whether the data is base64 encoded") diff --git a/modules/datamodels/datamodelInvitation.py b/modules/datamodels/datamodelInvitation.py index 472318af..709e5021 100644 --- a/modules/datamodels/datamodelInvitation.py +++ b/modules/datamodels/datamodelInvitation.py @@ -9,11 +9,11 @@ import uuid import secrets from typing import Optional, List from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels -from modules.shared.timeUtils import getUtcTimestamp -class Invitation(BaseModel): +class Invitation(PowerOnModel): """ Einladungs-Token für neue User. Ermöglicht Self-Service Onboarding zu Mandanten und Feature-Instanzen. 
@@ -56,15 +56,6 @@ class Invitation(BaseModel): description="Email address to send invitation link (optional)", json_schema_extra={"frontend_type": "email", "frontend_readonly": False, "frontend_required": False} ) - createdBy: str = Field( - description="User ID of the person who created the invitation", - json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True} - ) - createdAt: float = Field( - default_factory=getUtcTimestamp, - description="When the invitation was created (UTC timestamp)", - json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False} - ) expiresAt: float = Field( description="When the invitation expires (UTC timestamp)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": True} @@ -121,8 +112,6 @@ registerModelLabels( "roleIds": {"en": "Roles", "de": "Rollen", "fr": "Rôles"}, "targetUsername": {"en": "Target Username", "de": "Ziel-Benutzername", "fr": "Nom d'utilisateur cible"}, "email": {"en": "Email (optional)", "de": "E-Mail (optional)", "fr": "Email (optionnel)"}, - "createdBy": {"en": "Created By", "de": "Erstellt von", "fr": "Créé par"}, - "createdAt": {"en": "Created At", "de": "Erstellt am", "fr": "Créé le"}, "expiresAt": {"en": "Expires At", "de": "Gültig bis", "fr": "Expire le"}, "usedBy": {"en": "Used By", "de": "Verwendet von", "fr": "Utilisé par"}, "usedAt": {"en": "Used At", "de": "Verwendet am", "fr": "Utilisé le"}, diff --git a/modules/datamodels/datamodelKnowledge.py b/modules/datamodels/datamodelKnowledge.py index d03e9d5a..7ac12c15 100644 --- a/modules/datamodels/datamodelKnowledge.py +++ b/modules/datamodels/datamodelKnowledge.py @@ -3,8 +3,10 @@ """Knowledge Store data models: FileContentIndex, ContentChunk, WorkflowMemory. 
These models support the 3-tier RAG architecture: -- Shared Layer: mandateId-scoped, isShared=True -- Instance Layer: userId + featureInstanceId-scoped +- Personal Layer: scope=personal, userId-scoped +- Instance Layer: scope=featureInstance, featureInstanceId-scoped +- Mandate Layer: scope=mandate, mandateId-scoped (visible to all mandate users) +- Global Layer: scope=global (sysAdmin only) - Workflow Layer: workflowId-scoped (WorkflowMemory) Vector fields use json_schema_extra={"db_type": "vector(1536)"} for pgvector. @@ -12,19 +14,19 @@ Vector fields use json_schema_extra={"db_type": "vector(1536)"} for pgvector. from typing import Dict, Any, List, Optional from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels from modules.shared.timeUtils import getUtcTimestamp import uuid -class FileContentIndex(BaseModel): +class FileContentIndex(PowerOnModel): """Structural index of a file's content objects. Created without AI. - Lives in the Instance Layer; optionally promoted to Shared Layer via isShared.""" + Scope is mirrored from FileItem (poweron_management) at indexing time.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key (typically = fileId)") userId: str = Field(description="Owner user ID") featureInstanceId: str = Field(default="", description="Feature instance scope") mandateId: str = Field(default="", description="Mandate scope") - isShared: bool = Field(default=False, description="Visible in Shared Layer for all mandate users") fileName: str = Field(description="Original file name") mimeType: str = Field(description="MIME type of the file") containerPath: Optional[str] = Field(default=None, description="Path within a container (e.g. 
'archive.zip/folder/report.pdf')") @@ -34,6 +36,18 @@ class FileContentIndex(BaseModel): objectSummary: List[Dict[str, Any]] = Field(default_factory=list, description="Compact summary per content object") extractedAt: float = Field(default_factory=getUtcTimestamp, description="Extraction timestamp") status: str = Field(default="pending", description="Processing status: pending, extracted, embedding, indexed, failed") + scope: str = Field( + default="personal", + description="Data visibility scope: personal, featureInstance, mandate, global", + ) + neutralizationStatus: Optional[str] = Field( + default=None, + description="Neutralization status: completed, failed, skipped, None = not required", + ) + isNeutralized: bool = Field( + default=False, + description="True if content was neutralized before indexing", + ) registerModelLabels( @@ -44,7 +58,6 @@ registerModelLabels( "userId": {"en": "User ID", "fr": "ID utilisateur"}, "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance"}, "mandateId": {"en": "Mandate ID", "fr": "ID du mandat"}, - "isShared": {"en": "Shared", "fr": "Partagé"}, "fileName": {"en": "File Name", "fr": "Nom de fichier"}, "mimeType": {"en": "MIME Type", "fr": "Type MIME"}, "containerPath": {"en": "Container Path", "fr": "Chemin du conteneur"}, @@ -54,11 +67,14 @@ registerModelLabels( "objectSummary": {"en": "Object Summary", "fr": "Résumé des objets"}, "extractedAt": {"en": "Extracted At", "fr": "Extrait le"}, "status": {"en": "Status", "fr": "Statut"}, + "scope": {"en": "Scope", "de": "Sichtbarkeit"}, + "neutralizationStatus": {"en": "Neutralization Status", "de": "Neutralisierungsstatus"}, + "isNeutralized": {"en": "Is Neutralized", "de": "Neutralisiert"}, }, ) -class ContentChunk(BaseModel): +class ContentChunk(PowerOnModel): """Persisted content chunk with embedding vector. Reusable across workflows. 
Scalar content object (or chunk thereof) with pgvector embedding.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") @@ -96,7 +112,7 @@ registerModelLabels( ) -class RoundMemory(BaseModel): +class RoundMemory(PowerOnModel): """Persistent per-round memory for agent tool results, file refs, and decisions. Stored after each agent round so that RAG can retrieve relevant context @@ -120,7 +136,6 @@ class RoundMemory(BaseModel): description="Embedding of summary for semantic retrieval", json_schema_extra={"db_type": "vector(1536)"}, ) - createdAt: float = Field(default_factory=getUtcTimestamp, description="Creation timestamp") registerModelLabels( @@ -136,12 +151,11 @@ registerModelLabels( "fullData": {"en": "Full Data", "fr": "Données complètes"}, "fileIds": {"en": "File IDs", "fr": "IDs de fichier"}, "embedding": {"en": "Embedding", "fr": "Vecteur d'embedding"}, - "createdAt": {"en": "Created At", "fr": "Créé le"}, }, ) -class WorkflowMemory(BaseModel): +class WorkflowMemory(PowerOnModel): """Workflow-scoped key-value cache for entities and facts. Extracted during agent rounds, persisted for cross-round and cross-workflow reuse.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") @@ -151,7 +165,6 @@ class WorkflowMemory(BaseModel): key: str = Field(description="Key identifier (e.g. 
'entity:companyName')") value: str = Field(description="Extracted value") source: str = Field(default="extraction", description="Origin: extraction, tool, conversation, summary") - createdAt: float = Field(default_factory=getUtcTimestamp, description="Creation timestamp") embedding: Optional[List[float]] = Field( default=None, description="Optional embedding for semantic lookup", json_schema_extra={"db_type": "vector(1536)"} @@ -169,7 +182,6 @@ registerModelLabels( "key": {"en": "Key", "fr": "Clé"}, "value": {"en": "Value", "fr": "Valeur"}, "source": {"en": "Source", "fr": "Source"}, - "createdAt": {"en": "Created At", "fr": "Créé le"}, "embedding": {"en": "Embedding", "fr": "Vecteur d'embedding"}, }, ) diff --git a/modules/datamodels/datamodelMembership.py b/modules/datamodels/datamodelMembership.py index 5e8b8814..ce753d15 100644 --- a/modules/datamodels/datamodelMembership.py +++ b/modules/datamodels/datamodelMembership.py @@ -9,10 +9,11 @@ Rollen werden über Junction Tables verknüpft für saubere CASCADE DELETE. import uuid from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels -class UserMandate(BaseModel): +class UserMandate(PowerOnModel): """ User-Mitgliedschaft in einem Mandanten. Kein User gehört direkt zu einem Mandanten - Zugehörigkeit wird über dieses Model gesteuert. @@ -50,7 +51,7 @@ registerModelLabels( ) -class FeatureAccess(BaseModel): +class FeatureAccess(PowerOnModel): """ User-Zugriff auf eine Feature-Instanz. Definiert welche User auf welche Feature-Instanzen zugreifen können. @@ -88,7 +89,7 @@ registerModelLabels( ) -class UserMandateRole(BaseModel): +class UserMandateRole(PowerOnModel): """ Junction Table: UserMandate zu Role. Ermöglicht CASCADE DELETE auf Datenbankebene. @@ -119,7 +120,7 @@ registerModelLabels( ) -class FeatureAccessRole(BaseModel): +class FeatureAccessRole(PowerOnModel): """ Junction Table: FeatureAccess zu Role. 
Ermöglicht CASCADE DELETE auf Datenbankebene. diff --git a/modules/datamodels/datamodelMessaging.py b/modules/datamodels/datamodelMessaging.py index 1c2206b7..ebacc9d4 100644 --- a/modules/datamodels/datamodelMessaging.py +++ b/modules/datamodels/datamodelMessaging.py @@ -6,8 +6,8 @@ import uuid from typing import Optional from enum import Enum from pydantic import BaseModel, Field, ConfigDict +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels -from modules.shared.timeUtils import getUtcTimestamp class MessagingChannel(str, Enum): @@ -26,7 +26,7 @@ class DeliveryStatus(str, Enum): FAILED = "failed" -class MessagingSubscription(BaseModel): +class MessagingSubscription(PowerOnModel): """Data model for messaging subscriptions""" id: str = Field( default_factory=lambda: str(uuid.uuid4()), @@ -64,26 +64,6 @@ class MessagingSubscription(BaseModel): description="Whether the subscription is enabled", json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False} ) - creationDate: float = Field( - default_factory=getUtcTimestamp, - description="When the subscription was created (UTC timestamp in seconds)", - json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False} - ) - lastModified: float = Field( - default_factory=getUtcTimestamp, - description="When the subscription was last modified (UTC timestamp in seconds)", - json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False} - ) - createdBy: Optional[str] = Field( - default=None, - description="User ID who created the subscription", - json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False} - ) - modifiedBy: Optional[str] = Field( - default=None, - description="User ID who last modified the subscription", - json_schema_extra={"frontend_type": "text", "frontend_readonly": True, 
"frontend_required": False} - ) model_config = ConfigDict(use_enum_values=True) @@ -100,10 +80,6 @@ registerModelLabels( "description": {"en": "Description", "fr": "Description"}, "isSystemSubscription": {"en": "System Subscription", "fr": "Abonnement système"}, "enabled": {"en": "Enabled", "fr": "Activé"}, - "creationDate": {"en": "Creation Date", "fr": "Date de création"}, - "lastModified": {"en": "Last Modified", "fr": "Dernière modification"}, - "createdBy": {"en": "Created By", "fr": "Créé par"}, - "modifiedBy": {"en": "Modified By", "fr": "Modifié par"}, }, ) @@ -155,16 +131,6 @@ class MessagingSubscriptionRegistration(BaseModel): description="Whether this registration is enabled", json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False} ) - creationDate: float = Field( - default_factory=getUtcTimestamp, - description="When the registration was created (UTC timestamp in seconds)", - json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False} - ) - lastModified: float = Field( - default_factory=getUtcTimestamp, - description="When the registration was last modified (UTC timestamp in seconds)", - json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False} - ) model_config = ConfigDict(use_enum_values=True) @@ -181,8 +147,6 @@ registerModelLabels( "channel": {"en": "Channel", "fr": "Canal"}, "channelConfig": {"en": "Channel Config", "fr": "Configuration du canal"}, "enabled": {"en": "Enabled", "fr": "Activé"}, - "creationDate": {"en": "Creation Date", "fr": "Date de création"}, - "lastModified": {"en": "Last Modified", "fr": "Dernière modification"}, }, ) @@ -248,11 +212,6 @@ class MessagingDelivery(BaseModel): description="When the delivery was sent (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False} ) - creationDate: float = Field( - 
default_factory=getUtcTimestamp, - description="When the delivery record was created (UTC timestamp in seconds)", - json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False} - ) model_config = ConfigDict(use_enum_values=True) @@ -270,7 +229,6 @@ registerModelLabels( "status": {"en": "Status", "fr": "Statut"}, "errorMessage": {"en": "Error Message", "fr": "Message d'erreur"}, "sentAt": {"en": "Sent At", "fr": "Envoyé le"}, - "creationDate": {"en": "Creation Date", "fr": "Date de création"}, }, ) @@ -349,4 +307,3 @@ class MessagingSubscriptionExecutionResult(BaseModel): description="Error message if execution failed", json_schema_extra={"frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False} ) - model_config = ConfigDict(extra="allow") # Allow additional fields for custom results diff --git a/modules/datamodels/datamodelNotification.py b/modules/datamodels/datamodelNotification.py index b1475767..f5af0f55 100644 --- a/modules/datamodels/datamodelNotification.py +++ b/modules/datamodels/datamodelNotification.py @@ -9,8 +9,8 @@ import uuid from typing import Optional, List from enum import Enum from pydantic import BaseModel, Field, ConfigDict +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels -from modules.shared.timeUtils import getUtcTimestamp class NotificationType(str, Enum): @@ -43,7 +43,7 @@ class NotificationAction(BaseModel): ) -class UserNotification(BaseModel): +class UserNotification(PowerOnModel): """ In-app notification for a user. Supports actionable notifications with accept/decline buttons. 
@@ -137,11 +137,6 @@ class UserNotification(BaseModel): ) # Timestamps - createdAt: float = Field( - default_factory=getUtcTimestamp, - description="When the notification was created (UTC timestamp)", - json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False} - ) readAt: Optional[float] = Field( default=None, description="When the notification was read (UTC timestamp)", @@ -177,7 +172,6 @@ registerModelLabels( "actions": {"en": "Actions", "de": "Aktionen", "fr": "Actions"}, "actionTaken": {"en": "Action Taken", "de": "Durchgeführte Aktion", "fr": "Action effectuée"}, "actionResult": {"en": "Action Result", "de": "Aktions-Ergebnis", "fr": "Résultat de l'action"}, - "createdAt": {"en": "Created At", "de": "Erstellt am", "fr": "Créé le"}, "readAt": {"en": "Read At", "de": "Gelesen am", "fr": "Lu le"}, "actionedAt": {"en": "Actioned At", "de": "Bearbeitet am", "fr": "Traité le"}, "expiresAt": {"en": "Expires At", "de": "Gültig bis", "fr": "Expire le"}, diff --git a/modules/datamodels/datamodelRbac.py b/modules/datamodels/datamodelRbac.py index 978c3be6..b9e0cb91 100644 --- a/modules/datamodels/datamodelRbac.py +++ b/modules/datamodels/datamodelRbac.py @@ -13,6 +13,7 @@ import uuid from typing import Optional from enum import Enum from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels from modules.datamodels.datamodelUtils import TextMultilingual from modules.datamodels.datamodelUam import AccessLevel @@ -25,7 +26,7 @@ class AccessRuleContext(str, Enum): RESOURCE = "RESOURCE" # System resources (AI models, actions, etc.) -class Role(BaseModel): +class Role(PowerOnModel): """ Data model for RBAC roles. @@ -90,7 +91,7 @@ registerModelLabels( ) -class AccessRule(BaseModel): +class AccessRule(PowerOnModel): """ Data model for access control rules. 
diff --git a/modules/datamodels/datamodelSecurity.py b/modules/datamodels/datamodelSecurity.py index 5caafe1b..dc8c26e6 100644 --- a/modules/datamodels/datamodelSecurity.py +++ b/modules/datamodels/datamodelSecurity.py @@ -11,6 +11,7 @@ Multi-Tenant Design: from typing import Optional, Any from pydantic import BaseModel, Field, ConfigDict, model_validator +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels from modules.shared.timeUtils import getUtcTimestamp from .datamodelUam import AuthAuthority @@ -30,7 +31,7 @@ class TokenPurpose(str, Enum): DATA_CONNECTION = "dataConnection" -class Token(BaseModel): +class Token(PowerOnModel): """ Authentication Token model. @@ -55,9 +56,6 @@ class Token(BaseModel): description="When the token expires (UTC timestamp in seconds)" ) tokenRefresh: Optional[str] = None - createdAt: Optional[float] = Field( - None, description="When the token was created (UTC timestamp in seconds)" - ) status: TokenStatus = Field( default=TokenStatus.ACTIVE, description="Token status: active/revoked" ) @@ -106,7 +104,6 @@ registerModelLabels( "tokenType": {"en": "Token Type", "de": "Token-Typ", "fr": "Type de jeton"}, "expiresAt": {"en": "Expires At", "de": "Läuft ab am", "fr": "Expire le"}, "tokenRefresh": {"en": "Refresh Token", "de": "Refresh-Token", "fr": "Jeton de rafraîchissement"}, - "createdAt": {"en": "Created At", "de": "Erstellt am", "fr": "Créé le"}, "status": {"en": "Status", "de": "Status", "fr": "Statut"}, "revokedAt": {"en": "Revoked At", "de": "Widerrufen am", "fr": "Révoqué le"}, "revokedBy": {"en": "Revoked By", "de": "Widerrufen von", "fr": "Révoqué par"}, @@ -116,7 +113,7 @@ registerModelLabels( ) -class AuthEvent(BaseModel): +class AuthEvent(PowerOnModel): """Authentication event for audit logging.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the auth event", json_schema_extra={"frontend_type": "text", 
"frontend_readonly": True, "frontend_required": False}) userId: str = Field(description="ID of the user this event belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}) diff --git a/modules/datamodels/datamodelSubscription.py b/modules/datamodels/datamodelSubscription.py index 1c1435d8..1791e7a9 100644 --- a/modules/datamodels/datamodelSubscription.py +++ b/modules/datamodels/datamodelSubscription.py @@ -10,6 +10,7 @@ from typing import Dict, List, Optional from enum import Enum from datetime import datetime, timezone from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels import uuid @@ -30,6 +31,7 @@ OPERATIVE_STATUSES = {SubscriptionStatusEnum.ACTIVE, SubscriptionStatusEnum.TRIA ALLOWED_TRANSITIONS = { (SubscriptionStatusEnum.PENDING, SubscriptionStatusEnum.ACTIVE), + (SubscriptionStatusEnum.PENDING, SubscriptionStatusEnum.TRIALING), (SubscriptionStatusEnum.PENDING, SubscriptionStatusEnum.SCHEDULED), (SubscriptionStatusEnum.PENDING, SubscriptionStatusEnum.EXPIRED), (SubscriptionStatusEnum.SCHEDULED, SubscriptionStatusEnum.ACTIVE), @@ -70,6 +72,8 @@ class SubscriptionPlan(BaseModel): maxUsers: Optional[int] = Field(None, description="Hard cap on active users (None = unlimited)") maxFeatureInstances: Optional[int] = Field(None, description="Hard cap on active feature instances (None = unlimited)") trialDays: Optional[int] = Field(None, description="Trial duration in days (only for trial plans)") + maxDataVolumeMB: Optional[int] = Field(None, description="Soft-limit for data volume in MB per mandate (None = unlimited)") + budgetAiCHF: float = Field(default=0.0, description="AI budget (CHF) included in subscription price per billing period") successorPlanKey: Optional[str] = Field(None, description="Plan to transition to when trial ends") @@ -84,6 +88,8 @@ registerModelLabels( "pricePerFeatureInstanceCHF": 
{"en": "Price per Instance (CHF)", "de": "Preis pro Instanz (CHF)"}, "maxUsers": {"en": "Max Users", "de": "Max. Benutzer", "fr": "Max. utilisateurs"}, "maxFeatureInstances": {"en": "Max Instances", "de": "Max. Instanzen", "fr": "Max. instances"}, + "maxDataVolumeMB": {"en": "Data Volume (MB)", "de": "Datenvolumen (MB)"}, + "budgetAiCHF": {"en": "AI Budget (CHF)", "de": "AI-Budget (CHF)"}, }, ) @@ -122,7 +128,7 @@ registerModelLabels( # Instance: MandateSubscription # ============================================================================ -class MandateSubscription(BaseModel): +class MandateSubscription(PowerOnModel): """A subscription instance bound to a specific mandate. See wiki/concepts/Subscription-State-Machine.md for state transitions.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") @@ -182,20 +188,24 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = { autoRenew=False, maxUsers=None, maxFeatureInstances=None, + maxDataVolumeMB=None, + budgetAiCHF=0.0, ), "TRIAL_7D": SubscriptionPlan( planKey="TRIAL_7D", selectableByUser=False, title={"en": "Free Trial (7 days)", "de": "Gratis-Testphase (7 Tage)", "fr": "Essai gratuit (7 jours)"}, description={ - "en": "Try the platform for 7 days — 1 user, up to 3 feature instances.", - "de": "Plattform 7 Tage testen — 1 User, bis zu 3 Feature-Instanzen.", + "en": "Try the platform for 7 days — 1 user, up to 3 feature instances, 5 CHF AI budget included.", + "de": "Plattform 7 Tage testen — 1 User, bis zu 3 Feature-Instanzen, 5 CHF AI-Budget inklusive.", }, billingPeriod=BillingPeriodEnum.NONE, autoRenew=False, maxUsers=1, maxFeatureInstances=3, trialDays=7, + maxDataVolumeMB=500, + budgetAiCHF=5.0, successorPlanKey="STANDARD_MONTHLY", ), "STANDARD_MONTHLY": SubscriptionPlan( @@ -203,24 +213,28 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = { selectableByUser=True, title={"en": "Standard (Monthly)", "de": "Standard (Monatlich)", "fr": "Standard (Mensuel)"}, description={ - "en": 
"Usage-based billing per active user and feature instance, billed monthly.", - "de": "Nutzungsbasierte Abrechnung pro aktivem User und Feature-Instanz, monatlich.", + "en": "Usage-based billing per active user and feature instance, billed monthly. Includes 10 CHF AI budget.", + "de": "Nutzungsbasierte Abrechnung pro aktivem User und Feature-Instanz, monatlich. Inkl. 10 CHF AI-Budget.", }, billingPeriod=BillingPeriodEnum.MONTHLY, - pricePerUserCHF=90.0, - pricePerFeatureInstanceCHF=150.0, + pricePerUserCHF=79.0, + pricePerFeatureInstanceCHF=119.0, + maxDataVolumeMB=1024, + budgetAiCHF=10.0, ), "STANDARD_YEARLY": SubscriptionPlan( planKey="STANDARD_YEARLY", selectableByUser=True, title={"en": "Standard (Yearly)", "de": "Standard (Jährlich)", "fr": "Standard (Annuel)"}, description={ - "en": "Usage-based billing per active user and feature instance, billed yearly.", - "de": "Nutzungsbasierte Abrechnung pro aktivem User und Feature-Instanz, jährlich.", + "en": "Usage-based billing per active user and feature instance, billed yearly. Includes 120 CHF AI budget.", + "de": "Nutzungsbasierte Abrechnung pro aktivem User und Feature-Instanz, jährlich. Inkl. 
120 CHF AI-Budget.", }, billingPeriod=BillingPeriodEnum.YEARLY, - pricePerUserCHF=1080.0, - pricePerFeatureInstanceCHF=1800.0, + pricePerUserCHF=948.0, + pricePerFeatureInstanceCHF=1428.0, + maxDataVolumeMB=1024, + budgetAiCHF=120.0, ), } diff --git a/modules/datamodels/datamodelUam.py b/modules/datamodels/datamodelUam.py index 22d94ebe..35e9ec7c 100644 --- a/modules/datamodels/datamodelUam.py +++ b/modules/datamodels/datamodelUam.py @@ -10,9 +10,10 @@ Multi-Tenant Design: """ import uuid -from typing import Optional, List +from typing import Optional, List, Dict, Any from enum import Enum from pydantic import BaseModel, Field, EmailStr, field_validator, computed_field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels from modules.shared.timeUtils import getUtcTimestamp @@ -21,6 +22,7 @@ class AuthAuthority(str, Enum): LOCAL = "local" GOOGLE = "google" MSFT = "msft" + CLICKUP = "clickup" class ConnectionStatus(str, Enum): ACTIVE = "active" @@ -59,7 +61,7 @@ class UserPermissions(BaseModel): ) -class Mandate(BaseModel): +class Mandate(PowerOnModel): """ Mandate (Mandant/Tenant) model. Ein Mandant ist ein isolierter Bereich für Daten und Berechtigungen. @@ -88,6 +90,11 @@ class Mandate(BaseModel): description="Whether this is a system mandate (e.g. root mandate). Cannot be deleted.", json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": False} ) + deletedAt: Optional[float] = Field( + default=None, + description="Timestamp when the mandate was soft-deleted. 
After 30 days, hard-delete is triggered.", + json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False} + ) @field_validator('isSystem', mode='before') @classmethod @@ -97,7 +104,6 @@ class Mandate(BaseModel): return False return v - registerModelLabels( "Mandate", {"en": "Mandate", "de": "Mandant", "fr": "Mandat"}, @@ -107,11 +113,12 @@ registerModelLabels( "label": {"en": "Label", "de": "Label", "fr": "Libellé"}, "enabled": {"en": "Enabled", "de": "Aktiviert", "fr": "Activé"}, "isSystem": {"en": "System Mandate", "de": "System-Mandant", "fr": "Mandat système"}, + "deletedAt": {"en": "Deleted at", "de": "Gelöscht am", "fr": "Supprimé le"}, }, ) -class UserConnection(BaseModel): +class UserConnection(PowerOnModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the connection", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) userId: str = Field(description="ID of the user this connection belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) authority: AuthAuthority = Field(description="Authentication authority", json_schema_extra={"frontend_type": "select", "frontend_readonly": True, "frontend_required": False, "frontend_options": "/api/connections/authorities/options"}) @@ -141,7 +148,12 @@ class UserConnection(BaseModel): @property def displayLabel(self) -> str: """Human-readable label for display in dropdowns""" - authorityLabels = {"msft": "Microsoft", "google": "Google", "local": "Local"} + authorityLabels = { + "msft": "Microsoft", + "google": "Google", + "local": "Local", + "clickup": "ClickUp", + } return f"{authorityLabels.get(self.authority.value, self.authority.value)}: {self.externalUsername}" @@ -168,7 +180,7 @@ registerModelLabels( ) -class User(BaseModel): +class User(PowerOnModel): """ User model. 
@@ -255,6 +267,11 @@ class User(BaseModel): description="Primary authentication authority", json_schema_extra={"frontend_type": "select", "frontend_readonly": True, "frontend_required": False, "frontend_options": "/api/connections/authorities/options"} ) + roleLabels: List[str] = Field( + default_factory=list, + description="Role labels (from DB or enriched when loading users)", + json_schema_extra={"frontend_type": "multiselect", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}, + ) registerModelLabels( @@ -269,6 +286,7 @@ registerModelLabels( "enabled": {"en": "Enabled", "de": "Aktiviert", "fr": "Activé"}, "isSysAdmin": {"en": "System Admin", "de": "System-Admin", "fr": "Admin système"}, "authenticationAuthority": {"en": "Auth Authority", "de": "Authentifizierung", "fr": "Autorité d'authentification"}, + "roleLabels": {"en": "Role Labels", "de": "Rollen-Labels", "fr": "Libellés de rôles"}, }, ) @@ -289,3 +307,65 @@ registerModelLabels( "resetTokenExpires": {"en": "Reset Token Expires", "de": "Token läuft ab", "fr": "Expiration du jeton"}, }, ) + + +def _normalizeTtsVoiceMap(value: Any) -> Optional[Dict[str, str]]: + """ + Coerce ttsVoiceMap payloads to Dict[str, str]. + + UI/clients may send per-locale objects like {"voiceName": "de-DE-Chirp3-HD-Achird"}; + storage and model field type are locale -> voice id string. 
+ """ + if value is None: + return None + if not isinstance(value, dict): + return None + out: Dict[str, str] = {} + for rawKey, rawVal in value.items(): + key = str(rawKey) + if rawVal is None: + continue + if isinstance(rawVal, str): + out[key] = rawVal + elif isinstance(rawVal, dict): + vn = rawVal.get("voiceName") + if vn is not None and str(vn).strip() != "": + out[key] = str(vn).strip() + else: + out[key] = str(rawVal) + return out if out else None + + +class UserVoicePreferences(PowerOnModel): + """User-level voice/language preferences, shared across all features.""" + id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") + userId: str = Field(description="User ID") + mandateId: Optional[str] = Field(default=None, description="Mandate scope (None = global for user)") + sttLanguage: str = Field(default="de-DE", description="Speech-to-text language code") + ttsLanguage: str = Field(default="de-DE", description="Text-to-speech language code") + ttsVoice: Optional[str] = Field(default=None, description="Preferred TTS voice identifier") + ttsVoiceMap: Optional[Dict[str, str]] = Field(default=None, description="Language-to-voice mapping") + translationSourceLanguage: Optional[str] = Field(default=None, description="Source language for translations") + translationTargetLanguage: Optional[str] = Field(default=None, description="Target language for translations") + + @field_validator("ttsVoiceMap", mode="before") + @classmethod + def _validateTtsVoiceMap(cls, value: Any) -> Optional[Dict[str, str]]: + return _normalizeTtsVoiceMap(value) + + +registerModelLabels( + "UserVoicePreferences", + {"en": "Voice Preferences", "de": "Spracheinstellungen", "fr": "Préférences vocales"}, + { + "id": {"en": "ID", "de": "ID", "fr": "ID"}, + "userId": {"en": "User ID", "de": "Benutzer-ID", "fr": "ID utilisateur"}, + "mandateId": {"en": "Mandate ID", "de": "Mandanten-ID", "fr": "ID du mandat"}, + "sttLanguage": {"en": "STT Language", "de": 
"STT-Sprache", "fr": "Langue STT"}, + "ttsLanguage": {"en": "TTS Language", "de": "TTS-Sprache", "fr": "Langue TTS"}, + "ttsVoice": {"en": "TTS Voice", "de": "TTS-Stimme", "fr": "Voix TTS"}, + "ttsVoiceMap": {"en": "Voice Map", "de": "Stimmen-Zuordnung", "fr": "Carte des voix"}, + "translationSourceLanguage": {"en": "Translation Source", "de": "Übersetzung Quelle", "fr": "Langue source"}, + "translationTargetLanguage": {"en": "Translation Target", "de": "Übersetzung Ziel", "fr": "Langue cible"}, + }, +) diff --git a/modules/datamodels/datamodelUtils.py b/modules/datamodels/datamodelUtils.py index 614d6592..1088cb31 100644 --- a/modules/datamodels/datamodelUtils.py +++ b/modules/datamodels/datamodelUtils.py @@ -3,13 +3,13 @@ """Utility datamodels: Prompt, TextMultilingual.""" from typing import Dict, Optional -from pydantic import BaseModel, ConfigDict, Field, field_validator +from pydantic import BaseModel, Field, field_validator +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels import uuid -class Prompt(BaseModel): - model_config = ConfigDict(extra='allow') # Preserve system fields (_createdBy, _createdAt, etc.) 
+class Prompt(PowerOnModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) mandateId: str = Field(default="", description="ID of the mandate this prompt belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}) isSystem: bool = Field(default=False, description="System prompt visible to all users (read-only for non-SysAdmin)", json_schema_extra={"frontend_type": "boolean", "frontend_readonly": True, "frontend_required": False}) diff --git a/modules/datamodels/datamodelVoice.py b/modules/datamodels/datamodelVoice.py index 565c7677..c3a622ac 100644 --- a/modules/datamodels/datamodelVoice.py +++ b/modules/datamodels/datamodelVoice.py @@ -1,7 +1,7 @@ # Copyright (c) 2025 Patrick Motsch # All rights reserved. -"""Voice settings datamodel — re-exported from workspace feature for backward compatibility.""" +"""Voice settings datamodel — re-exported from UAM for central voice preferences.""" -from modules.features.workspace.datamodelFeatureWorkspace import VoiceSettings +from modules.datamodels.datamodelUam import UserVoicePreferences -__all__ = ["VoiceSettings"] +__all__ = ["UserVoicePreferences"] diff --git a/modules/features/automation/datamodelFeatureAutomation.py b/modules/features/automation/datamodelFeatureAutomation.py index 732f3163..8ea4a300 100644 --- a/modules/features/automation/datamodelFeatureAutomation.py +++ b/modules/features/automation/datamodelFeatureAutomation.py @@ -4,6 +4,7 @@ from typing import List, Dict, Any, Optional from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels from modules.datamodels.datamodelUtils import TextMultilingual import uuid @@ -48,7 +49,7 @@ registerModelLabels( ) -class AutomationTemplate(BaseModel): +class 
AutomationTemplate(PowerOnModel): """Automation-Vorlage ohne scharfe Placeholder-Werte (DB-persistiert). System-Templates (isSystem=True): Nur durch SysAdmin aenderbar. Alle User koennen lesen. @@ -82,9 +83,6 @@ class AutomationTemplate(BaseModel): description="Feature instance ID (null for system templates, set for instance-scoped templates)", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False} ) - # System fields (_createdAt, _createdBy, etc.) werden automatisch vom DB-Connector gesetzt - - registerModelLabels( "AutomationTemplate", {"en": "Automation Template", "ge": "Automation-Vorlage", "fr": "Modèle d'automatisation"}, diff --git a/modules/features/automation/interfaceFeatureAutomation.py b/modules/features/automation/interfaceFeatureAutomation.py index 4091bc28..a4f90a51 100644 --- a/modules/features/automation/interfaceFeatureAutomation.py +++ b/modules/features/automation/interfaceFeatureAutomation.py @@ -22,6 +22,13 @@ from modules.shared.configuration import APP_CONFIG logger = logging.getLogger(__name__) + +def _automationDefinitionPayload(data: Dict[str, Any]) -> Dict[str, Any]: + """Strip connector/enrichment keys; only fields defined on AutomationDefinition.""" + allowed = AutomationDefinition.model_fields.keys() + return {k: v for k, v in (data or {}).items() if k in allowed} + + # Singleton factory for Automation instances _automationInterfaces = {} @@ -100,7 +107,7 @@ class AutomationObjects: if recordId: record = self.db.getRecordset(model, recordFilter={"id": recordId}) if record: - return record[0].get("_createdBy") == self.userId + return record[0].get("sysCreatedBy") == self.userId else: return False # Record not found = no access return True # No recordId needed (e.g., for CREATE) @@ -130,7 +137,7 @@ class AutomationObjects: featureInstanceIds = set() for automation in automations: - createdBy = automation.get("_createdBy") + createdBy = automation.get("sysCreatedBy") if createdBy: 
userIds.add(createdBy) @@ -186,8 +193,8 @@ class AutomationObjects: # Enrich each automation with the fetched data # SECURITY: Never show a fallback name — if lookup fails, show empty string for automation in automations: - createdBy = automation.get("_createdBy") - automation["_createdByUserName"] = usersMap.get(createdBy, "") if createdBy else "" + createdBy = automation.get("sysCreatedBy") + automation["sysCreatedByUserName"] = usersMap.get(createdBy, "") if createdBy else "" mandateId = automation.get("mandateId") automation["mandateName"] = mandatesMap.get(mandateId, "") if mandateId else "" @@ -295,7 +302,7 @@ class AutomationObjects: Args: automationId: ID of the automation to get - includeSystemFields: If True, returns raw dict with system fields (_createdBy, etc). + includeSystemFields: If True, returns raw dict with system fields (sysCreatedBy, etc). If False (default), returns Pydantic model without system fields. """ try: @@ -330,7 +337,7 @@ class AutomationObjects: return AutomationWithSystemFields(automation) # Clean metadata fields and return Pydantic model - cleanedRecord = {k: v for k, v in automation.items() if not k.startswith("_")} + cleanedRecord = _automationDefinitionPayload(automation) return AutomationDefinition(**cleanedRecord) except Exception as e: logger.error(f"Error getting automation definition: {str(e)}") @@ -365,7 +372,7 @@ class AutomationObjects: # Ensure database connector has correct userId context if not self.userId: - logger.error(f"createAutomationDefinition: userId is not set! Cannot set _createdBy. currentUser={self.currentUser}") + logger.error(f"createAutomationDefinition: userId is not set! Cannot set sysCreatedBy. 
currentUser={self.currentUser}") elif hasattr(self.db, 'updateContext'): try: self.db.updateContext(self.userId) @@ -386,7 +393,7 @@ class AutomationObjects: self._notifyAutomationChanged() # Clean metadata fields and return Pydantic model - cleanedRecord = {k: v for k, v in createdAutomation.items() if not k.startswith("_")} + cleanedRecord = _automationDefinitionPayload(createdAutomation) return AutomationDefinition(**cleanedRecord) except Exception as e: logger.error(f"Error creating automation definition: {str(e)}") @@ -446,7 +453,7 @@ class AutomationObjects: self._notifyAutomationChanged() # Clean metadata fields and return Pydantic model - cleanedRecord = {k: v for k, v in updatedAutomation.items() if not k.startswith("_")} + cleanedRecord = _automationDefinitionPayload(updatedAutomation) return AutomationDefinition(**cleanedRecord) except Exception as e: logger.error(f"Error updating automation definition: {str(e)}") @@ -561,7 +568,7 @@ class AutomationObjects: # Collect unique user IDs userIds = set() for template in templates: - createdBy = template.get("_createdBy") + createdBy = template.get("sysCreatedBy") if createdBy: userIds.add(createdBy) @@ -585,8 +592,8 @@ class AutomationObjects: # Apply to templates — SECURITY: no fallback, empty if not found for template in templates: - createdBy = template.get("_createdBy") - template["_createdByUserName"] = userNameMap.get(createdBy, "") if createdBy else "" + createdBy = template.get("sysCreatedBy") + template["sysCreatedByUserName"] = userNameMap.get(createdBy, "") if createdBy else "" except Exception as e: logger.warning(f"Could not enrich templates with user names: {e}") diff --git a/modules/features/automation/mainAutomation.py b/modules/features/automation/mainAutomation.py index 4bb30f7f..d56804fd 100644 --- a/modules/features/automation/mainAutomation.py +++ b/modules/features/automation/mainAutomation.py @@ -227,7 +227,7 @@ def getFeatureDefinition() -> Dict[str, Any]: "code": FEATURE_CODE, 
"label": FEATURE_LABEL, "icon": FEATURE_ICON, - "autoCreateInstance": True, # Automatically create instance in root mandate during bootstrap + "autoCreateInstance": False, } diff --git a/modules/features/automation/routeFeatureAutomation.py b/modules/features/automation/routeFeatureAutomation.py index 48f53eea..c6343b25 100644 --- a/modules/features/automation/routeFeatureAutomation.py +++ b/modules/features/automation/routeFeatureAutomation.py @@ -77,8 +77,8 @@ def get_automations( # If pagination was requested, result is PaginatedResult # If no pagination, result is List[Dict] - # Note: Using JSONResponse to bypass Pydantic validation which would filter out _createdBy - # The enriched fields (_createdByUserName, mandateName) are not in the Pydantic model + # Note: Using JSONResponse to bypass Pydantic validation which would filter out sysCreatedBy + # The enriched fields (sysCreatedByUserName, mandateName) are not in the Pydantic model from fastapi.responses import JSONResponse if paginationParams: diff --git a/modules/features/automation2/datamodelFeatureAutomation2.py b/modules/features/automation2/datamodelFeatureAutomation2.py index f505c7d0..97b33754 100644 --- a/modules/features/automation2/datamodelFeatureAutomation2.py +++ b/modules/features/automation2/datamodelFeatureAutomation2.py @@ -4,6 +4,7 @@ from typing import Dict, Any, List, Optional from pydantic import BaseModel, Field +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.attributeUtils import registerModelLabels import uuid @@ -36,6 +37,11 @@ class Automation2Workflow(BaseModel): description="Whether workflow is active", json_schema_extra={"frontend_type": "checkbox", "frontend_required": False}, ) + invocations: List[Dict[str, Any]] = Field( + default_factory=list, + description="Entry points / starts (manual, form, schedule, webhook, …) configured outside the canvas", + json_schema_extra={"frontend_type": "textarea", "frontend_required": False}, + ) 
registerModelLabels( @@ -48,11 +54,12 @@ registerModelLabels( "label": {"en": "Label", "de": "Bezeichnung", "fr": "Libellé"}, "graph": {"en": "Graph", "de": "Graph", "fr": "Graphe"}, "active": {"en": "Active", "de": "Aktiv", "fr": "Actif"}, + "invocations": {"en": "Starts / Entry points", "de": "Starts / Einstiegspunkte", "fr": "Points d'entrée"}, }, ) -class Automation2WorkflowRun(BaseModel): +class Automation2WorkflowRun(PowerOnModel): id: str = Field( default_factory=lambda: str(uuid.uuid4()), description="Primary key", @@ -98,7 +105,7 @@ registerModelLabels( ) -class Automation2HumanTask(BaseModel): +class Automation2HumanTask(PowerOnModel): id: str = Field( default_factory=lambda: str(uuid.uuid4()), description="Primary key", diff --git a/modules/features/automation2/entryPoints.py b/modules/features/automation2/entryPoints.py new file mode 100644 index 00000000..2bcc74ce --- /dev/null +++ b/modules/features/automation2/entryPoints.py @@ -0,0 +1,96 @@ +# Copyright (c) 2025 Patrick Motsch +""" +Workflow entry points (Starts) — configuration outside the flow editor. + +Kinds align with run envelope trigger.type where applicable. 
+""" + +import uuid +from typing import Any, Dict, List, Optional + +# On-demand (gear: Manueller Trigger, Formular) +KINDS_ON_DEMAND = frozenset({"manual", "form", "api"}) + +# Always-on (gear: Zeitplan, Immer aktiv, plus legacy listener kinds) +KINDS_ALWAYS_ON = frozenset({"schedule", "always_on", "email", "webhook", "event"}) + +ALL_KINDS = KINDS_ON_DEMAND | KINDS_ALWAYS_ON + + +def category_for_kind(kind: str) -> str: + if kind in KINDS_ALWAYS_ON: + return "always_on" + return "on_demand" + + +def default_manual_entry_point() -> Dict[str, Any]: + """Single default manual start when a workflow has no invocations yet.""" + return { + "id": str(uuid.uuid4()), + "kind": "manual", + "category": "on_demand", + "enabled": True, + "title": { + "de": "Jetzt ausführen", + "en": "Run now", + "fr": "Exécuter", + }, + "description": {}, + "config": {}, + } + + +def _normalize_title(title: Any) -> Dict[str, str]: + if isinstance(title, dict): + return {k: str(v) for k, v in title.items() if v is not None} + if isinstance(title, str) and title.strip(): + return {"de": title, "en": title, "fr": title} + return {"de": "Start", "en": "Start", "fr": "Départ"} + + +def normalize_invocation_entry(raw: Dict[str, Any]) -> Dict[str, Any]: + """Validate and normalize a single entry point dict.""" + kind = (raw.get("kind") or "manual").strip() + if kind not in ALL_KINDS: + kind = "manual" + cat = raw.get("category") + if cat not in ("on_demand", "always_on"): + cat = category_for_kind(kind) + eid = raw.get("id") or str(uuid.uuid4()) + enabled = raw.get("enabled", True) + if not isinstance(enabled, bool): + enabled = bool(enabled) + config = raw.get("config") if isinstance(raw.get("config"), dict) else {} + desc = raw.get("description") if isinstance(raw.get("description"), dict) else {} + return { + "id": str(eid), + "kind": kind, + "category": cat, + "enabled": enabled, + "title": _normalize_title(raw.get("title")), + "description": desc, + "config": config, + } + + +def 
normalize_invocations_list(items: Optional[List[Any]]) -> List[Dict[str, Any]]: + if not items: + return [default_manual_entry_point()] + out: List[Dict[str, Any]] = [] + for raw in items: + if isinstance(raw, dict): + out.append(normalize_invocation_entry(raw)) + if not out: + return [default_manual_entry_point()] + return out + + +# Schedule / cron: wire an external job runner (APScheduler, Celery, system cron) to call +# POST .../execute with entryPointId set to a schedule entry — no separate in-process scheduler here yet. + + +def find_invocation(workflow: Dict[str, Any], entry_point_id: str) -> Optional[Dict[str, Any]]: + for inv in workflow.get("invocations") or []: + if isinstance(inv, dict) and inv.get("id") == entry_point_id: + return inv + return None diff --git a/modules/features/automation2/interfaceFeatureAutomation2.py b/modules/features/automation2/interfaceFeatureAutomation2.py index cdc9bccf..b38b21db 100644 --- a/modules/features/automation2/interfaceFeatureAutomation2.py +++ b/modules/features/automation2/interfaceFeatureAutomation2.py @@ -30,6 +30,7 @@ from modules.features.automation2.datamodelFeatureAutomation2 import ( Automation2WorkflowRun, Automation2HumanTask, ) +from modules.features.automation2.entryPoints import normalize_invocations_list from modules.connectors.connectorDbPostgre import DatabaseConnector from modules.shared.configuration import APP_CONFIG @@ -49,6 +50,83 @@ def getAutomation2Interface( ) +def getAllWorkflowsForScheduling() -> List[Dict[str, Any]]: + """ + Get all active Automation2 workflows that have a schedule entry point (primary invocation). + Used by the scheduler to register cron jobs. Does not filter by mandate/instance. 
+ """ + dbHost = APP_CONFIG.get("DB_HOST", "localhost") + dbDatabase = "poweron_automation2" + dbUser = APP_CONFIG.get("DB_USER") + dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD") + dbPort = int(APP_CONFIG.get("DB_PORT", 5432)) + connector = DatabaseConnector( + dbHost=dbHost, + dbDatabase=dbDatabase, + dbUser=dbUser, + dbPassword=dbPassword, + dbPort=dbPort, + userId=None, + ) + if not connector._ensureTableExists(Automation2Workflow): + logger.warning("Automation2 schedule: table Automation2Workflow does not exist") + return [] + # Don't filter by active in SQL: existing workflows may have active=NULL. + # Treat NULL as active; skip only when active is explicitly False. + records = connector.getRecordset( + Automation2Workflow, + recordFilter=None, + ) + raw_count = len(records) if records else 0 + result = [] + for r in records or []: + if r.get("active") is False: + continue + wf = dict(r) + wf["invocations"] = normalize_invocations_list(wf.get("invocations")) + invocations = wf.get("invocations") or [] + primary = invocations[0] if invocations else {} + if not isinstance(primary, dict): + primary = {} + + # Cron comes from graph start node params (trigger.schedule) + graph = wf.get("graph") or {} + nodes = graph.get("nodes") or [] + cron = None + for n in nodes: + if n.get("type") == "trigger.schedule": + params = n.get("parameters") or {} + cron = params.get("cron") + if cron: + break + + if not cron or not isinstance(cron, str) or not cron.strip(): + continue + + # Prefer invocations; if graph has trigger.schedule but invocations say manual, still schedule + if primary.get("kind") == "schedule" and primary.get("enabled", True): + entry_point_id = primary.get("id") + elif invocations and isinstance(invocations[0], dict) and invocations[0].get("id"): + entry_point_id = invocations[0].get("id") + else: + entry_point_id = str(uuid.uuid4()) + + result.append({ + "workflowId": wf.get("id"), + "mandateId": wf.get("mandateId"), + 
"featureInstanceId": wf.get("featureInstanceId"), + "entryPointId": entry_point_id, + "cron": cron.strip(), + "workflow": wf, + }) + logger.info( + "Automation2 schedule: DB has %d workflow(s), %d active with trigger.schedule+cron", + raw_count, + len(result), + ) + return result + + class Automation2Objects: """Interface for Automation2 database operations.""" @@ -87,18 +165,26 @@ class Automation2Objects: # Workflow CRUD # ------------------------------------------------------------------------- - def getWorkflows(self) -> List[Dict[str, Any]]: - """Get all workflows for this mandate and feature instance.""" + def getWorkflows(self, active: Optional[bool] = None) -> List[Dict[str, Any]]: + """Get all workflows for this mandate and feature instance. + Optional active filter: True=only active, False=only inactive, None=all. + """ if not self.db._ensureTableExists(Automation2Workflow): return [] + rf: Dict[str, Any] = { + "mandateId": self.mandateId, + "featureInstanceId": self.featureInstanceId, + } + if active is not None: + rf["active"] = active records = self.db.getRecordset( Automation2Workflow, - recordFilter={ - "mandateId": self.mandateId, - "featureInstanceId": self.featureInstanceId, - }, + recordFilter=rf, ) - return [dict(r) for r in records] if records else [] + rows = [dict(r) for r in records] if records else [] + for wf in rows: + wf["invocations"] = normalize_invocations_list(wf.get("invocations")) + return rows def getWorkflow(self, workflowId: str) -> Optional[Dict[str, Any]]: """Get a single workflow by ID.""" @@ -114,7 +200,9 @@ class Automation2Objects: ) if not records: return None - return dict(records[0]) + wf = dict(records[0]) + wf["invocations"] = normalize_invocations_list(wf.get("invocations")) + return wf def createWorkflow(self, data: Dict[str, Any]) -> Dict[str, Any]: """Create a new workflow.""" @@ -122,8 +210,18 @@ class Automation2Objects: data["id"] = str(uuid.uuid4()) data["mandateId"] = self.mandateId data["featureInstanceId"] 
= self.featureInstanceId + if "active" not in data or data.get("active") is None: + data["active"] = True + data["invocations"] = normalize_invocations_list(data.get("invocations")) created = self.db.recordCreate(Automation2Workflow, data) - return dict(created) + out = dict(created) + out["invocations"] = normalize_invocations_list(out.get("invocations")) + try: + from modules.shared.callbackRegistry import callbackRegistry + callbackRegistry.trigger("automation2.workflow.changed") + except Exception: + pass + return out def updateWorkflow(self, workflowId: str, data: Dict[str, Any]) -> Optional[Dict[str, Any]]: """Update an existing workflow.""" @@ -133,8 +231,17 @@ class Automation2Objects: # Don't overwrite mandateId/featureInstanceId data.pop("mandateId", None) data.pop("featureInstanceId", None) + if "invocations" in data: + data["invocations"] = normalize_invocations_list(data.get("invocations")) updated = self.db.recordModify(Automation2Workflow, workflowId, data) - return dict(updated) + out = dict(updated) + out["invocations"] = normalize_invocations_list(out.get("invocations")) + try: + from modules.shared.callbackRegistry import callbackRegistry + callbackRegistry.trigger("automation2.workflow.changed") + except Exception: + pass + return out def deleteWorkflow(self, workflowId: str) -> bool: """Delete a workflow.""" @@ -142,6 +249,11 @@ class Automation2Objects: if not existing: return False self.db.recordDelete(Automation2Workflow, workflowId) + try: + from modules.shared.callbackRegistry import callbackRegistry + callbackRegistry.trigger("automation2.workflow.changed") + except Exception: + pass return True # ------------------------------------------------------------------------- @@ -209,6 +321,28 @@ class Automation2Objects: ) return [dict(r) for r in records] if records else [] + def getRecentCompletedRuns(self, limit: int = 20) -> List[Dict[str, Any]]: + """Get recently completed runs for workflows in this instance (for output display).""" + if 
not self.db._ensureTableExists(Automation2WorkflowRun): + return [] + workflows = self.getWorkflows() + wf_ids = [w["id"] for w in workflows if w.get("id")] + if not wf_ids: + return [] + records = self.db.getRecordset( + Automation2WorkflowRun, + recordFilter={"status": "completed"}, + ) + if not records: + return [] + runs = [dict(r) for r in records if r.get("workflowId") in wf_ids] + wf_by_id = {w["id"]: w for w in workflows} + for r in runs: + wf = wf_by_id.get(r.get("workflowId"), {}) + r["workflowLabel"] = wf.get("label") or r.get("workflowId", "") + runs.sort(key=lambda x: (x.get("_modifiedAt") or x.get("_createdAt") or 0), reverse=True) + return runs[:limit] + def getRunsWaitingForEmail(self) -> List[Dict[str, Any]]: """Get all paused runs waiting for a new email (for background poller).""" if not self.db._ensureTableExists(Automation2WorkflowRun): @@ -289,23 +423,38 @@ class Automation2Objects: status: str = None, assigneeId: str = None, ) -> List[Dict[str, Any]]: - """Get tasks with optional filters. AssigneeId filters to that user; None returns all.""" + """Get tasks with optional filters. + When assigneeId is set: returns tasks assigned to that user OR unassigned (so schedule tasks show up). + When assigneeId is None: returns all tasks. 
+ """ if not self.db._ensureTableExists(Automation2HumanTask): return [] - rf = {} + base_rf: Dict[str, Any] = {} if workflowId: - rf["workflowId"] = workflowId + base_rf["workflowId"] = workflowId if runId: - rf["runId"] = runId + base_rf["runId"] = runId if status: - rf["status"] = status + base_rf["status"] = status if assigneeId: - rf["assigneeId"] = assigneeId - records = self.db.getRecordset( - Automation2HumanTask, - recordFilter=rf if rf else None, - ) - items = [dict(r) for r in records] if records else [] + rf_assigned = {**base_rf, "assigneeId": assigneeId} + rf_unassigned = {**base_rf, "assigneeId": None} + records1 = self.db.getRecordset(Automation2HumanTask, recordFilter=rf_assigned) + records2 = self.db.getRecordset(Automation2HumanTask, recordFilter=rf_unassigned) + seen = set() + items = [] + for r in (records1 or []) + (records2 or []): + rec = dict(r) + tid = rec.get("id") + if tid and tid not in seen: + seen.add(tid) + items.append(rec) + else: + records = self.db.getRecordset( + Automation2HumanTask, + recordFilter=base_rf if base_rf else None, + ) + items = [dict(r) for r in records] if records else [] workflows = {w["id"]: w for w in self.getWorkflows()} filtered = [t for t in items if t.get("workflowId") in workflows] return filtered diff --git a/modules/features/automation2/mainAutomation2.py b/modules/features/automation2/mainAutomation2.py index 9ec97eca..80c8f854 100644 --- a/modules/features/automation2/mainAutomation2.py +++ b/modules/features/automation2/mainAutomation2.py @@ -19,6 +19,8 @@ REQUIRED_SERVICES = [ {"serviceKey": "ai", "meta": {"usage": "AI nodes"}}, {"serviceKey": "extraction", "meta": {"usage": "Workflow method actions"}}, {"serviceKey": "sharepoint", "meta": {"usage": "SharePoint actions"}}, + {"serviceKey": "clickup", "meta": {"usage": "ClickUp actions"}}, + {"serviceKey": "generation", "meta": {"usage": "file.create document rendering"}}, ] FEATURE_LABEL = {"en": "Automation 2", "de": "Automatisierung 2", "fr": 
"Automatisation 2"} FEATURE_ICON = "mdi-sitemap" @@ -60,12 +62,25 @@ RESOURCE_OBJECTS = [ ] TEMPLATE_ROLES = [ + { + "roleLabel": "automation2-viewer", + "description": { + "en": "Automation2 Viewer - View workflows (read-only)", + "de": "Automation2 Betrachter - Workflows ansehen (nur lesen)", + "fr": "Visualiseur Automation2 - Consulter les workflows (lecture seule)", + }, + "accessRules": [ + {"context": "UI", "item": "ui.feature.automation2.workflows", "view": True}, + {"context": "UI", "item": "ui.feature.automation2.workflows-tasks", "view": True}, + {"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"}, + ], + }, { "roleLabel": "automation2-user", "description": { "en": "Automation2 User - Use automation2 flow builder", "de": "Automation2 Benutzer - Flow-Builder nutzen", - "fr": "Utilisateur Automation2 - Utiliser le flow builder" + "fr": "Utilisateur Automation2 - Utiliser le flow builder", }, "accessRules": [ {"context": "UI", "item": "ui.feature.automation2.editor", "view": True}, @@ -75,7 +90,20 @@ TEMPLATE_ROLES = [ {"context": "RESOURCE", "item": "resource.feature.automation2.node-types", "view": True}, {"context": "RESOURCE", "item": "resource.feature.automation2.execute", "view": True}, {"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "m"}, - ] + ], + }, + { + "roleLabel": "automation2-admin", + "description": { + "en": "Automation2 Admin - Full UI and API for the instance; data remains user-scoped (MY)", + "de": "Automation2 Admin - Volle UI und API für die Instanz; Daten weiterhin benutzerspezifisch (MY)", + "fr": "Administrateur Automation2 - UI et API complets pour l'instance; donnees limitees a l'utilisateur (MY)", + }, + "accessRules": [ + {"context": "UI", "item": None, "view": True}, + {"context": "RESOURCE", "item": None, "view": True}, + {"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": 
"m"}, + ], }, ] @@ -157,6 +185,8 @@ class _Automation2ServiceHub: utils = None extraction = None sharepoint = None + clickup = None + generation = None async def onStart(eventUser) -> None: @@ -175,7 +205,7 @@ def getFeatureDefinition() -> Dict[str, Any]: "code": FEATURE_CODE, "label": FEATURE_LABEL, "icon": FEATURE_ICON, - "autoCreateInstance": True, + "autoCreateInstance": False, } diff --git a/modules/features/automation2/nodeDefinitions/__init__.py b/modules/features/automation2/nodeDefinitions/__init__.py index 61eec51a..2f4920c8 100644 --- a/modules/features/automation2/nodeDefinitions/__init__.py +++ b/modules/features/automation2/nodeDefinitions/__init__.py @@ -3,18 +3,20 @@ from .triggers import TRIGGER_NODES from .flow import FLOW_NODES -from .data import DATA_NODES from .input import INPUT_NODES from .ai import AI_NODES from .email import EMAIL_NODES from .sharepoint import SHAREPOINT_NODES +from .clickup import CLICKUP_NODES +from .file import FILE_NODES STATIC_NODE_TYPES = ( TRIGGER_NODES + FLOW_NODES - + DATA_NODES + INPUT_NODES + AI_NODES + EMAIL_NODES + SHAREPOINT_NODES + + CLICKUP_NODES + + FILE_NODES ) diff --git a/modules/features/automation2/nodeDefinitions/ai.py b/modules/features/automation2/nodeDefinitions/ai.py index 4fdf0db9..bb85e809 100644 --- a/modules/features/automation2/nodeDefinitions/ai.py +++ b/modules/features/automation2/nodeDefinitions/ai.py @@ -9,7 +9,6 @@ AI_NODES = [ "description": {"en": "Enter a prompt and AI does something", "de": "Prompt eingeben und KI führt aus", "fr": "Entrer une invite et l'IA exécute"}, "parameters": [ {"name": "prompt", "type": "string", "required": True, "description": {"en": "AI prompt", "de": "KI-Prompt", "fr": "Invite IA"}}, - {"name": "resultType", "type": "string", "required": False, "description": {"en": "Output format (txt, json, md, etc.)", "de": "Ausgabeformat", "fr": "Format de sortie"}, "default": "txt"}, ], "inputs": 1, "outputs": 1, @@ -85,7 +84,6 @@ AI_NODES = [ "description": {"en": 
"Generate document from prompt", "de": "Dokument aus Prompt generieren", "fr": "Générer un document"}, "parameters": [ {"name": "prompt", "type": "string", "required": True, "description": {"en": "Generation prompt", "de": "Generierungs-Prompt", "fr": "Invite de génération"}}, - {"name": "format", "type": "string", "required": False, "description": {"en": "Output format", "de": "Ausgabeformat", "fr": "Format de sortie"}, "default": "docx"}, ], "inputs": 1, "outputs": 1, diff --git a/modules/features/automation2/nodeDefinitions/clickup.py b/modules/features/automation2/nodeDefinitions/clickup.py new file mode 100644 index 00000000..4acb0db9 --- /dev/null +++ b/modules/features/automation2/nodeDefinitions/clickup.py @@ -0,0 +1,227 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +"""ClickUp nodes — map to MethodClickup actions.""" + +CLICKUP_NODES = [ + { + "id": "clickup.searchTasks", + "category": "clickup", + "label": {"en": "Search tasks", "de": "Aufgaben suchen", "fr": "Rechercher tâches"}, + "description": { + "en": "Search tasks in a workspace (team)", + "de": "Aufgaben in einem Workspace suchen", + "fr": "Rechercher des tâches dans un espace", + }, + "parameters": [ + {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "teamId", "type": "string", "required": True, "description": {"en": "Workspace (team) ID", "de": "Team-/Workspace-ID", "fr": "ID équipe"}}, + {"name": "query", "type": "string", "required": True, "description": {"en": "Search query", "de": "Suchbegriff", "fr": "Requête"}}, + {"name": "page", "type": "number", "required": False, "description": {"en": "Page", "de": "Seite", "fr": "Page"}, "default": 0}, + { + "name": "listId", + "type": "string", + "required": False, + "description": { + "en": "If set, search this list via list API (not team search).", + "de": "Wenn gesetzt: Suche in dieser Liste (Listen-API, 
nicht Team-Suche).", + "fr": "Si défini : recherche dans cette liste (API liste).", + }, + }, + { + "name": "includeClosed", + "type": "boolean", + "required": False, + "default": False, + "description": { + "en": "With listId: include closed tasks.", + "de": "Mit Liste: erledigte Aufgaben einbeziehen.", + "fr": "Avec liste : inclure les tâches terminées.", + }, + }, + { + "name": "fullTaskData", + "type": "boolean", + "required": False, + "default": False, + "description": { + "en": "Return full ClickUp API JSON per task (very large). Default: slim fields only.", + "de": "Vollständige ClickUp-Rohdaten pro Task (sehr groß). Standard: nur schlanke Felder.", + "fr": "Réponse brute complète (très volumineuse). Par défaut : champs réduits.", + }, + }, + { + "name": "matchNameOnly", + "type": "boolean", + "required": False, + "default": True, + "description": { + "en": "Keep only tasks whose title contains the search query (default: on).", + "de": "Nur Aufgaben, deren Titel den Suchbegriff enthält (Standard: an).", + "fr": "Ne garder que les tâches dont le titre contient la requête (défaut : oui).", + }, + }, + ], + "inputs": 1, + "outputs": 1, + "meta": {"icon": "mdi-magnify", "color": "#7B68EE"}, + "_method": "clickup", + "_action": "searchTasks", + "_paramMap": { + "connectionId": "connectionReference", + "teamId": "teamId", + "query": "query", + "page": "page", + "listId": "listId", + "fullTaskData": "fullTaskData", + "matchNameOnly": "matchNameOnly", + "includeClosed": "includeClosed", + }, + }, + { + "id": "clickup.listTasks", + "category": "clickup", + "label": {"en": "List tasks", "de": "Aufgaben auflisten", "fr": "Lister les tâches"}, + "description": { + "en": "List tasks in a list (pick list path from browse)", + "de": "Aufgaben einer Liste auflisten (Pfad aus Browse)", + "fr": "Lister les tâches d'une liste", + }, + "parameters": [ + {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": 
"ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "path", "type": "string", "required": True, "description": {"en": "Virtual path to list /team/.../list/...", "de": "Pfad zur Liste", "fr": "Chemin vers la liste"}}, + {"name": "page", "type": "number", "required": False, "description": {"en": "Page", "de": "Seite", "fr": "Page"}, "default": 0}, + {"name": "includeClosed", "type": "boolean", "required": False, "description": {"en": "Include closed", "de": "Erledigte einbeziehen", "fr": "Inclure terminées"}, "default": False}, + ], + "inputs": 1, + "outputs": 1, + "meta": {"icon": "mdi-format-list-bulleted", "color": "#7B68EE"}, + "_method": "clickup", + "_action": "listTasks", + "_paramMap": { + "connectionId": "connectionReference", + "path": "pathQuery", + "page": "page", + "includeClosed": "includeClosed", + }, + }, + { + "id": "clickup.getTask", + "category": "clickup", + "label": {"en": "Get task", "de": "Aufgabe abrufen", "fr": "Obtenir la tâche"}, + "description": {"en": "Get one task by ID or path", "de": "Eine Aufgabe abrufen", "fr": "Obtenir une tâche"}, + "parameters": [ + {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "taskId", "type": "string", "required": False, "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}}, + {"name": "path", "type": "string", "required": False, "description": {"en": "Or path .../task/{id}", "de": "Oder Pfad .../task/{id}", "fr": "Ou chemin .../task/{id}"}}, + ], + "inputs": 1, + "outputs": 1, + "meta": {"icon": "mdi-file-document-outline", "color": "#7B68EE"}, + "_method": "clickup", + "_action": "getTask", + "_paramMap": {"connectionId": "connectionReference", "taskId": "taskId", "path": "pathQuery"}, + }, + { + "id": "clickup.createTask", + "category": "clickup", + "label": {"en": "Create task", "de": "Aufgabe erstellen", "fr": "Créer une tâche"}, + "description": {"en": 
"Create a task in a list", "de": "Aufgabe in einer Liste erstellen", "fr": "Créer une tâche dans une liste"}, + "parameters": [ + {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "teamId", "type": "string", "required": False, "description": {"en": "Workspace (team) for list picker", "de": "Workspace für Listen-Auswahl", "fr": "Équipe"}}, + {"name": "path", "type": "string", "required": False, "description": {"en": "Optional path /team/.../list/...", "de": "Optional: Pfad zur Liste", "fr": "Chemin optionnel"}}, + {"name": "listId", "type": "string", "required": False, "description": {"en": "List ID", "de": "Listen-ID", "fr": "ID liste"}}, + {"name": "name", "type": "string", "required": True, "description": {"en": "Task name", "de": "Name", "fr": "Nom"}}, + {"name": "description", "type": "string", "required": False, "description": {"en": "Description", "de": "Beschreibung", "fr": "Description"}}, + {"name": "taskStatus", "type": "string", "required": False, "description": {"en": "Status (list status name)", "de": "Status (wie in der Liste)", "fr": "Statut"}}, + {"name": "taskPriority", "type": "string", "required": False, "description": {"en": "1–4 or empty", "de": "1–4 oder leer", "fr": "1–4"}}, + {"name": "taskDueDateMs", "type": "string", "required": False, "description": {"en": "Due date (Unix ms)", "de": "Fälligkeit (ms)", "fr": "Échéance (ms)"}}, + {"name": "taskAssigneeIds", "type": "object", "required": False, "description": {"en": "Assignee user ids", "de": "Zugewiesene (User-IDs)", "fr": "Assignés"}}, + {"name": "taskTimeEstimateMs", "type": "string", "required": False, "description": {"en": "Time estimate (ms)", "de": "Zeitschätzung (ms)", "fr": "Estimation (ms)"}}, + {"name": "taskTimeEstimateHours", "type": "string", "required": False, "description": {"en": "Time estimate (hours)", "de": "Zeitschätzung (Stunden)", "fr": 
"Heures"}}, + {"name": "customFieldValues", "type": "object", "required": False, "description": {"en": "Custom field id → value", "de": "Benutzerdefinierte Felder", "fr": "Champs personnalisés"}}, + {"name": "taskFields", "type": "string", "required": False, "description": {"en": "Extra JSON (advanced)", "de": "Zusätzliches JSON (fortgeschritten)", "fr": "JSON avancé"}}, + ], + "inputs": 1, + "outputs": 1, + "meta": {"icon": "mdi-plus-circle-outline", "color": "#7B68EE"}, + "_method": "clickup", + "_action": "createTask", + "_paramMap": { + "connectionId": "connectionReference", + "teamId": "teamId", + "path": "pathQuery", + "listId": "listId", + "name": "name", + "description": "description", + "taskStatus": "taskStatus", + "taskPriority": "taskPriority", + "taskDueDateMs": "taskDueDateMs", + "taskAssigneeIds": "taskAssigneeIds", + "taskTimeEstimateMs": "taskTimeEstimateMs", + "taskTimeEstimateHours": "taskTimeEstimateHours", + "customFieldValues": "customFieldValues", + "taskFields": "taskFields", + }, + }, + { + "id": "clickup.updateTask", + "category": "clickup", + "label": {"en": "Update task", "de": "Aufgabe aktualisieren", "fr": "Mettre à jour la tâche"}, + "description": { + "en": "Update task fields (rows or JSON)", + "de": "Felder der Aufgabe ändern (Zeilen oder JSON)", + "fr": "Mettre à jour les champs (lignes ou JSON)", + }, + "parameters": [ + {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "taskId", "type": "string", "required": False, "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}}, + {"name": "path", "type": "string", "required": False, "description": {"en": "Or path to task", "de": "Oder Pfad", "fr": "Ou chemin"}}, + { + "name": "taskUpdateEntries", + "type": "object", + "required": False, + "description": { + "en": "List of {fieldKey, value, customFieldId?}", + "de": "Liste der zu ändernden Felder 
(fieldKey, value, optional customFieldId)", + "fr": "Liste de champs à mettre à jour", + }, + }, + {"name": "taskUpdate", "type": "string", "required": False, "description": {"en": "JSON body for API (optional if rows set)", "de": "JSON für API (optional wenn Zeilen gesetzt)", "fr": "Corps JSON"}}, + ], + "inputs": 1, + "outputs": 1, + "meta": {"icon": "mdi-pencil-outline", "color": "#7B68EE"}, + "_method": "clickup", + "_action": "updateTask", + "_paramMap": { + "connectionId": "connectionReference", + "taskId": "taskId", + "path": "path", + "taskUpdate": "taskUpdate", + }, + }, + { + "id": "clickup.uploadAttachment", + "category": "clickup", + "label": {"en": "Upload attachment", "de": "Anhang hochladen", "fr": "Téléverser pièce jointe"}, + "description": {"en": "Upload file to a task (upstream file)", "de": "Datei an Task anhängen", "fr": "Joindre un fichier à la tâche"}, + "parameters": [ + {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "taskId", "type": "string", "required": False, "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}}, + {"name": "path", "type": "string", "required": False, "description": {"en": "Or path to task", "de": "Oder Pfad", "fr": "Ou chemin"}}, + {"name": "fileName", "type": "string", "required": False, "description": {"en": "File name", "de": "Dateiname", "fr": "Nom du fichier"}}, + ], + "inputs": 1, + "outputs": 1, + "meta": {"icon": "mdi-attachment", "color": "#7B68EE"}, + "_method": "clickup", + "_action": "uploadAttachment", + "_paramMap": { + "connectionId": "connectionReference", + "taskId": "taskId", + "path": "path", + "fileName": "fileName", + }, + }, +] diff --git a/modules/features/automation2/nodeDefinitions/data.py b/modules/features/automation2/nodeDefinitions/data.py deleted file mode 100644 index b44618d1..00000000 --- a/modules/features/automation2/nodeDefinitions/data.py +++ 
/dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2025 Patrick Motsch -# Data transformation node definitions. - -DATA_NODES = [ - { - "id": "data.setFields", - "category": "data", - "label": {"en": "Set Fields", "de": "Felder setzen", "fr": "Définir champs"}, - "description": {"en": "Set or override fields on payload", "de": "Felder setzen oder überschreiben", "fr": "Définir ou écraser des champs"}, - "parameters": [ - {"name": "fields", "type": "object", "required": True, "description": {"en": "Key-value pairs", "de": "Schlüssel-Wert-Paare", "fr": "Paires clé-valeur"}}, - ], - "inputs": 1, - "outputs": 1, - "executor": "data", - "meta": {"icon": "mdi-pencil", "color": "#673AB7"}, - }, - { - "id": "data.filter", - "category": "data", - "label": {"en": "Filter", "de": "Filtern", "fr": "Filtrer"}, - "description": {"en": "Filter array by condition", "de": "Array nach Bedingung filtern", "fr": "Filtrer tableau par condition"}, - "parameters": [ - {"name": "condition", "type": "string", "required": True, "description": {"en": "Expression (e.g. 
item.active == true)", "de": "Bedingung", "fr": "Condition"}}, - {"name": "itemsPath", "type": "string", "required": False, "description": {"en": "Path to array", "de": "Pfad zum Array", "fr": "Chemin vers le tableau"}}, - ], - "inputs": 1, - "outputs": 1, - "executor": "data", - "meta": {"icon": "mdi-filter", "color": "#673AB7"}, - }, - { - "id": "data.parseJson", - "category": "data", - "label": {"en": "Parse JSON", "de": "JSON parsen", "fr": "Parser JSON"}, - "description": {"en": "Parse JSON string to object", "de": "JSON-String in Objekt parsen", "fr": "Parser chaîne JSON en objet"}, - "parameters": [ - {"name": "jsonPath", "type": "string", "required": False, "description": {"en": "Path to JSON string (default: input)", "de": "Pfad zum JSON", "fr": "Chemin vers JSON"}}, - ], - "inputs": 1, - "outputs": 1, - "executor": "data", - "meta": {"icon": "mdi-code-json", "color": "#673AB7"}, - }, - { - "id": "data.template", - "category": "data", - "label": {"en": "Template / Interpolation", "de": "Vorlage / Interpolation", "fr": "Modèle / Interpolation"}, - "description": {"en": "Text with {{placeholder}} substitution", "de": "Text mit {{platzhalter}}-Ersetzung", "fr": "Texte avec substitution {{placeholder}}"}, - "parameters": [ - {"name": "template", "type": "string", "required": True, "description": {"en": "Template (use {{path}} for values)", "de": "Vorlage", "fr": "Modèle"}}, - ], - "inputs": 1, - "outputs": 1, - "executor": "data", - "meta": {"icon": "mdi-format-text", "color": "#673AB7"}, - }, -] diff --git a/modules/features/automation2/nodeDefinitions/file.py b/modules/features/automation2/nodeDefinitions/file.py new file mode 100644 index 00000000..bb168218 --- /dev/null +++ b/modules/features/automation2/nodeDefinitions/file.py @@ -0,0 +1,60 @@ +# Copyright (c) 2025 Patrick Motsch +# File node definitions - create files from context (e.g. from AI nodes). 
 + +FILE_NODES = [ + { + "id": "file.create", + "category": "file", + "label": {"en": "Create File", "de": "Datei erstellen", "fr": "Créer fichier"}, + "description": { + "en": "Create a file from context (text/markdown from AI). Configurable format and style.", + "de": "Erstellt eine Datei aus Kontext (Text/Markdown von KI). Format und Stil konfigurierbar.", + "fr": "Crée un fichier à partir du contexte. Format et style configurables.", + }, + "parameters": [ + { + "name": "contentSources", + "type": "json", + "required": False, + "description": { + "en": "Array of context refs (e.g. AI, form). Concatenated in order. Empty = from connected node.", + "de": "Liste von Kontext-Quellen (z.B. KI, Formular). Werden nacheinander zusammengefügt. Leer = vom verbundenen Node.", + "fr": "Liste de sources de contexte. Concaténées dans l'ordre. Vide = du noeud connecté.", + }, + "default": [], + }, + { + "name": "outputFormat", + "type": "string", + "required": True, + "description": {"en": "Output format", "de": "Ausgabeformat", "fr": "Format de sortie"}, + "default": "docx", + }, + { + "name": "title", + "type": "string", + "required": False, + "description": {"en": "Document title", "de": "Dokumenttitel", "fr": "Titre du document"}, + }, + { + "name": "templateName", + "type": "string", + "required": False, + "description": {"en": "Style preset: default, corporate, minimal", "de": "Stil-Vorlage", "fr": "Préréglage de style"}, + }, + { + "name": "language", + "type": "string", + "required": False, + "description": {"en": "Language code (de, en, fr)", "de": "Sprachcode", "fr": "Code langue"}, + "default": "de", + }, + ], + "inputs": 1, + "outputs": 1, + "meta": {"icon": "mdi-file-plus-outline", "color": "#2196F3"}, + "_method": "file", + "_action": "create", + "_paramMap": {}, + }, +] diff --git a/modules/features/automation2/nodeDefinitions/flow.py b/modules/features/automation2/nodeDefinitions/flow.py index 573a83ad..02e25764 100644 --- 
a/modules/features/automation2/nodeDefinitions/flow.py +++ b/modules/features/automation2/nodeDefinitions/flow.py @@ -12,6 +12,7 @@ FLOW_NODES = [ ], "inputs": 1, "outputs": 2, + "outputLabels": {"en": ["Yes", "No"], "de": ["Ja", "Nein"], "fr": ["Oui", "Non"]}, "executor": "flow", "meta": {"icon": "mdi-source-branch", "color": "#FF9800"}, }, @@ -29,19 +30,6 @@ FLOW_NODES = [ "executor": "flow", "meta": {"icon": "mdi-swap-horizontal", "color": "#FF9800"}, }, - { - "id": "flow.merge", - "category": "flow", - "label": {"en": "Merge", "de": "Zusammenführen", "fr": "Fusionner"}, - "description": {"en": "Merge multiple inputs", "de": "Mehrere Eingaben zusammenführen", "fr": "Fusionner plusieurs entrées"}, - "parameters": [ - {"name": "mode", "type": "string", "required": False, "description": {"en": "append | combine", "de": "Modus", "fr": "Mode"}}, - ], - "inputs": 2, - "outputs": 1, - "executor": "flow", - "meta": {"icon": "mdi-merge", "color": "#FF9800"}, - }, { "id": "flow.loop", "category": "flow", @@ -55,28 +43,4 @@ FLOW_NODES = [ "executor": "flow", "meta": {"icon": "mdi-repeat", "color": "#FF9800"}, }, - { - "id": "flow.wait", - "category": "flow", - "label": {"en": "Wait / Delay", "de": "Warten / Verzögerung", "fr": "Attendre / Délai"}, - "description": {"en": "Pause for duration", "de": "Pause für Dauer", "fr": "Pause pour durée"}, - "parameters": [ - {"name": "seconds", "type": "number", "required": True, "description": {"en": "Seconds to wait", "de": "Sekunden", "fr": "Secondes"}}, - ], - "inputs": 1, - "outputs": 1, - "executor": "flow", - "meta": {"icon": "mdi-timer", "color": "#FF9800"}, - }, - { - "id": "flow.stop", - "category": "flow", - "label": {"en": "Stop / Terminate", "de": "Stopp / Beenden", "fr": "Arrêter / Terminer"}, - "description": {"en": "Stop workflow execution", "de": "Workflow-Ausführung beenden", "fr": "Arrêter l'exécution"}, - "parameters": [], - "inputs": 1, - "outputs": 0, - "executor": "flow", - "meta": {"icon": "mdi-stop", "color": 
"#F44336"}, - }, ] diff --git a/modules/features/automation2/nodeDefinitions/input.py b/modules/features/automation2/nodeDefinitions/input.py index 8eb43e63..d9c56c78 100644 --- a/modules/features/automation2/nodeDefinitions/input.py +++ b/modules/features/automation2/nodeDefinitions/input.py @@ -12,7 +12,11 @@ INPUT_NODES = [ "name": "fields", "type": "json", "required": True, - "description": {"en": "Form fields: [{name, type, label, required, options?}]", "de": "Formularfelder", "fr": "Champs du formulaire"}, + "description": { + "en": "Form fields: [{name, type, label, required, options?}]. type may include clickup_tasks with clickupConnectionId + clickupListId for a ClickUp task dropdown (value {add, rem}).", + "de": "Formularfelder. type: u. a. clickup_tasks mit clickupConnectionId und clickupListId für ClickUp-Aufgaben-Dropdown (Wert wie Relationship-Feld).", + "fr": "Champs du formulaire", + }, "default": [], }, ], @@ -42,7 +46,8 @@ INPUT_NODES = [ "label": {"en": "Upload", "de": "Upload", "fr": "Téléversement"}, "description": {"en": "User uploads file(s)", "de": "Benutzer lädt Datei(en) hoch", "fr": "L'utilisateur téléverse des fichiers"}, "parameters": [ - {"name": "accept", "type": "string", "required": False, "description": {"en": "MIME types (e.g. .pdf,image/*)", "de": "MIME-Typen", "fr": "Types MIME"}, "default": ""}, + {"name": "accept", "type": "string", "required": False, "description": {"en": "Accept string for file input (e.g. .pdf,image/*)", "de": "Accept-String für Dateiauswahl", "fr": "Chaîne accept"}, "default": ""}, + {"name": "allowedTypes", "type": "json", "required": False, "description": {"en": "Selected file types (from UI multi-select)", "de": "Ausgewählte Dateitypen", "fr": "Types sélectionnés"}, "default": []}, {"name": "maxSize", "type": "number", "required": False, "description": {"en": "Max file size in MB", "de": "Max. 
Dateigröße in MB", "fr": "Taille max en Mo"}, "default": 10}, {"name": "multiple", "type": "boolean", "required": False, "description": {"en": "Allow multiple files", "de": "Mehrere Dateien erlauben", "fr": "Autoriser plusieurs fichiers"}, "default": False}, ], diff --git a/modules/features/automation2/nodeDefinitions/triggers.py b/modules/features/automation2/nodeDefinitions/triggers.py index 0e206dc0..5071a762 100644 --- a/modules/features/automation2/nodeDefinitions/triggers.py +++ b/modules/features/automation2/nodeDefinitions/triggers.py @@ -1,12 +1,16 @@ # Copyright (c) 2025 Patrick Motsch -# Trigger node definitions - workflow entry points. +# Canvas start nodes — variant reflects workflow configuration (gear in editor). TRIGGER_NODES = [ { "id": "trigger.manual", "category": "trigger", - "label": {"en": "Manual Trigger", "de": "Manueller Trigger", "fr": "Déclencheur manuel"}, - "description": {"en": "Start workflow on button press", "de": "Startet den Workflow bei Knopfdruck", "fr": "Démarre le workflow sur clic"}, + "label": {"en": "Start", "de": "Start", "fr": "Départ"}, + "description": { + "en": "Manual, API, or background triggers (webhook, email, …).", + "de": "Manuell, API oder Hintergrund-Starts (Webhook, E-Mail, …).", + "fr": "Manuel, API ou déclencheurs en arrière-plan.", + }, "parameters": [], "inputs": 0, "outputs": 1, @@ -14,29 +18,47 @@ TRIGGER_NODES = [ "meta": {"icon": "mdi-play", "color": "#4CAF50"}, }, { - "id": "trigger.schedule", + "id": "trigger.form", "category": "trigger", - "label": {"en": "Schedule", "de": "Zeitplan", "fr": "Planification"}, - "description": {"en": "Run on a cron schedule", "de": "Läuft nach Cron-Zeitplan", "fr": "S'exécute selon un cron"}, + "label": {"en": "Start (form)", "de": "Start (Formular)", "fr": "Départ (formulaire)"}, + "description": { + "en": "Form fields are filled at run time; configure fields on this node.", + "de": "Felder werden beim Start befüllt; konfigurieren Sie die Felder auf dieser Node.", + 
"fr": "Les champs sont remplis au démarrage.", + }, "parameters": [ - {"name": "cron", "type": "string", "required": True, "description": {"en": "Cron expression (e.g. 0 9 * * * for daily at 9)", "de": "Cron-Ausdruck", "fr": "Expression cron"}}, - ], - "inputs": 0, - "outputs": 1, - "executor": "trigger", - "meta": {"icon": "mdi-clock", "color": "#2196F3"}, - }, - { - "id": "trigger.formSubmit", - "category": "trigger", - "label": {"en": "Form Submit", "de": "Formular-Absendung", "fr": "Soumission formulaire"}, - "description": {"en": "Start when form is submitted", "de": "Startet bei Formular-Absendung", "fr": "Démarre à la soumission du formulaire"}, - "parameters": [ - {"name": "formId", "type": "string", "required": True, "description": {"en": "Form identifier", "de": "Formular-ID", "fr": "Identifiant du formulaire"}}, + { + "name": "formFields", + "type": "json", + "required": False, + "description": {"en": "Field definitions", "de": "Felddefinitionen", "fr": "Définitions"}, + }, ], "inputs": 0, "outputs": 1, "executor": "trigger", "meta": {"icon": "mdi-form-select", "color": "#9C27B0"}, }, + { + "id": "trigger.schedule", + "category": "trigger", + "label": {"en": "Start (schedule)", "de": "Start (Zeitplan)", "fr": "Départ (planification)"}, + "description": { + "en": "Cron expression for scheduled runs (configure on this node).", + "de": "Cron-Ausdruck für geplante Läufe.", + "fr": "Expression cron pour les exécutions planifiées.", + }, + "parameters": [ + { + "name": "cron", + "type": "string", + "required": False, + "description": {"en": "Cron expression", "de": "Cron-Ausdruck", "fr": "Expression cron"}, + }, + ], + "inputs": 0, + "outputs": 1, + "executor": "trigger", + "meta": {"icon": "mdi-clock", "color": "#2196F3"}, + }, ] diff --git a/modules/features/automation2/nodeRegistry.py b/modules/features/automation2/nodeRegistry.py index 39c3e2c9..4bcc9ba5 100644 --- a/modules/features/automation2/nodeRegistry.py +++ 
b/modules/features/automation2/nodeRegistry.py @@ -36,6 +36,11 @@ def _localizeNode(node: Dict[str, Any], language: str) -> Dict[str, Any]: out["label"] = node["label"].get(lang, node["label"].get("en", str(node["label"]))) if isinstance(node.get("description"), dict): out["description"] = node["description"].get(lang, node["description"].get("en", str(node["description"]))) + ol = node.get("outputLabels") + if isinstance(ol, dict) and ol: + first = next(iter(ol.values()), None) + if isinstance(first, (list, tuple)): + out["outputLabels"] = ol.get(lang, ol.get("en", list(first))) params = [] for p in node.get("parameters", []): pc = dict(p) @@ -61,8 +66,10 @@ def getNodeTypesForApi( {"id": "flow", "label": {"en": "Flow", "de": "Ablauf", "fr": "Flux"}}, {"id": "data", "label": {"en": "Data", "de": "Daten", "fr": "Données"}}, {"id": "ai", "label": {"en": "AI", "de": "KI", "fr": "IA"}}, + {"id": "file", "label": {"en": "File", "de": "Datei", "fr": "Fichier"}}, {"id": "email", "label": {"en": "Email", "de": "E-Mail", "fr": "Email"}}, {"id": "sharepoint", "label": {"en": "SharePoint", "de": "SharePoint", "fr": "SharePoint"}}, + {"id": "clickup", "label": {"en": "ClickUp", "de": "ClickUp", "fr": "ClickUp"}}, ] return {"nodeTypes": localized, "categories": categories} diff --git a/modules/features/automation2/routeFeatureAutomation2.py b/modules/features/automation2/routeFeatureAutomation2.py index 996c3cb6..aa40f8bb 100644 --- a/modules/features/automation2/routeFeatureAutomation2.py +++ b/modules/features/automation2/routeFeatureAutomation2.py @@ -5,6 +5,8 @@ Automation2 routes - node-types, execute, workflows, runs, tasks, connections, b """ import logging +from typing import Any, Dict, Optional + from fastapi import APIRouter, Depends, Path, Query, Body, Request, HTTPException from fastapi.responses import JSONResponse from modules.auth import limiter, getRequestContext, RequestContext @@ -13,9 +15,75 @@ from modules.features.automation2.mainAutomation2 import 
getAutomation2Services from modules.features.automation2.nodeRegistry import getNodeTypesForApi from modules.features.automation2.interfaceFeatureAutomation2 import getAutomation2Interface from modules.workflows.automation2.executionEngine import executeGraph +from modules.workflows.automation2.runEnvelope import ( + default_run_envelope, + merge_run_envelope, + normalize_run_envelope, +) +from modules.features.automation2.entryPoints import find_invocation logger = logging.getLogger(__name__) + +def _build_execute_run_envelope( + body: Dict[str, Any], + workflow: Optional[Dict[str, Any]], + user_id: Optional[str], +) -> Dict[str, Any]: + """Build normalized run envelope from POST /execute body.""" + if isinstance(body.get("runEnvelope"), dict): + env = normalize_run_envelope(body["runEnvelope"], user_id=user_id) + pl = body.get("payload") + if isinstance(pl, dict): + env = merge_run_envelope(env, {"payload": pl}) + return env + + entry_point_id = body.get("entryPointId") + if entry_point_id: + if not workflow: + raise HTTPException( + status_code=400, + detail="entryPointId requires a saved workflow (workflowId must refer to a stored workflow)", + ) + inv = find_invocation(workflow, entry_point_id) + if not inv: + raise HTTPException(status_code=400, detail="entryPointId not found on workflow") + if not inv.get("enabled", True): + raise HTTPException(status_code=400, detail="entry point is disabled") + kind = inv.get("kind", "manual") + trig_map = { + "manual": "manual", + "form": "form", + "schedule": "schedule", + "always_on": "event", + "email": "email", + "webhook": "webhook", + "api": "api", + "event": "event", + } + trig = trig_map.get(kind, "manual") + title = inv.get("title") or {} + label = "" + if isinstance(title, dict): + label = title.get("en") or title.get("de") or "" + elif isinstance(title, str): + label = title + base = default_run_envelope( + trig, + entry_point_id=inv.get("id"), + entry_point_label=label or None, + ) + pl = body.get("payload") + 
if isinstance(pl, dict): + base = merge_run_envelope(base, {"payload": pl}) + return normalize_run_envelope(base, user_id=user_id) + + env = normalize_run_envelope(None, user_id=user_id) + pl = body.get("payload") + if isinstance(pl, dict): + env = merge_run_envelope(env, {"payload": pl}) + return env + router = APIRouter( prefix="/api/automation2", tags=["Automation2"], @@ -55,6 +123,26 @@ def get_automation2_info( } +@router.post("/{instanceId}/schedule-sync") +@limiter.limit("10/minute") +def post_schedule_sync( + request: Request, + instanceId: str = Path(..., description="Feature instance ID"), + context: RequestContext = Depends(getRequestContext), +) -> dict: + """Manually trigger schedule sync (re-register cron jobs for all schedule workflows).""" + _validateInstanceAccess(instanceId, context) + from modules.interfaces.interfaceDbApp import getRootInterface + from modules.workflows.automation2.subAutomation2Schedule import sync_automation2_schedule_events + + root = getRootInterface() + event_user = root.getUserByUsername("event") + if not event_user: + return {"success": False, "error": "Event user not available", "synced": 0} + result = sync_automation2_schedule_events(event_user) + return {"success": True, **result} + + @router.get("/{instanceId}/node-types") @limiter.limit("60/minute") def get_node_types( @@ -109,6 +197,10 @@ async def post_execute( graph = body.get("graph") or body workflowId = body.get("workflowId") req_nodes = graph.get("nodes") or [] + workflow_for_envelope: Optional[Dict[str, Any]] = None + if workflowId and not str(workflowId).startswith("transient-"): + a2_pre = getAutomation2Interface(context.user, mandateId, instanceId) + workflow_for_envelope = a2_pre.getWorkflow(workflowId) # When workflowId is set: prefer graph from request (current editor state) if it has nodes. # Only fall back to stored workflow graph when request graph is empty (e.g. resume from email). 
if workflowId and len(req_nodes) == 0: @@ -117,6 +209,7 @@ async def post_execute( if wf and wf.get("graph"): graph = wf["graph"] logger.info("automation2 execute: loaded graph from workflow %s", workflowId) + workflow_for_envelope = wf # Use transient workflowId when none provided (e.g. execute from editor without save) # Required for email.checkEmail pause/resume - run must be created if not workflowId: @@ -132,6 +225,8 @@ async def post_execute( workflowId, mandateId, ) + run_env = _build_execute_run_envelope(body, workflow_for_envelope, userId) + a2_interface = getAutomation2Interface(context.user, mandateId, instanceId) result = await executeGraph( graph=graph, @@ -141,6 +236,7 @@ async def post_execute( userId=userId, mandateId=mandateId, automation2_interface=a2_interface, + run_envelope=run_env, ) logger.info( "automation2 execute result: success=%s error=%s nodeOutputs_keys=%s failedNode=%s paused=%s", @@ -239,6 +335,7 @@ async def list_connection_services( services = provider.getAvailableServices() _serviceLabels = { "sharepoint": "SharePoint", + "clickup": "ClickUp", "outlook": "Outlook", "teams": "Teams", "onedrive": "OneDrive", @@ -248,6 +345,7 @@ async def list_connection_services( } _serviceIcons = { "sharepoint": "sharepoint", + "clickup": "folder", "outlook": "mail", "teams": "chat", "onedrive": "cloud", @@ -342,15 +440,17 @@ def _get_node_label_from_graph(graph: dict, nodeId: str) -> str: def get_workflows( request: Request, instanceId: str = Path(..., description="Feature instance ID"), + active: Optional[bool] = Query(None, description="Filter by active: true|false"), context: RequestContext = Depends(getRequestContext), ) -> dict: """List all workflows for this feature instance. Enriches each workflow with runCount, isRunning, stuckAtNodeId, stuckAtNodeLabel, createdAt, lastStartedAt. + Query param active: filter by active status (true|false). 
""" mandateId = _validateInstanceAccess(instanceId, context) a2 = getAutomation2Interface(context.user, mandateId, instanceId) - items = a2.getWorkflows() + items = a2.getWorkflows(active=active) enriched = [] for wf in items: wf_id = wf.get("id") @@ -359,7 +459,7 @@ def get_workflows( active_run = None last_started_at = None for r in runs: - ts = r.get("_createdAt") + ts = r.get("sysCreatedAt") if ts and (last_started_at is None or ts > last_started_at): last_started_at = ts if r.get("status") in ("running", "paused"): @@ -375,7 +475,7 @@ def get_workflows( "runStatus": active_run.get("status") if active_run else None, "stuckAtNodeId": stuck_at_node_id, "stuckAtNodeLabel": stuck_at_node_label or stuck_at_node_id or "", - "createdAt": wf.get("_createdAt"), + "createdAt": wf.get("sysCreatedAt"), "lastStartedAt": last_started_at, }) return {"workflows": enriched} @@ -447,11 +547,163 @@ def delete_workflow( return {"success": True} +@router.post("/{instanceId}/workflows/{workflowId}/webhooks/{entryPointId}") +@limiter.limit("60/minute") +async def post_workflow_webhook( + request: Request, + instanceId: str = Path(..., description="Feature instance ID"), + workflowId: str = Path(..., description="Workflow ID"), + entryPointId: str = Path(..., description="Entry point ID (kind must be webhook)"), + body: dict = Body(default_factory=dict), + context: RequestContext = Depends(getRequestContext), +) -> dict: + """ + Invoke a workflow via a webhook entry point. Optional shared secret in + X-Automation2-Webhook-Secret or X-Webhook-Secret when config.webhookSecret is set. 
+ """ + mandateId = _validateInstanceAccess(instanceId, context) + userId = str(context.user.id) if context.user else None + a2 = getAutomation2Interface(context.user, mandateId, instanceId) + wf = a2.getWorkflow(workflowId) + if not wf or not wf.get("graph"): + raise HTTPException(status_code=404, detail="Workflow not found") + inv = find_invocation(wf, entryPointId) + if not inv: + raise HTTPException(status_code=404, detail="Entry point not found") + if inv.get("kind") != "webhook": + raise HTTPException(status_code=400, detail="Entry point is not a webhook") + if not inv.get("enabled", True): + raise HTTPException(status_code=400, detail="Entry point is disabled") + cfg = inv.get("config") or {} + secret = cfg.get("webhookSecret") + if secret: + hdr = request.headers.get("X-Automation2-Webhook-Secret") or request.headers.get( + "X-Webhook-Secret" + ) + if hdr != str(secret): + raise HTTPException(status_code=403, detail="Invalid webhook secret") + + services = getAutomation2Services( + context.user, + mandateId=mandateId, + featureInstanceId=instanceId, + ) + from modules.workflows.processing.shared.methodDiscovery import discoverMethods + + discoverMethods(services) + + title = inv.get("title") or {} + label = "" + if isinstance(title, dict): + label = title.get("en") or title.get("de") or "" + elif isinstance(title, str): + label = title + pl = body if isinstance(body, dict) else {} + base = default_run_envelope( + "webhook", + entry_point_id=inv.get("id"), + entry_point_label=label or None, + payload=pl, + raw={"httpBody": body}, + ) + run_env = normalize_run_envelope(base, user_id=userId) + + result = await executeGraph( + graph=wf["graph"], + services=services, + workflowId=workflowId, + instanceId=instanceId, + userId=userId, + mandateId=mandateId, + automation2_interface=a2, + run_envelope=run_env, + ) + return result + + +@router.post("/{instanceId}/workflows/{workflowId}/forms/{entryPointId}/submit") +@limiter.limit("60/minute") +async def 
post_workflow_form_submit( + request: Request, + instanceId: str = Path(..., description="Feature instance ID"), + workflowId: str = Path(..., description="Workflow ID"), + entryPointId: str = Path(..., description="Entry point ID (kind must be form)"), + body: dict = Body(default_factory=dict), + context: RequestContext = Depends(getRequestContext), +) -> dict: + """Form-style submit: same as execute with trigger.type form and payload from body.""" + mandateId = _validateInstanceAccess(instanceId, context) + userId = str(context.user.id) if context.user else None + a2 = getAutomation2Interface(context.user, mandateId, instanceId) + wf = a2.getWorkflow(workflowId) + if not wf or not wf.get("graph"): + raise HTTPException(status_code=404, detail="Workflow not found") + inv = find_invocation(wf, entryPointId) + if not inv: + raise HTTPException(status_code=404, detail="Entry point not found") + if inv.get("kind") != "form": + raise HTTPException(status_code=400, detail="Entry point is not a form") + if not inv.get("enabled", True): + raise HTTPException(status_code=400, detail="Entry point is disabled") + + services = getAutomation2Services( + context.user, + mandateId=mandateId, + featureInstanceId=instanceId, + ) + from modules.workflows.processing.shared.methodDiscovery import discoverMethods + + discoverMethods(services) + + title = inv.get("title") or {} + label = "" + if isinstance(title, dict): + label = title.get("en") or title.get("de") or "" + elif isinstance(title, str): + label = title + pl = body if isinstance(body, dict) else {} + base = default_run_envelope( + "form", + entry_point_id=inv.get("id"), + entry_point_label=label or None, + payload=pl, + raw={"formBody": body}, + ) + run_env = normalize_run_envelope(base, user_id=userId) + + result = await executeGraph( + graph=wf["graph"], + services=services, + workflowId=workflowId, + instanceId=instanceId, + userId=userId, + mandateId=mandateId, + automation2_interface=a2, + run_envelope=run_env, + ) + 
return result + + # ------------------------------------------------------------------------- # Runs and Resume # ------------------------------------------------------------------------- +@router.get("/{instanceId}/runs/completed") +@limiter.limit("60/minute") +def get_completed_runs( + request: Request, + instanceId: str = Path(..., description="Feature instance ID"), + limit: int = Query(20, ge=1, le=50), + context: RequestContext = Depends(getRequestContext), +) -> dict: + """Get recently completed runs with output (for Tasks page output section).""" + mandateId = _validateInstanceAccess(instanceId, context) + a2 = getAutomation2Interface(context.user, mandateId, instanceId) + runs = a2.getRecentCompletedRuns(limit=limit) + return {"runs": runs} + + @router.get("/{instanceId}/workflows/{workflowId}/runs") @limiter.limit("60/minute") def get_workflow_runs( @@ -536,7 +788,7 @@ def get_tasks( context: RequestContext = Depends(getRequestContext), ) -> dict: """Get tasks - by default those assigned to current user, or all if no assignee filter. - Enriches each task with workflowLabel and createdAt (_createdAt). + Enriches each task with workflowLabel and createdAt (from sysCreatedAt). 
""" mandateId = _validateInstanceAccess(instanceId, context) a2 = getAutomation2Interface(context.user, mandateId, instanceId) @@ -549,7 +801,7 @@ def get_tasks( enriched.append({ **t, "workflowLabel": wf.get("label", t.get("workflowId", "")) if wf else t.get("workflowId", ""), - "createdAt": t.get("_createdAt"), + "createdAt": t.get("sysCreatedAt"), }) return {"tasks": enriched} diff --git a/modules/features/chatbot/interfaceFeatureChatbot.py b/modules/features/chatbot/interfaceFeatureChatbot.py index 4a03bec9..151a96ce 100644 --- a/modules/features/chatbot/interfaceFeatureChatbot.py +++ b/modules/features/chatbot/interfaceFeatureChatbot.py @@ -20,6 +20,7 @@ from modules.datamodels.datamodelRbac import AccessRuleContext from modules.datamodels.datamodelUam import AccessLevel from modules.datamodels.datamodelChat import UserInputRequest +from modules.datamodels.datamodelBase import PowerOnModel from modules.shared.timeUtils import getUtcTimestamp, parseTimestamp # ============================================================================= @@ -27,7 +28,7 @@ from modules.shared.timeUtils import getUtcTimestamp, parseTimestamp # ============================================================================= -class ChatbotDocument(BaseModel): +class ChatbotDocument(PowerOnModel): """Documents attached to chatbot messages.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") messageId: str = Field(description="Foreign key to message") @@ -41,7 +42,7 @@ class ChatbotDocument(BaseModel): actionId: Optional[str] = Field(None, description="ID of the action that created this document") -class ChatbotMessage(BaseModel): +class ChatbotMessage(PowerOnModel): """Messages in chatbot conversations. 
Must match bridge format in memory.py.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") conversationId: str = Field(description="Foreign key to conversation") @@ -64,7 +65,7 @@ class ChatbotMessage(BaseModel): actionProgress: Optional[str] = Field(None, description="Action progress status") -class ChatbotLog(BaseModel): +class ChatbotLog(PowerOnModel): """Log entries for chatbot conversations.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") conversationId: str = Field(description="Foreign key to conversation") @@ -85,7 +86,7 @@ class ChatbotWorkflowModeEnum(str, Enum): WORKFLOW_CHATBOT = "Chatbot" -class ChatbotConversation(BaseModel): +class ChatbotConversation(PowerOnModel): """Chatbot conversation container. Per feature-instance isolation via featureInstanceId.""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key") featureInstanceId: str = Field(description="Feature instance ID for per-instance isolation") @@ -328,9 +329,8 @@ class ChatObjects: objectFields[fieldName] = value else: # Field not in model - treat as scalar if simple, otherwise filter out - # BUT: always include metadata fields (_createdBy, _createdAt, etc.) as they're handled by connector + # Underscore-prefixed keys (e.g. 
UI meta) pass through; sys* live on PowerOnModel subclasses if fieldName.startswith("_"): - # Metadata fields should be passed through to connector simpleFields[fieldName] = value elif isinstance(value, (str, int, float, bool, type(None))): simpleFields[fieldName] = value diff --git a/modules/features/chatbot/service.py b/modules/features/chatbot/service.py index 121ca29b..a98150b5 100644 --- a/modules/features/chatbot/service.py +++ b/modules/features/chatbot/service.py @@ -1222,23 +1222,21 @@ def _preflight_billing_check(services, mandateId: str, featureInstanceId: Option balanceCheck = billingService.checkBalance(0.01) if not balanceCheck.allowed: mid = str(getattr(services, "mandateId", None) or mandateId or "") - from modules.datamodels.datamodelBilling import BillingModelEnum from modules.serviceCenter.services.serviceBilling.billingExhaustedNotify import ( maybeEmailMandatePoolExhausted, ) - if balanceCheck.billingModel == BillingModelEnum.PREPAY_MANDATE: - u = getattr(services, "user", None) - ulabel = ( - (getattr(u, "email", None) or getattr(u, "username", None) or str(getattr(u, "id", ""))) - if u is not None else "" - ) - maybeEmailMandatePoolExhausted( - mid, - str(getattr(u, "id", "") if u is not None else ""), - ulabel, - float(balanceCheck.currentBalance or 0.0), - 0.01, - ) + u = getattr(services, "user", None) + ulabel = ( + (getattr(u, "email", None) or getattr(u, "username", None) or str(getattr(u, "id", ""))) + if u is not None else "" + ) + maybeEmailMandatePoolExhausted( + mid, + str(getattr(u, "id", "") if u is not None else ""), + ulabel, + float(balanceCheck.currentBalance or 0.0), + 0.01, + ) raise BillingService.InsufficientBalanceException.fromBalanceCheck( balanceCheck, mid, diff --git a/modules/features/commcoach/datamodelCommcoach.py b/modules/features/commcoach/datamodelCommcoach.py index 090640c6..82be6044 100644 --- a/modules/features/commcoach/datamodelCommcoach.py +++ b/modules/features/commcoach/datamodelCommcoach.py @@ -7,6 
+7,8 @@ Pydantic models for coaching contexts, sessions, messages, tasks, scores, and us from typing import Optional, List, Dict, Any from pydantic import BaseModel, Field from enum import Enum + +from modules.datamodels.datamodelBase import PowerOnModel import uuid @@ -73,7 +75,7 @@ class CoachingScoreTrend(str, Enum): # Database Models # ============================================================================ -class CoachingContext(BaseModel): +class CoachingContext(PowerOnModel): """A coaching context/dossier representing a topic the user is working on.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) userId: str = Field(description="Owner user ID (strict ownership)") @@ -91,11 +93,9 @@ class CoachingContext(BaseModel): lastSessionAt: Optional[str] = Field(default=None) rollingOverview: Optional[str] = Field(default=None, description="AI summary of older sessions for long context history") rollingOverviewUpToSessionCount: Optional[int] = Field(default=None, description="Session count covered by rollingOverview") - createdAt: Optional[str] = Field(default=None) - updatedAt: Optional[str] = Field(default=None) -class CoachingSession(BaseModel): +class CoachingSession(PowerOnModel): """A single coaching conversation session within a context.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) contextId: str = Field(description="FK to CoachingContext") @@ -115,11 +115,9 @@ class CoachingSession(BaseModel): emailSent: bool = Field(default=False) startedAt: Optional[str] = Field(default=None) endedAt: Optional[str] = Field(default=None) - createdAt: Optional[str] = Field(default=None) - updatedAt: Optional[str] = Field(default=None) -class CoachingMessage(BaseModel): +class CoachingMessage(PowerOnModel): """A single message in a coaching session.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) sessionId: str = Field(description="FK to CoachingSession") @@ -130,10 +128,9 @@ class CoachingMessage(BaseModel): contentType: 
CoachingMessageContentType = Field(default=CoachingMessageContentType.TEXT) audioRef: Optional[str] = Field(default=None, description="Reference to audio file") metadata: Optional[str] = Field(default=None, description="JSON: token count, voice info, etc.") - createdAt: Optional[str] = Field(default=None) -class CoachingTask(BaseModel): +class CoachingTask(PowerOnModel): """A task/checklist item assigned within a coaching context.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) contextId: str = Field(description="FK to CoachingContext") @@ -146,11 +143,9 @@ class CoachingTask(BaseModel): priority: CoachingTaskPriority = Field(default=CoachingTaskPriority.MEDIUM) dueDate: Optional[str] = Field(default=None) completedAt: Optional[str] = Field(default=None) - createdAt: Optional[str] = Field(default=None) - updatedAt: Optional[str] = Field(default=None) -class CoachingScore(BaseModel): +class CoachingScore(PowerOnModel): """A competence score for a dimension, recorded after a session.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) contextId: str = Field(description="FK to CoachingContext") @@ -161,17 +156,14 @@ class CoachingScore(BaseModel): score: float = Field(ge=0.0, le=100.0) trend: CoachingScoreTrend = Field(default=CoachingScoreTrend.STABLE) evidence: Optional[str] = Field(default=None, description="AI reasoning for the score") - createdAt: Optional[str] = Field(default=None) -class CoachingUserProfile(BaseModel): +class CoachingUserProfile(PowerOnModel): """Per-user coaching profile and preferences.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) userId: str = Field(description="Owner user ID") mandateId: str = Field(description="Mandate ID") instanceId: str = Field(description="Feature instance ID") - preferredLanguage: str = Field(default="de-DE") - preferredVoice: Optional[str] = Field(default=None, description="Google TTS voice name") dailyReminderTime: Optional[str] = Field(default=None, description="HH:MM 
format") dailyReminderEnabled: bool = Field(default=False) emailSummaryEnabled: bool = Field(default=True) @@ -180,15 +172,13 @@ class CoachingUserProfile(BaseModel): totalSessions: int = Field(default=0) totalMinutes: int = Field(default=0) lastSessionAt: Optional[str] = Field(default=None) - createdAt: Optional[str] = Field(default=None) - updatedAt: Optional[str] = Field(default=None) # ============================================================================ # Iteration 2: Personas # ============================================================================ -class CoachingPersona(BaseModel): +class CoachingPersona(PowerOnModel): """A roleplay persona for coaching sessions.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) userId: str = Field(description="Owner user ID ('system' for builtins)") @@ -201,35 +191,13 @@ class CoachingPersona(BaseModel): gender: Optional[str] = Field(default=None, description="m or f") category: str = Field(default="builtin", description="'builtin' or 'custom'") isActive: bool = Field(default=True) - createdAt: Optional[str] = Field(default=None) - updatedAt: Optional[str] = Field(default=None) - - -# ============================================================================ -# Iteration 2: Documents -# ============================================================================ - -class CoachingDocument(BaseModel): - """A document attached to a coaching context.""" - id: str = Field(default_factory=lambda: str(uuid.uuid4())) - contextId: str = Field(description="FK to CoachingContext") - userId: str = Field(description="Owner user ID") - mandateId: str = Field(description="Mandate ID") - instanceId: Optional[str] = Field(default=None) - fileName: str = Field(description="Original file name") - mimeType: str = Field(default="application/octet-stream") - fileSize: int = Field(default=0) - extractedText: Optional[str] = Field(default=None, description="Text content extracted from file") - summary: Optional[str] = 
Field(default=None, description="AI-generated summary") - fileRef: Optional[str] = Field(default=None, description="Reference to file in storage") - createdAt: Optional[str] = Field(default=None) # ============================================================================ # Iteration 2: Badges / Gamification # ============================================================================ -class CoachingBadge(BaseModel): +class CoachingBadge(PowerOnModel): """An achievement badge awarded to a user.""" id: str = Field(default_factory=lambda: str(uuid.uuid4())) userId: str = Field(description="Owner user ID") @@ -237,7 +205,6 @@ class CoachingBadge(BaseModel): instanceId: str = Field(description="Feature instance ID") badgeKey: str = Field(description="Badge identifier, e.g. 'streak_7'") awardedAt: Optional[str] = Field(default=None) - createdAt: Optional[str] = Field(default=None) # ============================================================================ @@ -261,6 +228,10 @@ class UpdateContextRequest(BaseModel): class SendMessageRequest(BaseModel): content: str = Field(description="User message text") contentType: Optional[CoachingMessageContentType] = CoachingMessageContentType.TEXT + fileIds: Optional[List[str]] = Field(default=None, description="Attached file IDs for agent context") + dataSourceIds: Optional[List[str]] = Field(default=None, description="Personal data source IDs") + featureDataSourceIds: Optional[List[str]] = Field(default=None, description="Feature data source IDs") + allowedProviders: Optional[List[str]] = Field(default=None, description="Allowed AI providers") class CreateTaskRequest(BaseModel): @@ -282,8 +253,6 @@ class UpdateTaskStatusRequest(BaseModel): class UpdateProfileRequest(BaseModel): - preferredLanguage: Optional[str] = None - preferredVoice: Optional[str] = None dailyReminderTime: Optional[str] = None dailyReminderEnabled: Optional[bool] = None emailSummaryEnabled: Optional[bool] = None diff --git 
a/modules/features/commcoach/interfaceFeatureCommcoach.py b/modules/features/commcoach/interfaceFeatureCommcoach.py index e612c6ba..825fca5d 100644 --- a/modules/features/commcoach/interfaceFeatureCommcoach.py +++ b/modules/features/commcoach/interfaceFeatureCommcoach.py @@ -269,34 +269,6 @@ class CommcoachObjects: from .datamodelCommcoach import CoachingPersona return self.db.recordDelete(CoachingPersona, personaId) - # ========================================================================= - # Documents - # ========================================================================= - - def getDocuments(self, contextId: str, userId: str) -> List[Dict[str, Any]]: - from .datamodelCommcoach import CoachingDocument - records = self.db.getRecordset(CoachingDocument, recordFilter={"contextId": contextId, "userId": userId}) - records.sort(key=lambda r: r.get("createdAt") or "", reverse=True) - return records - - def getDocument(self, documentId: str) -> Optional[Dict[str, Any]]: - from .datamodelCommcoach import CoachingDocument - records = self.db.getRecordset(CoachingDocument, recordFilter={"id": documentId}) - return records[0] if records else None - - def createDocument(self, data: Dict[str, Any]) -> Dict[str, Any]: - from .datamodelCommcoach import CoachingDocument - data["createdAt"] = getIsoTimestamp() - return self.db.recordCreate(CoachingDocument, data) - - def updateDocument(self, documentId: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]: - from .datamodelCommcoach import CoachingDocument - return self.db.recordModify(CoachingDocument, documentId, updates) - - def deleteDocument(self, documentId: str) -> bool: - from .datamodelCommcoach import CoachingDocument - return self.db.recordDelete(CoachingDocument, documentId) - # ========================================================================= # Badges # ========================================================================= diff --git a/modules/features/commcoach/mainCommcoach.py 
b/modules/features/commcoach/mainCommcoach.py index 69ac6b1c..d21da056 100644 --- a/modules/features/commcoach/mainCommcoach.py +++ b/modules/features/commcoach/mainCommcoach.py @@ -36,12 +36,22 @@ DATA_OBJECTS = [ { "objectKey": "data.feature.commcoach.CoachingContext", "label": {"en": "Coaching Context", "de": "Coaching-Kontext", "fr": "Contexte coaching"}, - "meta": {"table": "CoachingContext", "fields": ["id", "title", "category", "status"]} + "meta": { + "table": "CoachingContext", + "fields": ["id", "title", "category", "status"], + "isParent": True, + "displayFields": ["title", "category", "status"], + } }, { "objectKey": "data.feature.commcoach.CoachingSession", "label": {"en": "Coaching Session", "de": "Coaching-Session", "fr": "Session coaching"}, - "meta": {"table": "CoachingSession", "fields": ["id", "contextId", "status", "summary"]} + "meta": { + "table": "CoachingSession", + "fields": ["id", "contextId", "status", "summary"], + "parentTable": "CoachingContext", + "parentKey": "contextId", + } }, { "objectKey": "data.feature.commcoach.CoachingMessage", @@ -51,7 +61,12 @@ DATA_OBJECTS = [ { "objectKey": "data.feature.commcoach.CoachingTask", "label": {"en": "Coaching Task", "de": "Coaching-Aufgabe", "fr": "Tache coaching"}, - "meta": {"table": "CoachingTask", "fields": ["id", "contextId", "title", "status"]} + "meta": { + "table": "CoachingTask", + "fields": ["id", "contextId", "title", "status"], + "parentTable": "CoachingContext", + "parentKey": "contextId", + } }, { "objectKey": "data.feature.commcoach.CoachingScore", @@ -61,18 +76,13 @@ DATA_OBJECTS = [ { "objectKey": "data.feature.commcoach.CoachingUserProfile", "label": {"en": "User Profile", "de": "Benutzerprofil", "fr": "Profil utilisateur"}, - "meta": {"table": "CoachingUserProfile", "fields": ["id", "userId", "preferredLanguage"]} + "meta": {"table": "CoachingUserProfile", "fields": ["id", "userId", "dailyReminderEnabled"]} }, { "objectKey": "data.feature.commcoach.CoachingPersona", "label": 
{"en": "Coaching Persona", "de": "Coaching-Persona", "fr": "Persona coaching"}, "meta": {"table": "CoachingPersona", "fields": ["id", "key", "label", "gender"]} }, - { - "objectKey": "data.feature.commcoach.CoachingDocument", - "label": {"en": "Coaching Document", "de": "Coaching-Dokument", "fr": "Document coaching"}, - "meta": {"table": "CoachingDocument", "fields": ["id", "contextId", "fileName"]} - }, { "objectKey": "data.feature.commcoach.CoachingBadge", "label": {"en": "Coaching Badge", "de": "Coaching-Auszeichnung", "fr": "Badge coaching"}, @@ -114,12 +124,27 @@ RESOURCE_OBJECTS = [ ] TEMPLATE_ROLES = [ + { + "roleLabel": "commcoach-viewer", + "description": { + "en": "Communication Coach Viewer - View coaching data (read-only)", + "de": "Kommunikations-Coach Betrachter - Coaching-Daten ansehen (nur lesen)", + "fr": "Visualiseur Coach Communication - Consulter les donnees coaching (lecture seule)", + }, + "accessRules": [ + {"context": "UI", "item": "ui.feature.commcoach.dashboard", "view": True}, + {"context": "UI", "item": "ui.feature.commcoach.coaching", "view": True}, + {"context": "UI", "item": "ui.feature.commcoach.dossier", "view": True}, + {"context": "UI", "item": "ui.feature.commcoach.settings", "view": True}, + {"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"}, + ], + }, { "roleLabel": "commcoach-user", "description": { "en": "Communication Coach User - Can manage own coaching contexts and sessions", "de": "Kommunikations-Coach Benutzer - Kann eigene Coaching-Kontexte und Sessions verwalten", - "fr": "Utilisateur Coach Communication - Peut gerer ses propres contextes et sessions" + "fr": "Utilisateur Coach Communication - Peut gerer ses propres contextes et sessions", }, "accessRules": [ {"context": "UI", "item": "ui.feature.commcoach.dashboard", "view": True}, @@ -137,7 +162,20 @@ TEMPLATE_ROLES = [ {"context": "RESOURCE", "item": "resource.feature.commcoach.session.start", "view": True}, 
{"context": "RESOURCE", "item": "resource.feature.commcoach.session.complete", "view": True}, {"context": "RESOURCE", "item": "resource.feature.commcoach.task.manage", "view": True}, - ] + ], + }, + { + "roleLabel": "commcoach-admin", + "description": { + "en": "Communication Coach Admin - All UI and API actions; data scoped to own records", + "de": "Kommunikations-Coach Admin - Alle UI- und API-Aktionen; Daten nur eigene Datensaetze", + "fr": "Administrateur Coach Communication - Toute l'UI et les API; donnees propres", + }, + "accessRules": [ + {"context": "UI", "item": None, "view": True}, + {"context": "RESOURCE", "item": None, "view": True}, + {"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "m"}, + ], }, ] @@ -147,7 +185,7 @@ def getFeatureDefinition() -> Dict[str, Any]: "code": FEATURE_CODE, "label": FEATURE_LABEL, "icon": FEATURE_ICON, - "autoCreateInstance": True, + "autoCreateInstance": False, } diff --git a/modules/features/commcoach/routeFeatureCommcoach.py b/modules/features/commcoach/routeFeatureCommcoach.py index 9074d2ba..8ffd3eca 100644 --- a/modules/features/commcoach/routeFeatureCommcoach.py +++ b/modules/features/commcoach/routeFeatureCommcoach.py @@ -2,7 +2,7 @@ # All rights reserved. """ CommCoach routes for the backend API. -Implements coaching context management, session streaming, tasks, dashboard, and voice endpoints. +Implements coaching context management, session streaming, tasks, and dashboard. 
""" import logging @@ -26,7 +26,7 @@ from .datamodelCommcoach import ( CoachingContext, CoachingContextStatus, CoachingSession, CoachingSessionStatus, CoachingMessage, CoachingMessageRole, CoachingMessageContentType, CoachingTask, CoachingTaskStatus, - CoachingPersona, CoachingDocument, CoachingBadge, + CoachingPersona, CoachingBadge, CreateContextRequest, UpdateContextRequest, SendMessageRequest, CreateTaskRequest, UpdateTaskRequest, UpdateTaskStatusRequest, UpdateProfileRequest, @@ -334,10 +334,8 @@ async def startSession( try: from modules.interfaces.interfaceVoiceObjects import getVoiceInterface voiceInterface = getVoiceInterface(context.user, mandateId) - profile = interface.getProfile(userId, instanceId) - language = profile.get("preferredLanguage", "de-DE") if profile else "de-DE" - voiceName = profile.get("preferredVoice") if profile else None - from .serviceCommcoach import _stripMarkdownForTts + from .serviceCommcoach import _getUserVoicePrefs, _stripMarkdownForTts, _buildTtsConfigErrorMessage + language, voiceName = _getUserVoicePrefs(userId, mandateId) ttsResult = await voiceInterface.textToSpeech( text=_stripMarkdownForTts(greetingText), languageCode=language, @@ -350,8 +348,12 @@ async def startSession( audioBytes if isinstance(audioBytes, bytes) else audioBytes.encode() ).decode() yield f"data: {json.dumps({'type': 'ttsAudio', 'data': {'audio': audioB64, 'format': 'mp3'}})}\n\n" + else: + errorDetail = ttsResult.get("error", "Text-to-Speech failed") + yield f"data: {json.dumps({'type': 'error', 'data': {'message': _buildTtsConfigErrorMessage(language, voiceName, errorDetail), 'detail': errorDetail, 'ttsLanguage': language, 'ttsVoice': voiceName}})}\n\n" except Exception as e: logger.warning(f"TTS failed for resumed session: {e}") + yield f"data: {json.dumps({'type': 'error', 'data': {'message': 'Die konfigurierte Stimme für diese Sprache ist ungültig oder nicht verfügbar. 
Bitte passe sie unter Einstellungen > Stimme & Sprache an.', 'detail': str(e)}})}\n\n" yield f"data: {json.dumps({'type': 'complete', 'data': {}, 'timestamp': getIsoTimestamp()})}\n\n" return StreamingResponse( @@ -512,7 +514,13 @@ async def sendMessageStream( _activeProcessTasks.pop(sessionId, None) task = asyncio.create_task( - service.processMessage(sessionId, contextId, body.content, interface) + service.processMessage( + sessionId, contextId, body.content, interface, + fileIds=body.fileIds, + dataSourceIds=body.dataSourceIds, + featureDataSourceIds=body.featureDataSourceIds, + allowedProviders=body.allowedProviders, + ) ) task.add_done_callback(_onTaskDone) _activeProcessTasks[sessionId] = task @@ -574,8 +582,8 @@ async def sendAudioStream( if not audioBody: raise HTTPException(status_code=400, detail="No audio data received") - profile = interface.getProfile(str(context.user.id), instanceId) - language = profile.get("preferredLanguage", "de-DE") if profile else "de-DE" + from .serviceCommcoach import _getUserVoicePrefs + language, _ = _getUserVoicePrefs(str(context.user.id), mandateId) contextId = session.get("contextId") service = CommcoachService(context.user, mandateId, instanceId) @@ -839,73 +847,6 @@ async def updateProfile( return {"profile": updated} -# ========================================================================= -# Voice Endpoints -# ========================================================================= - -@router.get("/{instanceId}/voice/languages") -@limiter.limit("30/minute") -async def getVoiceLanguages( - request: Request, - instanceId: str, - context: RequestContext = Depends(getRequestContext), -): - mandateId = _validateInstanceAccess(instanceId, context) - from modules.interfaces.interfaceVoiceObjects import getVoiceInterface - voiceInterface = getVoiceInterface(context.user, mandateId) - languagesResult = await voiceInterface.getAvailableLanguages() - languageList = languagesResult.get("languages", []) if 
isinstance(languagesResult, dict) else languagesResult - return {"languages": languageList} - - -@router.get("/{instanceId}/voice/voices") -@limiter.limit("30/minute") -async def getVoiceVoices( - request: Request, - instanceId: str, - language: str = "de-DE", - context: RequestContext = Depends(getRequestContext), -): - mandateId = _validateInstanceAccess(instanceId, context) - from modules.interfaces.interfaceVoiceObjects import getVoiceInterface - voiceInterface = getVoiceInterface(context.user, mandateId) - voicesResult = await voiceInterface.getAvailableVoices(language) - voiceList = voicesResult.get("voices", []) if isinstance(voicesResult, dict) else voicesResult - return {"voices": voiceList} - - -@router.post("/{instanceId}/voice/tts") -@limiter.limit("10/minute") -async def testVoice( - request: Request, - instanceId: str, - context: RequestContext = Depends(getRequestContext), -): - """TTS preview / voice test.""" - mandateId = _validateInstanceAccess(instanceId, context) - body = await request.json() - text = body.get("text", "Hallo, ich bin dein Coaching-Assistent.") - language = body.get("language", "de-DE") - voiceId = body.get("voiceId") - - from modules.interfaces.interfaceVoiceObjects import getVoiceInterface - voiceInterface = getVoiceInterface(context.user, mandateId) - - try: - result = await voiceInterface.textToSpeech(text=text, languageCode=language, voiceName=voiceId) - if result and isinstance(result, dict): - audioContent = result.get("audioContent") - if audioContent: - audioB64 = base64.b64encode( - audioContent if isinstance(audioContent, bytes) else audioContent.encode() - ).decode() - return {"success": True, "audio": audioB64, "format": "mp3", "text": text} - return {"success": False, "error": "TTS returned no audio"} - except Exception as e: - logger.error(f"Voice test failed: {e}") - raise HTTPException(status_code=500, detail=f"TTS test failed: {str(e)}") - - # 
========================================================================= # Export Endpoints (Iteration 2) # ========================================================================= @@ -1074,202 +1015,6 @@ async def deletePersonaRoute( return {"deleted": True} -# ========================================================================= -# Document Endpoints (Iteration 2) -# ========================================================================= - -@router.get("/{instanceId}/contexts/{contextId}/documents") -@limiter.limit("60/minute") -async def listDocuments( - request: Request, - instanceId: str, - contextId: str, - context: RequestContext = Depends(getRequestContext), -): - _validateInstanceAccess(instanceId, context) - interface = _getInterface(context, instanceId) - userId = str(context.user.id) - docs = interface.getDocuments(contextId, userId) - return {"documents": docs} - - -@router.post("/{instanceId}/contexts/{contextId}/documents") -@limiter.limit("10/minute") -async def uploadDocument( - request: Request, - instanceId: str, - contextId: str, - context: RequestContext = Depends(getRequestContext), -): - """Upload a document and bind it to a context. 
Stores file in Management DB.""" - mandateId = _validateInstanceAccess(instanceId, context) - interface = _getInterface(context, instanceId) - userId = str(context.user.id) - - ctx = interface.getContext(contextId) - if not ctx: - raise HTTPException(status_code=404, detail="Context not found") - _validateOwnership(ctx, context) - - form = await request.form() - file = form.get("file") - if not file or not hasattr(file, "read"): - raise HTTPException(status_code=400, detail="No file uploaded") - - content = await file.read() - fileName = getattr(file, "filename", "document") - mimeType = getattr(file, "content_type", "application/octet-stream") - fileSize = len(content) - - if not content: - raise HTTPException(status_code=400, detail="Leere Datei hochgeladen") - - import modules.interfaces.interfaceDbManagement as interfaceDbManagement - mgmtInterface = interfaceDbManagement.getInterface(currentUser=context.user) - fileItem, _dupType = mgmtInterface.saveUploadedFile(content, fileName) - fileRef = fileItem.id - - extractedText = _extractText(content, mimeType, fileName) - summary = None - if extractedText and len(extractedText.strip()) > 50: - try: - from .serviceCommcoach import CommcoachService - service = CommcoachService(context.user, mandateId, instanceId) - aiResp = await service._callAi( - "Du fasst Dokumente in 2-3 Saetzen zusammen.", - f"Fasse folgendes Dokument zusammen:\n\n{extractedText[:3000]}" - ) - if aiResp and aiResp.errorCount == 0 and aiResp.content: - summary = aiResp.content.strip() - except Exception as e: - logger.warning(f"Document summary failed: {e}") - - docData = CoachingDocument( - contextId=contextId, - userId=userId, - mandateId=mandateId, - instanceId=instanceId, - fileName=fileName, - mimeType=mimeType, - fileSize=fileSize, - extractedText=extractedText[:10000] if extractedText else None, - summary=summary, - fileRef=fileRef, - ).model_dump() - created = interface.createDocument(docData) - return {"document": created} - - 
-@router.delete("/{instanceId}/documents/{documentId}") -@limiter.limit("10/minute") -async def deleteDocumentRoute( - request: Request, - instanceId: str, - documentId: str, - context: RequestContext = Depends(getRequestContext), -): - mandateId = _validateInstanceAccess(instanceId, context) - interface = _getInterface(context, instanceId) - - doc = interface.getDocument(documentId) - if not doc: - raise HTTPException(status_code=404, detail="Document not found") - _validateOwnership(doc, context) - - fileRef = doc.get("fileRef") - if fileRef: - try: - import modules.interfaces.interfaceDbManagement as interfaceDbManagement - mgmtInterface = interfaceDbManagement.getInterface( - currentUser=context.user, mandateId=mandateId, featureInstanceId=instanceId - ) - mgmtInterface.deleteFile(fileRef) - except Exception as e: - logger.warning(f"Failed to delete file {fileRef}: {e}") - - interface.deleteDocument(documentId) - return {"deleted": True} - - -def _extractText(content: bytes, mimeType: str, fileName: str) -> Optional[str]: - """Extract text from uploaded file content (TXT, MD, HTML, PDF, DOCX, XLSX, PPTX).""" - import io - - lowerName = fileName.lower() - try: - if mimeType in ("text/plain",) or lowerName.endswith(".txt"): - return content.decode("utf-8", errors="replace") - - if mimeType in ("text/markdown",) or lowerName.endswith(".md"): - return content.decode("utf-8", errors="replace") - - if mimeType in ("text/html",) or lowerName.endswith((".html", ".htm")): - from html.parser import HTMLParser - class _Strip(HTMLParser): - def __init__(self): - super().__init__() - self._parts: list[str] = [] - def handle_data(self, d): - self._parts.append(d) - def result(self): - return " ".join(self._parts) - parser = _Strip() - parser.feed(content.decode("utf-8", errors="replace")) - return parser.result() - - if "pdf" in mimeType or lowerName.endswith(".pdf"): - try: - from PyPDF2 import PdfReader - reader = PdfReader(io.BytesIO(content)) - return 
"".join(page.extract_text() or "" for page in reader.pages) - except ImportError: - logger.warning("PyPDF2 not installed, cannot extract PDF text") - return None - - if "wordprocessingml" in mimeType or lowerName.endswith(".docx"): - try: - from docx import Document - doc = Document(io.BytesIO(content)) - return "\n".join(p.text for p in doc.paragraphs if p.text) - except ImportError: - logger.warning("python-docx not installed, cannot extract DOCX text") - return None - - if "spreadsheetml" in mimeType or lowerName.endswith(".xlsx"): - try: - from openpyxl import load_workbook - wb = load_workbook(io.BytesIO(content), read_only=True, data_only=True) - parts: list[str] = [] - for ws in wb.worksheets: - for row in ws.iter_rows(values_only=True): - cells = [str(c) for c in row if c is not None] - if cells: - parts.append("\t".join(cells)) - return "\n".join(parts) - except ImportError: - logger.warning("openpyxl not installed, cannot extract XLSX text") - return None - - if "presentationml" in mimeType or lowerName.endswith(".pptx"): - try: - from pptx import Presentation - prs = Presentation(io.BytesIO(content)) - parts = [] - for slide in prs.slides: - for shape in slide.shapes: - if shape.has_text_frame: - parts.append(shape.text_frame.text) - return "\n".join(parts) - except ImportError: - logger.warning("python-pptx not installed, cannot extract PPTX text") - return None - - logger.info(f"No text extractor for {fileName} (mime={mimeType})") - except Exception as e: - logger.warning(f"Text extraction failed for {fileName}: {e}") - return None - - # ========================================================================= # Badge + Score History Endpoints (Iteration 2) # ========================================================================= diff --git a/modules/features/commcoach/serviceCommcoach.py b/modules/features/commcoach/serviceCommcoach.py index bf5ec281..332a4a01 100644 --- a/modules/features/commcoach/serviceCommcoach.py +++ 
b/modules/features/commcoach/serviceCommcoach.py @@ -6,6 +6,7 @@ Manages the coaching pipeline: message processing, AI calls, scoring, task extra """ import re +import html import logging import json import asyncio @@ -33,6 +34,7 @@ from .serviceCommcoachContextRetrieval import ( buildSessionSummariesForPrompt, findSessionByDate, searchSessionsByTopic, + searchSessionsByTopicRag, _parseDateFromMessage, PREVIOUS_SESSION_SUMMARIES_COUNT, ROLLING_OVERVIEW_SESSION_THRESHOLD, @@ -42,6 +44,122 @@ from .serviceCommcoachContextRetrieval import ( logger = logging.getLogger(__name__) +def _selectConfiguredVoice( + language: str, + voiceMap: Any, + legacyVoice: Optional[str] = None, + legacyLanguage: Optional[str] = None, +) -> Optional[str]: + """Resolve the configured TTS voice for a language from ttsVoiceMap, then legacy ttsVoice.""" + normalizedLanguage = str(language or "").strip() + normalizedLower = normalizedLanguage.lower() + baseLanguage = normalizedLower.split("-", 1)[0] if normalizedLower else "" + + if isinstance(voiceMap, dict) and voiceMap: + direct = voiceMap.get(normalizedLanguage) + if isinstance(direct, str) and direct.strip(): + return direct.strip() + + directBase = voiceMap.get(baseLanguage) + if isinstance(directBase, str) and directBase.strip(): + return directBase.strip() + + for mapKey, mapValue in voiceMap.items(): + if not isinstance(mapValue, str) or not mapValue.strip(): + continue + keyNorm = str(mapKey or "").strip().lower() + if keyNorm == normalizedLower or keyNorm == baseLanguage or (baseLanguage and keyNorm.startswith(baseLanguage + "-")): + return mapValue.strip() + + if legacyVoice and str(legacyVoice).strip(): + legacyLangNorm = str(legacyLanguage or "").strip().lower() + if not legacyLangNorm or legacyLangNorm == normalizedLower: + return str(legacyVoice).strip() + + return None + + +def _buildTtsConfigErrorMessage(language: str, voiceName: Optional[str], rawError: str = "") -> str: + if voiceName: + return ( + f'Die konfigurierte 
Stimme "{voiceName}" für {language} ist ungültig oder nicht verfügbar. ' + 'Bitte passe sie unter Einstellungen > Stimme & Sprache an.' + ) + return ( + f'Für die Sprache {language} ist keine gültige TTS-Stimme konfiguriert. ' + 'Bitte prüfe die Einstellungen unter Stimme & Sprache.' + ) + + +def _getUserVoicePrefs(userId: str, mandateId: Optional[str] = None) -> tuple: + """Load voice language and voiceName from central UserVoicePreferences. + Returns (language, voiceName) tuple.""" + try: + from modules.datamodels.datamodelUam import UserVoicePreferences + from modules.interfaces.interfaceDbApp import getRootInterface + rootIf = getRootInterface() + prefs = rootIf.db.getRecordset( + UserVoicePreferences, + recordFilter={"userId": userId} + ) + if prefs: + allPrefs = [ + pref if isinstance(pref, dict) else pref.model_dump() + for pref in prefs + ] + scopedPref = next( + ( + pref for pref in allPrefs + if str(pref.get("mandateId") or "").strip() == str(mandateId or "").strip() + ), + None, + ) + globalPref = next( + ( + pref for pref in allPrefs + if not str(pref.get("mandateId") or "").strip() + ), + None, + ) + + language = ( + (globalPref or {}).get("ttsLanguage") + or (globalPref or {}).get("sttLanguage") + or (scopedPref or {}).get("ttsLanguage") + or (scopedPref or {}).get("sttLanguage") + or "de-DE" + ) + + scopedVoiceFromMap = _selectConfiguredVoice( + language=language, + voiceMap=(scopedPref or {}).get("ttsVoiceMap"), + ) + globalVoice = _selectConfiguredVoice( + language=language, + voiceMap=(globalPref or {}).get("ttsVoiceMap"), + legacyVoice=(globalPref or {}).get("ttsVoice"), + legacyLanguage=(globalPref or {}).get("ttsLanguage"), + ) + scopedLegacyVoice = _selectConfiguredVoice( + language=language, + voiceMap=None, + legacyVoice=(scopedPref or {}).get("ttsVoice"), + legacyLanguage=(scopedPref or {}).get("ttsLanguage"), + ) + anyPref = allPrefs[0] + fallbackVoice = _selectConfiguredVoice( + language=language, + voiceMap=(anyPref or 
{}).get("ttsVoiceMap"), + legacyVoice=(anyPref or {}).get("ttsVoice"), + legacyLanguage=(anyPref or {}).get("ttsLanguage"), + ) + voiceName = scopedVoiceFromMap or globalVoice or scopedLegacyVoice or fallbackVoice + return (language, voiceName) + except Exception as e: + logger.warning(f"Failed to load UserVoicePreferences for user={userId}: {e}") + return ("de-DE", None) + + def _stripMarkdownForTts(text: str) -> str: """Strip markdown formatting so TTS reads clean speech text.""" t = text @@ -86,26 +204,91 @@ def cleanupSessionEvents(sessionId: str): CHUNK_WORD_SIZE = 4 CHUNK_DELAY_SECONDS = 0.05 -def _wrapEmailHtml(contentHtml: str) -> str: - """Wrap AI-generated HTML content in a styled email shell.""" - return f""" - - - -
-
-
-

Coaching-Session Zusammenfassung

-

PowerOn CommCoach

-
-
{contentHtml}
-
-

Diese Zusammenfassung wurde automatisch erstellt.

-
-
-
- -""" + +def _normalizeEmailBulletList(values: Any, maxItems: int = 4) -> List[str]: + items: List[str] = [] + if not isinstance(values, list): + return items + for value in values: + text = str(value or "").strip() + if text: + items.append(text) + if len(items) >= maxItems: + break + return items + + +def _buildSummaryEmailBlock( + emailData: Optional[Dict[str, Any]], + summary: str, + contextTitle: str, +) -> str: + """Render a stable, mail-client-friendly CommCoach summary block.""" + payload = emailData or {} + headline = str(payload.get("headline") or contextTitle or "Coaching-Session").strip() + intro = str(payload.get("intro") or "").strip() + coreTopic = str(payload.get("coreTopic") or "").strip() + insights = _normalizeEmailBulletList(payload.get("insights")) + nextSteps = _normalizeEmailBulletList(payload.get("nextSteps")) + progress = _normalizeEmailBulletList(payload.get("progress")) + + if not (intro or coreTopic or insights or nextSteps or progress): + escapedSummary = html.escape(summary or "").replace("\n", "
") + return ( + '
' + f'

{html.escape(headline)}

' + f'
{escapedSummary}
' + '
' + ) + + def _renderSection(title: str, bodyHtml: str) -> str: + if not bodyHtml: + return "" + return ( + '' + f'
{html.escape(title)}
' + f'
{bodyHtml}
' + '' + ) + + def _renderList(values: List[str]) -> str: + if not values: + return "" + rows = "".join( + '' + '•' + f'{html.escape(item)}' + '' + for item in values + ) + return f'{rows}
' + + introHtml = f'

{html.escape(intro)}

' if intro else "" + coreTopicHtml = f'

{html.escape(coreTopic)}

' if coreTopic else "" + + sectionsHtml = "".join([ + _renderSection("Kernbotschaft", introHtml), + _renderSection("Kernthema", coreTopicHtml), + _renderSection("Erkenntnisse", _renderList(insights)), + _renderSection("Nächste Schritte", _renderList(nextSteps)), + _renderSection("Fortschritt", _renderList(progress)), + ]) + + return ( + '' + '' + '
' + f'

{html.escape(headline)}

' + f'

Thema: {html.escape(contextTitle)}

' + '' + f'{sectionsHtml}' + '
' + '
' + ) DOC_INTENT_MAX_DOCS = 3 DOC_CONTENT_MAX_CHARS = 3000 @@ -135,7 +318,7 @@ def _stripPendingUserMessages(messages: List[Dict[str, Any]]) -> List[Dict[str, def _parseAiJsonResponse(rawText: str) -> Dict[str, Any]: - """Parse the structured JSON response from AI. Strips optional markdown code fences.""" + """Parse optional structured AI output; otherwise treat free text as normal response.""" text = rawText.strip() if text.startswith("```"): lines = text.split("\n") @@ -144,10 +327,14 @@ def _parseAiJsonResponse(rawText: str) -> Dict[str, Any]: lines = lines[:-1] text = "\n".join(lines) try: - return json.loads(text) + parsed = json.loads(text) + if isinstance(parsed, dict): + if parsed.get("text") and not parsed.get("speech"): + parsed["speech"] = parsed.get("text") + return parsed + return {"text": rawText.strip(), "speech": rawText.strip(), "documents": []} except json.JSONDecodeError: - logger.warning(f"AI JSON parse failed, using raw text: {text[:200]}") - return {"text": rawText.strip(), "speech": "", "documents": []} + return {"text": rawText.strip(), "speech": rawText.strip(), "documents": []} async def _generateAndEmitTts(sessionId: str, speechText: str, currentUser, mandateId: str, @@ -159,9 +346,7 @@ async def _generateAndEmitTts(sessionId: str, speechText: str, currentUser, mand from modules.interfaces.interfaceVoiceObjects import getVoiceInterface import base64 voiceInterface = getVoiceInterface(currentUser, mandateId) - profile = interface.getProfile(str(currentUser.id), instanceId) - language = profile.get("preferredLanguage", "de-DE") if profile else "de-DE" - voiceName = profile.get("preferredVoice") if profile else None + language, voiceName = _getUserVoicePrefs(str(currentUser.id), mandateId) ttsResult = await voiceInterface.textToSpeech( text=_stripMarkdownForTts(speechText), languageCode=language, @@ -174,8 +359,20 @@ async def _generateAndEmitTts(sessionId: str, speechText: str, currentUser, mand audioBytes if isinstance(audioBytes, bytes) 
else audioBytes.encode() ).decode() await emitSessionEvent(sessionId, "ttsAudio", {"audio": audioB64, "format": "mp3"}) + return + errorDetail = ttsResult.get("error", "Text-to-Speech failed") + await emitSessionEvent(sessionId, "error", { + "message": _buildTtsConfigErrorMessage(language, voiceName, errorDetail), + "detail": errorDetail, + "ttsLanguage": language, + "ttsVoice": voiceName, + }) except Exception as e: logger.warning(f"TTS failed for session {sessionId}: {e}") + await emitSessionEvent(sessionId, "error", { + "message": _buildTtsConfigErrorMessage("de-DE", None, str(e)), + "detail": str(e), + }) def _resolveFileNameAndMime(title: str) -> tuple: @@ -196,60 +393,36 @@ def _resolveFileNameAndMime(title: str) -> tuple: async def _saveOrUpdateDocument(doc: Dict[str, Any], contextId: str, userId: str, mandateId: str, instanceId: str, interface, sessionId: str, user=None): - """Save a new document or update an existing one. Stores file in Management DB.""" - from .datamodelCommcoach import CoachingDocument + """Save a document as platform FileItem (no CoachingDocument).""" try: - docId = doc.get("id") title = doc.get("title", "Dokument") content = doc.get("content", "") contentBytes = content.encode("utf-8") fileName, mimeType = _resolveFileNameAndMime(title) - fileRef = None - try: - import modules.interfaces.interfaceDbManagement as interfaceDbManagement - mgmtInterface = interfaceDbManagement.getInterface( - currentUser=user, mandateId=mandateId, featureInstanceId=instanceId - ) - fileItem = mgmtInterface.createFile(name=fileName, mimeType=mimeType, content=contentBytes) - mgmtInterface.createFileData(fileItem.id, contentBytes) - fileRef = fileItem.id - except Exception as e: - logger.warning(f"Failed to store document in file DB: {e}") + import modules.interfaces.interfaceDbManagement as interfaceDbManagement + mgmtInterface = interfaceDbManagement.getInterface( + currentUser=user, mandateId=mandateId, featureInstanceId=instanceId + ) + fileItem = 
mgmtInterface.createFile(name=fileName, mimeType=mimeType, content=contentBytes) + mgmtInterface.createFileData(fileItem.id, contentBytes) + + from modules.datamodels.datamodelFiles import FileItem as FileItemModel + mgmtInterface.db.recordModify(FileItemModel, fileItem.id, { + "scope": "featureInstance", + "featureInstanceId": instanceId, + "mandateId": mandateId, + }) + + await emitSessionEvent(sessionId, "documentCreated", { + "id": fileItem.id, "fileName": fileName, "fileSize": len(contentBytes), + }) + logger.info(f"Document saved as platform FileItem: {fileItem.id} ({title})") - if docId: - updates = { - "fileName": fileName, - "mimeType": mimeType, - "extractedText": content, - "summary": title, - "fileSize": len(contentBytes), - } - if fileRef: - updates["fileRef"] = fileRef - updated = interface.updateDocument(docId, updates) - if updated: - await emitSessionEvent(sessionId, "documentUpdated", updated) - logger.info(f"Document updated: {docId} ({title})") - else: - logger.warning(f"Document update failed, id not found: {docId}") - else: - docData = CoachingDocument( - contextId=contextId, - userId=userId, - mandateId=mandateId, - instanceId=instanceId, - fileName=fileName, - mimeType=mimeType, - fileSize=len(contentBytes), - extractedText=content, - summary=title, - fileRef=fileRef, - ).model_dump() - created = interface.createDocument(docData) - await emitSessionEvent(sessionId, "documentCreated", created) except Exception as e: - logger.warning(f"Failed to save/update document: {e}") + logger.warning(f"Failed to save document as FileItem: {e}") + + async def _resolveDocumentIntent(combinedUserPrompt: str, docs: List[Dict[str, Any]], callAiFn) -> Dict[str, Any]: @@ -269,17 +442,60 @@ async def _resolveDocumentIntent(combinedUserPrompt: str, docs: List[Dict[str, A return {"read": [], "update": [], "create": [], "noDocumentAction": True} -def _loadDocumentContents(docIds: List[str], interface) -> List[Dict[str, Any]]: - """Load full extractedText for the 
given document IDs.""" - results = [] - for docId in docIds[:DOC_INTENT_MAX_DOCS]: - doc = interface.getDocument(docId) - if doc and doc.get("extractedText"): - results.append({ - "id": doc.get("id", ""), - "title": doc.get("summary") or doc.get("fileName", ""), - "content": doc.get("extractedText", "")[:DOC_CONTENT_MAX_CHARS], +def _getPlatformFileList(mandateId: str = None, instanceId: str = None) -> List[Dict[str, Any]]: + """Get list of platform FileItems for this feature instance (for doc intent detection).""" + try: + import modules.interfaces.interfaceDbManagement as interfaceDbManagement + from modules.datamodels.datamodelFiles import FileItem + mgmtIf = interfaceDbManagement.getInterface( + currentUser=None, mandateId=mandateId, featureInstanceId=instanceId + ) + records = mgmtIf.db.getRecordset( + FileItem, recordFilter={"featureInstanceId": instanceId} + ) if instanceId else [] + result = [] + for r in records: + d = r if isinstance(r, dict) else r.model_dump() if hasattr(r, "model_dump") else {} + result.append({ + "id": d.get("id", ""), + "fileName": d.get("fileName") or d.get("name") or "Dokument", + "summary": d.get("fileName") or "", }) + return result + except Exception as e: + logger.warning(f"Failed to load platform file list: {e}") + return [] + + +def _loadDocumentContents(docIds: List[str], interface, mandateId: str = None, instanceId: str = None) -> List[Dict[str, Any]]: + """Load file content for given IDs from platform FileItem store.""" + results = [] + try: + import modules.interfaces.interfaceDbManagement as interfaceDbManagement + from modules.datamodels.datamodelFiles import FileItem + mgmtIf = interfaceDbManagement.getInterface( + currentUser=None, mandateId=mandateId, featureInstanceId=instanceId + ) + for fId in docIds[:DOC_INTENT_MAX_DOCS]: + fileRecords = mgmtIf.db.getRecordset(FileItem, recordFilter={"id": fId}) + if fileRecords: + f = fileRecords[0] if isinstance(fileRecords[0], dict) else fileRecords[0].model_dump() + content = 
"" + try: + from modules.datamodels.datamodelKnowledge import FileContentIndex + idxRecords = mgmtIf.db.getRecordset(FileContentIndex, recordFilter={"fileId": fId}) + if idxRecords: + idx = idxRecords[0] if isinstance(idxRecords[0], dict) else idxRecords[0].model_dump() + content = (idx.get("extractedText") or "")[:DOC_CONTENT_MAX_CHARS] + except Exception: + pass + results.append({ + "id": fId, + "title": f.get("fileName") or f.get("name") or "Dokument", + "content": content, + }) + except Exception as e: + logger.warning(f"Failed to load document contents from platform: {e}") return results @@ -319,23 +535,190 @@ def _resolvePersona(session: Optional[Dict[str, Any]], interface) -> Optional[Di return None -def _getDocumentSummaries(contextId: str, userId: str, interface) -> Optional[List[str]]: - """Get document summaries for context to include in the AI prompt.""" +def _getDocumentSummaries(contextId: str, userId: str, interface, + mandateId: str = None, instanceId: str = None) -> Optional[List[str]]: + """Get document summaries from platform FileItems (UDL) for the coaching instance.""" try: - docs = interface.getDocuments(contextId, userId) + import modules.interfaces.interfaceDbManagement as interfaceDbManagement + from modules.datamodels.datamodelFiles import FileItem + mgmtIf = interfaceDbManagement.getInterface( + currentUser=None, mandateId=mandateId, featureInstanceId=instanceId + ) + files = mgmtIf.db.getRecordset( + FileItem, recordFilter={"featureInstanceId": instanceId} + ) if instanceId else [] summaries = [] - for doc in docs[:5]: - summary = doc.get("summary") - if summary: - summaries.append(f"[{doc.get('fileName', 'Dokument')}] {summary}") - elif doc.get("extractedText"): - summaries.append(f"[{doc.get('fileName', 'Dokument')}] {doc['extractedText'][:200]}...") + for f in files[:10]: + fData = f if isinstance(f, dict) else f.model_dump() if hasattr(f, "model_dump") else {} + name = fData.get("fileName") or fData.get("name") or "Dokument" + fId = 
fData.get("id") + snippet = None + if fId: + try: + from modules.datamodels.datamodelKnowledge import FileContentIndex + idxRecords = mgmtIf.db.getRecordset( + FileContentIndex, recordFilter={"fileId": fId} + ) + if idxRecords: + idx = idxRecords[0] if isinstance(idxRecords[0], dict) else idxRecords[0].model_dump() + snippet = (idx.get("extractedText") or "")[:200] + except Exception: + pass + if snippet: + summaries.append(f"[{name}] {snippet}...") + else: + summaries.append(f"[{name}]") return summaries if summaries else None except Exception as e: - logger.warning(f"Failed to load document summaries for context {contextId}: {e}") + logger.warning(f"Failed to load platform file summaries for instance {instanceId}: {e}") return None +def _createCommcoachRagFn( + userId: str, + featureInstanceId: str, + mandateId: str, + context: Dict[str, Any], + tasks: List[Dict[str, Any]], + currentUser=None, +): + """Create a CommCoach-specific RAG function combining KnowledgeService RAG with live coaching DB context.""" + + async def _buildRagContext( + currentPrompt: str, workflowId: str, userId: str, + featureInstanceId: str, mandateId: str, **kwargs + ) -> str: + parts = [] + + # 1. Standard KnowledgeService RAG (finds indexed session chunks + files) + try: + from modules.serviceCenter import getService + from modules.serviceCenter.context import ServiceCenterContext + serviceContext = ServiceCenterContext( + user=currentUser, + mandate_id=mandateId, + feature_instance_id=featureInstanceId, + ) + knowledgeService = getService("knowledge", serviceContext) + ragContext = await knowledgeService.buildAgentContext( + currentPrompt=currentPrompt, + workflowId=workflowId, + userId=userId, + featureInstanceId=featureInstanceId, + mandateId=mandateId, + ) + if ragContext: + parts.append(ragContext) + except Exception as e: + logger.debug(f"CommCoach RAG knowledge context failed: {e}") + + # 2. 
Live coaching DB context (current goals, tasks, rolling overview) + liveContext = [] + goals = _parseJsonField(context.get("goals")) if context else None + if goals: + goalTexts = [g.get("text", g) if isinstance(g, dict) else str(g) for g in goals if g] + if goalTexts: + liveContext.append("Aktuelle Ziele:\n" + "\n".join(f"- {g}" for g in goalTexts)) + + openTasks = [t for t in (tasks or []) if t.get("status") in ("open", "inProgress")] + if openTasks: + taskLines = [f"- {t.get('title', '')}" for t in openTasks[:5]] + liveContext.append("Offene Aufgaben:\n" + "\n".join(taskLines)) + + rollingOverview = context.get("rollingOverview") if context else None + if rollingOverview: + liveContext.append(f"Gesamtüberblick bisheriger Sessions:\n{rollingOverview[:500]}") + + insights = _parseJsonField(context.get("insights")) if context else None + if insights: + insightTexts = [i.get("text", i) if isinstance(i, dict) else str(i) for i in insights[-5:] if i] + if insightTexts: + liveContext.append("Bisherige Erkenntnisse:\n" + "\n".join(f"- {t}" for t in insightTexts)) + + if liveContext: + parts.append("--- Coaching-Kontext (Live) ---\n" + "\n\n".join(liveContext)) + + return "\n\n".join(parts) if parts else "" + + return _buildRagContext + + +def _parseJsonField(value, fallback=None): + if not value: + return fallback + if isinstance(value, (list, dict)): + return value + try: + return json.loads(value) + except (json.JSONDecodeError, TypeError): + return fallback + + +_RESEARCH_KEYWORDS = re.compile( + r"\b(such|recherchier|schau nach|im web|finde heraus|google|online|nachschlagen|" + r"search|look up|find out|browse)\b", + re.IGNORECASE, +) + + +def _shouldActivateTools( + fileIds: Optional[List[str]], + dataSourceIds: Optional[List[str]], + featureDataSourceIds: Optional[List[str]], + userMessage: str, +) -> bool: + """Decide whether the agent should have tools activated for this turn.""" + if fileIds: + return True + if dataSourceIds: + return True + if 
featureDataSourceIds: + return True + if _RESEARCH_KEYWORDS.search(userMessage or ""): + return True + return False + + +def _buildConversationHistory(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """Convert coaching messages to OpenAI-style conversation history for the agent.""" + history = [] + for msg in messages: + role = msg.get("role", "user") + content = msg.get("content", "") + if role in ("user", "assistant") and content: + history.append({"role": role, "content": content}) + return history + + +_TTS_WORD_LIMIT = 200 + + +async def _prepareSpeechText(fullText: str, callAiFn) -> str: + """Prepare text for TTS. Short responses used directly; long ones get summarized.""" + cleaned = _stripMarkdownForTts(fullText) + wordCount = len(cleaned.split()) + if wordCount <= _TTS_WORD_LIMIT: + return cleaned + try: + prompt = f"""Fasse den folgenden Text in 3-4 natürlichen, gesprochenen Sätzen zusammen. +Der Text soll vorgelesen werden – schreibe daher natürlich und flüssig, keine Aufzählungen. +Behalte die wichtigsten Punkte und den Ton bei. + +Text: +{cleaned[:3000]} + +Antworte NUR mit der gekürzten Sprachversion.""" + response = await callAiFn( + "Du kürzt Texte für Sprachausgabe. 
Antworte kurz und natürlich.", + prompt, + ) + if response and response.errorCount == 0 and response.content: + return response.content.strip() + except Exception as e: + logger.warning(f"Speech summary generation failed: {e}") + return cleaned[:1500] + + class CommcoachService: """Coaching orchestrator: processes messages, calls AI, extracts tasks and scores.""" @@ -345,14 +728,20 @@ class CommcoachService: self.instanceId = instanceId self.userId = str(currentUser.id) - async def processMessage(self, sessionId: str, contextId: str, userContent: str, interface) -> Dict[str, Any]: + async def processMessage( + self, sessionId: str, contextId: str, userContent: str, interface, + fileIds: Optional[List[str]] = None, + dataSourceIds: Optional[List[str]] = None, + featureDataSourceIds: Optional[List[str]] = None, + allowedProviders: Optional[List[str]] = None, + ) -> Dict[str, Any]: """ - Process a user message through the coaching pipeline: + Process a user message through the agent-based coaching pipeline: 1. Store user message - 2. Build context with history - 3. Call AI for coaching response - 4. Store assistant message - 5. Emit SSE events + 2. Build coaching system prompt + session history + 3. Run AgentService with CommCoach RAG and optional tools + 4. Map agent events to CommCoach SSE events + 5. Post-processing: store message, TTS, tasks, scores """ from . 
import interfaceFeatureCommcoach as interfaceDb @@ -410,84 +799,62 @@ class CommcoachService: logger.warning(f"History compression failed for session {sessionId}: {e}") previousMessages = messages[-20:] - # Combine all pending user messages (after last assistant message) as the user prompt combinedUserPrompt = _buildCombinedUserPrompt(previousMessages) if not combinedUserPrompt: combinedUserPrompt = userContent - # Strip pending user messages from previousMessages to avoid redundancy in system prompt contextMessages = _stripPendingUserMessages(previousMessages) - tasks = interface.getTasks(contextId, self.userId) await emitSessionEvent(sessionId, "status", {"label": "Kontext wird geladen..."}) - retrievalResult = await self._buildRetrievalContext( - contextId, sessionId, combinedUserPrompt, context, interface - ) - persona = _resolvePersona(session, interface) - documentSummaries = _getDocumentSummaries(contextId, self.userId, interface) - - # Document intent detection (pre-AI-call) - referencedDocumentContents = None - allDocs = interface.getDocuments(contextId, self.userId) if documentSummaries else [] - if allDocs: - await emitSessionEvent(sessionId, "status", {"label": "Dokumente werden geprueft..."}) - docIntent = await _resolveDocumentIntent(combinedUserPrompt, allDocs, self._callAi) - if not docIntent.get("noDocumentAction"): - docIdsToLoad = list(set((docIntent.get("read") or []) + (docIntent.get("update") or []))) - if docIdsToLoad: - referencedDocumentContents = _loadDocumentContents(docIdsToLoad, interface) systemPrompt = aiPrompts.buildCoachingSystemPrompt( context, contextMessages, tasks, - previousSessionSummaries=retrievalResult.get("previousSessionSummaries"), earlierSummary=earlierSummary, - rollingOverview=retrievalResult.get("rollingOverview"), - retrievedSession=retrievalResult.get("retrievedSession"), - retrievedByTopic=retrievalResult.get("retrievedByTopic"), persona=persona, - documentSummaries=documentSummaries, - 
referencedDocumentContents=referencedDocumentContents, ) - if retrievalResult.get("intent") == RetrievalIntent.SUMMARIZE_ALL: - systemPrompt += "\n\nWICHTIG: Der Benutzer möchte eine Gesamtzusammenfassung. Erstelle eine umfassende Zusammenfassung aller genannten Sessions und der aktuellen Session." + # Build conversation history for the agent + conversationHistory = _buildConversationHistory(contextMessages) + + # Dynamic tool activation + useTools = _shouldActivateTools(fileIds, dataSourceIds, featureDataSourceIds, combinedUserPrompt) - # Call AI await emitSessionEvent(sessionId, "status", {"label": "Coach formuliert Antwort..."}) try: - aiResponse = await self._callAi(systemPrompt, combinedUserPrompt) + agentResponse = await self._runAgent( + sessionId=sessionId, + prompt=combinedUserPrompt, + systemPrompt=systemPrompt, + conversationHistory=conversationHistory, + context=context, + tasks=tasks, + fileIds=fileIds, + useTools=useTools, + allowedProviders=allowedProviders, + ) except asyncio.CancelledError: logger.info(f"processMessage cancelled for session {sessionId} (new message arrived)") return createdUserMsg except Exception as e: - logger.error(f"AI call failed for session {sessionId}: {e}") + logger.error(f"Agent call failed for session {sessionId}: {e}") await emitSessionEvent(sessionId, "error", {"message": f"AI error: {str(e)}"}) return createdUserMsg - responseRaw = aiResponse.content.strip() if aiResponse and aiResponse.errorCount == 0 else "" + textContent = agentResponse or "" - if not responseRaw: - parsed = {"text": "Entschuldigung, ich konnte gerade nicht antworten. Bitte versuche es erneut.", "speech": "", "documents": []} - else: - parsed = _parseAiJsonResponse(responseRaw) - - textContent = parsed.get("text", "") - speechContent = parsed.get("speech", "") - documents = parsed.get("documents", []) + if not textContent: + textContent = "Entschuldigung, ich konnte gerade nicht antworten. Bitte versuche es erneut." 
if asyncio.current_task() and asyncio.current_task().cancelled(): logger.info(f"processMessage cancelled before storing response for session {sessionId}") return createdUserMsg - for doc in documents: - await _saveOrUpdateDocument(doc, contextId, self.userId, self.mandateId, self.instanceId, interface, sessionId, user=self.currentUser) - assistantMsg = CoachingMessage( sessionId=sessionId, contextId=contextId, @@ -503,8 +870,11 @@ class CommcoachService: await emitSessionEvent(sessionId, "status", {"label": "Antwort wird verarbeitet..."}) + # TTS: use free-text directly; for long responses, generate speech summary + speechText = await _prepareSpeechText(textContent, self._callAi) + ttsTask = asyncio.create_task( - _generateAndEmitTts(sessionId, speechContent, self.currentUser, self.mandateId, self.instanceId, interface) + _generateAndEmitTts(sessionId, speechText, self.currentUser, self.mandateId, self.instanceId, interface) ) await _emitChunkedResponse(sessionId, createdAssistantMsg, textContent) await ttsTask @@ -512,6 +882,75 @@ class CommcoachService: await emitSessionEvent(sessionId, "complete", {}) return createdAssistantMsg + async def _runAgent( + self, + sessionId: str, + prompt: str, + systemPrompt: str, + conversationHistory: List[Dict[str, Any]], + context: Dict[str, Any], + tasks: List[Dict[str, Any]], + fileIds: Optional[List[str]] = None, + useTools: bool = False, + allowedProviders: Optional[List[str]] = None, + ) -> str: + """Run the AgentService for a coaching message. 
Returns the final text response.""" + from modules.serviceCenter import getService + from modules.serviceCenter.context import ServiceCenterContext + from modules.serviceCenter.services.serviceAgent.datamodelAgent import AgentConfig, AgentEventTypeEnum + + serviceContext = ServiceCenterContext( + user=self.currentUser, + mandate_id=self.mandateId, + feature_instance_id=self.instanceId, + ) + agentService = getService("agent", serviceContext) + + config = AgentConfig( + toolSet="commcoach" if useTools else "none", + maxRounds=3 if useTools else 1, + temperature=0.4, + ) + + buildRagContextFn = _createCommcoachRagFn( + userId=self.userId, + featureInstanceId=self.instanceId, + mandateId=self.mandateId, + context=context, + tasks=tasks, + currentUser=self.currentUser, + ) + + finalText = "" + async for event in agentService.runAgent( + prompt=prompt, + fileIds=fileIds, + config=config, + toolSet=config.toolSet, + workflowId=f"commcoach:{sessionId}", + conversationHistory=conversationHistory, + buildRagContextFn=buildRagContextFn, + systemPromptOverride=systemPrompt, + ): + if event.type == AgentEventTypeEnum.CHUNK: + chunk = event.content or "" + finalText += chunk + elif event.type == AgentEventTypeEnum.MESSAGE: + finalText += event.content or "" + elif event.type == AgentEventTypeEnum.FINAL: + if not finalText: + finalText = event.content or "" + elif event.type == AgentEventTypeEnum.TOOL_CALL: + await emitSessionEvent(sessionId, "toolCall", event.data or {}) + elif event.type == AgentEventTypeEnum.TOOL_RESULT: + await emitSessionEvent(sessionId, "toolResult", event.data or {}) + elif event.type == AgentEventTypeEnum.AGENT_PROGRESS: + await emitSessionEvent(sessionId, "agentProgress", event.data or {}) + elif event.type == AgentEventTypeEnum.ERROR: + await emitSessionEvent(sessionId, "error", {"message": event.content or "Agent error"}) + + return finalText.strip() + async def processSessionOpening(self, sessionId: str, contextId: str, interface) -> Dict[str, Any]: 
""" Generate and stream the opening greeting for a new session. @@ -536,7 +975,9 @@ class CommcoachService: session = interface.getSession(sessionId) persona = _resolvePersona(session, interface) - documentSummaries = _getDocumentSummaries(contextId, self.userId, interface) + documentSummaries = _getDocumentSummaries( + contextId, self.userId, interface, mandateId=self.mandateId, instanceId=self.instanceId + ) systemPrompt = aiPrompts.buildCoachingSystemPrompt( context, previousMessages, tasks, @@ -672,9 +1113,9 @@ class CommcoachService: }) return session - # Generate summary (AI returns JSON with summary + emailHtml) + # Generate summary (AI returns JSON with summary + structured email payload) summary = None - emailHtml = None + emailData = None try: summaryPrompt = aiPrompts.buildSummaryPrompt(messages, context.get("title", "Coaching")) summaryResponse = await self._callAi("Du bist ein präziser Zusammenfasser. Antworte NUR als JSON.", summaryPrompt) @@ -682,7 +1123,10 @@ class CommcoachService: parsed = aiPrompts.parseJsonResponse(summaryResponse.content.strip(), None) if isinstance(parsed, dict): summary = parsed.get("summary") or parsed.get("text") - emailHtml = parsed.get("emailHtml") + if isinstance(parsed.get("email"), dict): + emailData = parsed.get("email") + elif isinstance(parsed.get("emailData"), dict): + emailData = parsed.get("emailData") else: summary = summaryResponse.content.strip() except Exception as e: @@ -773,6 +1217,40 @@ class CommcoachService: except Exception as e: logger.warning(f"Insight generation failed: {e}") + # Index session data for RAG-based long-term memory + try: + from .serviceCommcoachIndexer import indexSessionData + from modules.serviceCenter import getService + from modules.serviceCenter.context import ServiceCenterContext + + serviceContext = ServiceCenterContext( + user=self.currentUser, + mandate_id=self.mandateId, + feature_instance_id=self.instanceId, + ) + knowledgeService = getService("knowledge", serviceContext) + 
parsedGoals = aiPrompts._parseJsonField(context.get("goals") if context else None, []) + parsedInsights = aiPrompts._parseJsonField(context.get("insights") if context else None, []) + allTasks = interface.getTasks(contextId, self.userId) + + await indexSessionData( + sessionId=sessionId, + contextId=contextId, + userId=self.userId, + featureInstanceId=self.instanceId, + mandateId=self.mandateId, + messages=messages, + summary=summary, + keyTopics=keyTopics, + goals=parsedGoals, + insights=parsedInsights, + tasks=allTasks, + contextTitle=context.get("title", "Coaching") if context else "Coaching", + knowledgeService=knowledgeService, + ) + except Exception as e: + logger.warning(f"Coaching session indexing failed (non-blocking): {e}") + # Calculate duration startedAt = session.get("startedAt", "") durationSeconds = 0 @@ -828,7 +1306,7 @@ class CommcoachService: # Send email summary if summary: contextTitle = context.get("title", "Coaching") if context else "Coaching" - await self._sendSessionEmail(session, summary, emailHtml, contextTitle, interface) + await self._sendSessionEmail(session, summary, emailData, contextTitle, interface) await emitSessionEvent(sessionId, "sessionState", { "status": "completed", @@ -879,8 +1357,15 @@ class CommcoachService: except Exception as e: logger.warning(f"Failed to update streak: {e}") - async def _sendSessionEmail(self, session: Dict[str, Any], summary: str, emailHtml: str, contextTitle: str, interface): - """Send session summary via email if enabled. 
Uses AI-generated HTML directly.""" + async def _sendSessionEmail( + self, + session: Dict[str, Any], + summary: str, + emailData: Optional[Dict[str, Any]], + contextTitle: str, + interface, + ): + """Send session summary via email with the standard PowerOn layout.""" try: profile = interface.getProfile(self.userId, self.instanceId) if profile and not profile.get("emailSummaryEnabled", True): @@ -888,6 +1373,7 @@ class CommcoachService: from modules.interfaces.interfaceMessaging import getInterface as getMessagingInterface from modules.interfaces.interfaceDbApp import getRootInterface + from modules.shared.notifyMandateAdmins import _renderHtmlEmail, _resolveMandateName rootInterface = getRootInterface() user = rootInterface.getUser(self.userId) @@ -896,9 +1382,18 @@ class CommcoachService: messaging = getMessagingInterface() subject = f"Coaching-Session Zusammenfassung: {contextTitle}" - - contentHtml = emailHtml if emailHtml else f"

{summary}

" - htmlMessage = _wrapEmailHtml(contentHtml) + mandateName = _resolveMandateName(self.mandateId) + contentHtml = _buildSummaryEmailBlock(emailData, summary, contextTitle) + htmlMessage = _renderHtmlEmail( + "Coaching-Session Zusammenfassung", + [ + f'Thema: {contextTitle}', + "Hier ist die kompakte Zusammenfassung deiner abgeschlossenen Session.", + ], + mandateName, + footerNote="Diese Zusammenfassung wurde automatisch aus deiner Coaching-Session erstellt.", + rawHtmlBlock=contentHtml, + ) messaging.send("email", user.email, subject, htmlMessage) interface.updateSession(session.get("id"), {"emailSent": True}) @@ -966,10 +1461,29 @@ class CommcoachService: result["rollingOverview"] = rollingOverview elif intent == RetrievalIntent.RECALL_TOPIC: - retrieved = searchSessionsByTopic(completedSessions, userContent) + retrieved = list(searchSessionsByTopic(completedSessions, userContent)) + queryVector = await self._embedUserQuery(userContent) + if queryVector: + ragHits = searchSessionsByTopicRag( + userContent, + self.userId, + self.instanceId, + mandateId=self.mandateId, + queryVector=queryVector, + ) + for hit in ragHits: + content = (hit.get("content") or "").strip() + if not content: + continue + retrieved.append({ + "summary": content[:450], + "date": "", + "source": "rag", + "ragSourceLabel": hit.get("fileName") or "Mandantenwissen", + }) result["retrievedByTopic"] = retrieved if retrieved: - logger.info(f"Topic recall: found {len(retrieved)} sessions for query") + logger.info(f"Topic recall: {len(retrieved)} item(s) (sessions + optional RAG)") result["previousSessionSummaries"] = buildSessionSummariesForPrompt( allSessions, excludeSessionId=sessionId, limit=PREVIOUS_SESSION_SUMMARIES_COUNT ) @@ -1032,3 +1546,31 @@ class CommcoachService: ) ) return await aiService.callAi(aiRequest) + + async def _embedUserQuery(self, text: str) -> Optional[List[float]]: + """Embedding for mandate-wide RAG (same ServiceCenter AI service as coaching calls).""" + snippet = (text 
or "").strip()[:2000] + if not snippet: + return None + from modules.serviceCenter import getService + from modules.serviceCenter.context import ServiceCenterContext + + serviceContext = ServiceCenterContext( + user=self.currentUser, + mandate_id=self.mandateId, + feature_instance_id=self.instanceId, + ) + aiService = getService("ai", serviceContext) + await aiService.ensureAiObjectsInitialized() + try: + response = await aiService.callEmbedding([snippet]) + except Exception as e: + logger.warning(f"CommCoach RAG embedding failed: {e}") + return None + if not response or response.errorCount > 0: + return None + embs = (response.metadata or {}).get("embeddings") or [] + vec = embs[0] if embs else None + if isinstance(vec, list) and len(vec) > 0: + return vec + return None diff --git a/modules/features/commcoach/serviceCommcoachAi.py b/modules/features/commcoach/serviceCommcoachAi.py index 7ba52f58..8b916005 100644 --- a/modules/features/commcoach/serviceCommcoachAi.py +++ b/modules/features/commcoach/serviceCommcoachAi.py @@ -168,29 +168,18 @@ Handlungsprinzip: - Wenn der Benutzer dich bittet, etwas zu erstellen (Dokument, Präsentation, Checkliste, Plan), dann TU ES SOFORT. Frage NICHT nochmals nach Bestätigung. - Verwende alle verfügbaren Informationen aus dem Chat-Verlauf, den Dokumenten und dem Kontext. - Wenn der Benutzer sagt "erstelle", "mach", "schreib", dann liefere das fertige Ergebnis — keine Aufzählung von Punkten, die du "gleich umsetzen wirst". +- Dir wird automatisch relevanter Kontext aus früheren Sessions bereitgestellt (Relevant Knowledge). Nutze diesen für Kontinuität und Bezugnahme auf frühere Gespräche. Antwortformat: -Du antwortest IMMER als reines JSON-Objekt mit exakt diesen Feldern: -{"text": "...", "speech": "...", "documents": []} +- Antworte direkt als Freitext (KEIN JSON). Markdown-Formatierung ist erlaubt. +- Halte Antworten gesprächig und kurz (2-6 Sätze im Normalfall), wie in einem echten Coaching-Gespräch. 
+- Bei komplexen Themen oder wenn der Benutzer Details anfragt, darf die Antwort ausführlicher sein. +- Dein Text wird sowohl angezeigt als auch vorgelesen – schreibe daher natürlich und gut sprechbar. -"text": Dein schriftlicher Chat-Text. Details, Struktur, Übungen, Beispiele. Markdown-Formatierung erlaubt. -"speech": Dein gesprochener Kommentar. Natürlich, wie ein Gespräch. Fasse zusammen, kommentiere, motiviere, stelle Fragen. Lies NICHT den Text vor, ergänze ihn mündlich. 2-4 Sätze, reiner Redetext ohne Formatierung. -"documents": Dokumente die der Benutzer aufbewahren kann. Erstelle ein Dokument wenn: der Benutzer explizit darum bittet, du strukturierte Inhalte lieferst, oder Material zum Aufbewahren sinnvoll ist. Wenn keine: leeres Array []. - -Dokument-Format: -{"title": "Dateiname_mit_Extension.html", "content": "...vollstaendiger Inhalt..."} -- Der Title IST der Dateiname inkl. Extension (.html, .md, .txt etc.) -- Fuer HTML-Dokumente: Erstelle VOLLSTAENDIGES, professionell gestyltes HTML mit inline CSS. Kein Markdown, sondern fertiges HTML mit Farben, Layout, Typografie. -- Fuer andere Dokumente: Verwende Markdown. -- WICHTIG: Der Content muss VOLLSTAENDIG und AUSFUEHRLICH sein. Keine Platzhalter, keine "hier kommt..."-Abschnitte. Schreibe echte, detaillierte Inhalte basierend auf allen verfuegbaren Informationen aus dem Chat und den Dokumenten. -- Laengenbeschraenkung fuer Dokumente: KEINE. Schreibe so viel wie noetig fuer ein vollstaendiges Ergebnis. - -Kanalverteilung: -- Fakten, Listen, Übungen -> text -- Empathie, Einordnung, Nachfragen -> speech -- Erstellte Dateien, Materialien zum Aufbewahren -> documents - -WICHTIG: Antworte NUR mit dem JSON-Objekt. Kein Text vor oder nach dem JSON.""" +Tool-Nutzung: +- Du hast Zugriff auf Tools (Dateien lesen, Web-Suche, Datenquellen abfragen) wenn der Benutzer Dateien/Quellen angehängt hat oder Recherche benötigt. +- Nutze Tools NUR wenn nötig. Für normales Coaching-Gespräch: antworte direkt ohne Tools. 
+- Wenn du ein Tool nutzt, erkläre kurz was du tust.""" if contextDescription: prompt += f"\n\nKontext-Beschreibung: {contextDescription}" @@ -229,12 +218,18 @@ WICHTIG: Antworte NUR mit dem JSON-Objekt. Kein Text vor oder nach dem JSON.""" prompt += f"\n{retrievedSession.get('summary', '')[:500]}" if retrievedByTopic: - prompt += "\n\nRelevante Sessions zum angefragten Thema:" - for s in retrievedByTopic[:3]: - summary = s.get("summary", "") + prompt += "\n\nRelevante Sessions und Mandantenwissen zum angefragten Thema:" + for s in retrievedByTopic[:5]: + summary = s.get("summary", s.get("content", "")) + if not summary: + continue dateStr = s.get("date", "") - if summary: - prompt += f"\n- [{dateStr}] {summary[:300]}" + if s.get("source") == "rag": + label = s.get("ragSourceLabel") or "Mandantenwissen" + prompt += f"\n- [Wissen: {label}] {summary[:320]}" + else: + prefix = f"[{dateStr}] " if dateStr else "" + prompt += f"\n- {prefix}{summary[:300]}" if openTasks: prompt += "\n\nOffene Aufgaben:" @@ -273,7 +268,7 @@ Fuer ein NEUES Dokument: {"title": "...", "content": "...Inhalt..."}""" def buildSummaryPrompt(messages: List[Dict[str, Any]], contextTitle: str) -> str: - """Build a prompt to generate a session summary as JSON with plain text and styled HTML email.""" + """Build a prompt to generate a session summary plus structured email content.""" conversation = "" for msg in messages: role = "Benutzer" if msg.get("role") == "user" else "Coach" @@ -281,27 +276,33 @@ def buildSummaryPrompt(messages: List[Dict[str, Any]], contextTitle: str) -> str return f"""Erstelle eine Zusammenfassung dieser Coaching-Session zum Thema "{contextTitle}". -Antworte AUSSCHLIESSLICH als JSON mit zwei Feldern: +Antworte AUSSCHLIESSLICH als JSON im folgenden Format: {{ - "summary": "Kompakte Zusammenfassung als Plaintext (fuer Anzeige in der App). Struktur: 1. Kernthema, 2. Erkenntnisse, 3. Naechste Schritte, 4. Fortschritt.", - "emailHtml": "
...
" + "summary": "Kompakte Plaintext-Zusammenfassung fuer die App. Struktur: Kernthema, Erkenntnisse, Naechste Schritte, Fortschritt.", + "email": {{ + "headline": "Kurze, professionelle Titelzeile fuer die E-Mail", + "intro": "1-2 Saetze, die den Kern der Session auf den Punkt bringen", + "coreTopic": "Das zentrale Thema in einem praezisen Satz", + "insights": ["Erkenntnis 1", "Erkenntnis 2"], + "nextSteps": ["Naechster Schritt 1", "Naechster Schritt 2"], + "progress": ["Fortschritt 1", "Fortschritt 2"] + }} }} -Fuer "emailHtml": Erstelle ein professionell formatiertes HTML-Fragment (KEIN vollstaendiges HTML-Dokument, nur der Inhalt-Block). -Verwende inline CSS fuer schoene Darstellung in E-Mail-Clients: -- Verwende

fuer Abschnitte (color: #1e40af; margin: 20px 0 8px; font-size: 16px) -- Verwende