From 29c31e79bdc77eb701610d6d2266064bd21c3984 Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Wed, 24 Sep 2025 23:18:10 +0200
Subject: [PATCH] Tested workflow engine 3.1
---
app.py | 6 +-
env_dev.env | 5 +
env_int.env | 22 +-
env_prod.env | 25 +-
modules/connectors/connectorDbPostgre.py | 21 +-
modules/connectors/connectorTicketsClickup.py | 145 ++
...rTicketJira.py => connectorTicketsJira.py} | 87 +-
modules/connectors/connectorWebTavily.py | 36 +-
modules/features/init.py | 18 +
.../mainNeutralizePlayground.py | 51 +-
modules/features/syncDelta/mainSyncDelta.py | 942 ++++++-----
modules/interfaces/interfaceAppModel.py | 17 -
modules/interfaces/interfaceAppObjects.py | 77 +-
modules/interfaces/interfaceTicketModel.py | 4 +-
modules/interfaces/interfaceTicketObjects.py | 1376 +----------------
modules/interfaces/interfaceWebObjects.py | 9 +-
modules/routes/routeDataNeutralization.py | 20 +-
modules/security/csrf.py | 10 +-
modules/services/__init__.py | 62 +-
modules/services/serviceAi/mainServiceAi.py | 12 +-
.../mainServiceDocumentExtraction.py | 80 +-
...zation.py => mainServiceNeutralization.py} | 67 +-
.../serviceNeutralization/subParseString.py | 8 +-
.../serviceNeutralization/subProcessList.py | 8 +-
.../serviceNeutralization/subProcessText.py | 6 +-
...Sharepoint.py => mainServiceSharepoint.py} | 53 +-
.../serviceTicket/mainServiceTicket.py | 41 +
.../mainServiceWorkflow.py} | 68 +-
modules/workflows/methods/methodSharepoint.py | 63 +-
modules/workflows/processing/promptFactory.py | 16 +-
modules/workflows/workflowManager.py | 68 +-
...y => tool_security_generate_master_keys.py | 0
32 files changed, 1360 insertions(+), 2063 deletions(-)
create mode 100644 modules/connectors/connectorTicketsClickup.py
rename modules/connectors/{connectorTicketJira.py => connectorTicketsJira.py} (87%)
create mode 100644 modules/features/init.py
rename modules/services/serviceNeutralization/{mainNeutralization.py => mainServiceNeutralization.py} (79%)
rename modules/services/serviceSharepoint/{mainSharepoint.py => mainServiceSharepoint.py} (89%)
create mode 100644 modules/services/serviceTicket/mainServiceTicket.py
rename modules/services/{serviceWorkflows/mainServiceWorkflows.py => serviceWorkflow/mainServiceWorkflow.py} (90%)
rename tools_security_generate_master_keys.py => tool_security_generate_master_keys.py (100%)
diff --git a/app.py b/app.py
index ccf0c7d1..16a60595 100644
--- a/app.py
+++ b/app.py
@@ -208,9 +208,6 @@ async def lifespan(app: FastAPI):
logger.info("Application has been shut down")
-
-
-
# START APP
app = FastAPI(
title="PowerOn | Data Platform API",
@@ -248,6 +245,9 @@ app.add_middleware(TokenRefreshMiddleware, enabled=True)
# Proactive token refresh middleware (refresh tokens before they expire)
app.add_middleware(ProactiveTokenRefreshMiddleware, enabled=True, check_interval_minutes=5)
+# Run triggered features: importing this module executes feature initialization at application start-up
+import modules.features.init
+
# Include all routers
from modules.routes.routeAdmin import router as generalRouter
app.include_router(generalRouter)
diff --git a/env_dev.env b/env_dev.env
index 4348fe37..266c6023 100644
--- a/env_dev.env
+++ b/env_dev.env
@@ -5,6 +5,8 @@ APP_ENV_TYPE = dev
APP_ENV_LABEL = Development Instance Patrick
APP_API_URL = http://localhost:8000
APP_KEY_SYSVAR = D:/Athi/Local/Web/poweron/local/key.txt
+APP_INIT_PASS_ADMIN_SECRET = DEV_ENC:Z0FBQUFBQm8xRjFXZXRwU0NnLTNhdFVUU3ZlcFU4emRMa2xLRno5c0xwSVhOcjgzNlBUWnZ2V2RmQ0RmRmE4a3BTQ3FRMHN2aWdsSTAxSDJrUGJ2UmQwME5Pa3RNTTgyVFh6NUl4YTJoZTVxdkExUVkyWnpac1k9
+APP_INIT_PASS_EVENT_SECRET = DEV_ENC:Z0FBQUFBQm8xRjFXQWY5WWpQYXUzX2dTQllVNk1Vb1J1S2t2NG9PMEYzSWNLeVR6WlhvYjU4TDFmYjZva01oZll5QWI0MHU5cXJvT0lvZHdoNW01WWxqRG9pdEQyWTcxYWlJVE5SRXVIMkh2VTRlYk1kSGRVNnM9
# PostgreSQL Storage (new)
DB_APP_HOST=localhost
@@ -76,3 +78,6 @@ Connector_WebTavily_API_KEY_SECRET = DEV_ENC:Z0FBQUFBQm8wSFJNSEJ2YmVieFRaWk5yR1k
# Google Cloud Speech Services configuration
Connector_GoogleSpeech_API_KEY_SECRET = DEV_ENC:Z0FBQUFBQm8wSFJNSFFITGlUVzF3NE5Ldk10d3o5MS10Q2o4aEJGM250WF9CeWxFQVNaNHBhMk1hS3E5YXRrakh5dmx0VDJuZ3BsWGVMTC0tbU9wWFRWZWM1N25ibWpkeF84enJ1Y2ViMVd1V0plUWdxN3VId1VRUzBhN3MzLVBkSXEwM1BHT2Z2c3JBalh6eXVKMUNFX2pfbGdGYUg1ZUFfcXhSRnJyT0tzdWVVdG1HSHBZOUgwLUVPMVQ4YkZUc3dMcFlLWjRxQUM1X05OWm5ndmJGcjFETV9UM1FoLWt2RVVEem92UGhvZlRFXzNxOVRzQkhyV0hqeTRWQXdMdDVDbEMwOWFkTnV3UXpsYWZwRENaRzd4QjlwTjJUWHhHLVZPTzd1eXNhSWh5ajNwelgxSDRlNUx0N05yTlI1N1RjSzdIZGhFLXBOMjEwMkxsT0daSVhiWVpQZUtfNVdwdGVrazVMM2NkUGZPOHBuNjM3YXdFcGFPdlVtY01ReGhsVENwNnRvNGhJejNHd3hFOFA0bWgxalFFNDVoQ2xYTG5VN1dDZGhndEdWRlFjYzBRMUgwbzRfS2N3VVgyaXJpYmJfZzNadmx5cTFxS2Vja1I5Qm1UT0hDM1FuNk5JRmYtT2p3RWp2SWxTWGZuU1psOUN4NEJTOHkweWIzY2NjbTJRZG5oRjVxNGh4LTUwZE1zZi1zLU43Ulk4UGtmR0N6dU5RcVVvRF9DQlE5Sk1FR1YtOE84WnVuTDlOUHhQR1JLT2g0VkNIT2ctWTBuMXIwNHhSSjcxNnNWRFhQc18zSm1UR1M0Mm54TGxsRG5uX2tDSWhBNDRGaHFObkhuVmtnVVlQU1FhVWhTdnpGUDRfcDQ1OWpERklHMmN5Y0RVWC1JYlItTUozaWY1dmxZUW12NXAtUEtsQWpqUFk4NzFwWVNfSUNqeDNkc25wMnJHN3c5NTB1dmxmUFZfU0NWS1hQMTc1NmdOTmEyREZRVXB0cmlyaldkT3B0Q3FQMFdpdWQ3WU1RZDZKYlFneDdnQ2NWWHFHSXl1c2xRN21LbDdyUGFUcWFxeVVTOWoxSkVJaFZiUHI2VFBHWEdvM2Q1cXdIVGYyc3Y2cVdRd00ydHdrME8tcDVqSmNLV193R291VElTNWFNa2pMQi1zX21VdnZ1R0tTbEJndndvbWRrVE52eW1aTFFzRURtdGItc3FJeXJDenVTWTlIZ0E1eG1yX2N1SHJSUWIxdm8wakdzaDIyaDQ0cE9UdDlhclp2MzVVamQ2em0zbmdLUzBJa1ZaRFpQaTBnZGpTWnRhRGZxUVNZWDg5VDFndWFmZlZnVG5SUEhlWkpfQnREWS0xbEZfNXd5OUpEUkZHa1NZNWtPbnBadFFialgzazlyM0dTb3ctR2x5LUozT3VDc3F1Tk5TbGN2MnRRS1hTb1gzWUNVSlJuUl85azhxaGxCMzVNQUQzVGg1cDZHalRaOUFrM1JPSGJKaGlKRTAwbnV4TmxIZnhkMF9FODVKUk1GZGlWZk1ScnhmQnJXWmRxMTk3SWhIdnBjSVJJOElkalRUWXFRTFNvQXZpdFpFOUdDWkhHOTRLVmN2cEh0X2JpYjNvRjhvUHFVQVNQdXY4OWxQSWNvcUNfZW5HYy10dEFicldhRHZLS1ktY2RGczQta2lGWXkxb2RhNUZMNExabWx0dXdhR3BSWGpSYVUxRXJZVTNBYmdNVFd5NW1vY2s0T0RlV3hqZjNSMHhJakY1TDBackV5bmM2V1o2SEJlT3RSbnpPR0VXbmhQTUtPMzYyU1RjbFRmQUlWTUZjVGRheXBuekZJN3NNZVFFZ3JHenNnOFdQVWxsbFBoYTVvQUd1NGx2SDdYcGhrdUpSWlRIRWVVUkpxdjJSZV9zb0J3N3o4QnRpYXpTRHdkZ1pqSWswSjdJMjVEZDZUNzZuWDVXWkNxUDRtQ1p1dnk2ZEx0S0NKT2ZUc3B5eEdRdEpnTlZQMkt5OHFjQ3FfcHpzUFZEY3Z5WDdEQkt4cEN2MFg2eXF4bDZFeHZFWk5tMFpUR0xDZi1JVjN4eUtRaXlNXzBJUFV2N19MVTRhMWtxWnd6d0Y2bVNFQUJSdEU5Z01FTjEtZDJmWkpEYUlsTVJnTEJYdU1iVFoySEttd3libURrSUNJelVic2Mzb0t5ZzNDX0hjZUtfOFQ1QkxRWmx2dmhnbDhNZllla1dNa0Y5akVpNDRKdHRSUU9fTE9sYVUzdzZtTkJEYTBWdkxkRURSa01TOGxWcVZkUmxkWTA1QjJjS1pOUjJEQTZxeDdSVXhNWldXbnE1V1J2STVCNkt2VHRuNEdtaHUweWdEbUZyMlhWd09FWWI0UUFyQVpUeDE3QXdfQkMtcjdpUU5GUTQzUEczNWg1Wm5rVEgwRW11RFowVnFxYnpGNUYwYks1Y3JPbTdUc2ZXS1ZfYzdhcno3U1ZXZUVkblRoOVl5XzZpTUgwRXFZeFd6NXdqTGlvNm1QeXgxS2ZFTVJSV1JVejliWFBVRGU1MWVudEZzRDFwSW94YlU1Y3JmallsVldXcHdvTmFQdnU5UE0tNHNHMXhPWE1JQUxCNC1WVVRJNmNJcTM3a1dUWWwzSVptTFg3OXlWLWxITkdiR0MyTmRzRWFOeHBMZEVzbms3RC1MTFo1TVhKeURhUW9peHk1bHhJbHphVzR4RmxiUkJwcmkzcWZ3S3dWV0Jkb2VaZ3pMTXdUNUJmZjZfVEVXeDFNMnBvemM0TUJNeUQ2SE1aeWczc0V6M0NUMHFGdURMbTRka3AzZ1d1TUh2V1c5RzBKQVVlTEstWEthOTdaWUZHTlRHaVNmbEFJRFU3M0l2TWlBNF9kaFpJUXlxMHJYa2lxOGFRbDNqMTA1RDFFclFTcGxmb0g2WVI3Z0NrLWN4cUNzNWVuR2VMaE41dWRqMnR5eWNuM0gwUmIwcTFEQ09qbmJCUFIwbjM4MGF6TlhxQWpKOFZXWGNKdnl2Wi1zU1BsZU5NYWpsbzVKMGxTLUJKckd6enJnZWhXemstenN3NGNqUk9HeGlGaFNhSl83TlUzLTVZWW9zYVZZTTZzSjNfd3JkVDNaZVp4dk1GQVMxblJBRW1BWUZLU1VKUFkyQ1dPbndUNjYwdll2U0JxN1FQNk5OaGVYR3U5TXdGNGFVZGVXcS1tS2dwbVc1V3hEeXhVNkJ2cjdGX2FpY1NvOTJhcWFyOUVGOFpOdmd0R29Rb2RIaU01R05LeWRxUE00WlhOQVlMbkZxZDNyUFRXdUFGZ0lOUmp2RzIyaDlzMGxNQk40VzFzYjAwMEhjRVlrNWJ5cFhpVWYxQkxYQ25rUDJ3RTY1VlVFLThiNG1nY1hkdnZTMGoyVlN6dkJleFhndDNCODhlOVl1ZHBkci1hd3l0NGNXeWZ6aUp4S3pHS1c4aDM3WElBTjBwYlNSbmJoMk5SNF81VVNqd0dXY1JUejVsZnpGS1Z5dHFPNUVVM1I5eGhjblZjMV9idFJ
kc3NZaUdHRlIzQWJQdHhzT01qVW8xUUwxNHZmY3Q1aHBnNHhXTGRjb1BmTmM2X0NmdkpxNS1JMHNQNVg1N0xsd0pmdE8wNktkUGpuX0F3LURyaGhyajg3eWNDdkozUFZIYmpJTTZ3WWVCVFZUd1AtRklFUUxTNXkzalpfdlc4VE1tOHU1Q0MtUWdLbEdYRzdVU1RkM3gyeEY3eXBWLUhXVVo4VkZoUHVkakJPNk0tNTJKTU1JZjVISlR3SmJBQkVhRW51UHg3UjBOMVRPRnF2dzIwRkgxczBBUWZpemFFMzFTeDJfWHZhSkhsTzBhcFIzVmZRODEzRUl1b1ZDUGFqYUxjN2JsbkhYdHVPT00yYlUwbmpVbkU0RkJXbWx5UVFJdHNvNUdxQzMyQnQycDJpMjlnd2xwb3huRUJiZUg5dkhaMjhMV2R5T0NsU0N4WjdBX2ZfODhOdTZOZ0x6WlRIUGI3MzR1ZkJicHN6NzUzRzlsUmVkNlR6MjZjTTA3c290Qzh4ejRiWERHbmFtV1BQV2ZKb2pGU0F1OGsySG9hNHdtSkkxTWpwV2gyaVpWcFpsRWs5a0hSY3UzMk4wQ0dkZWtMbG4xOFZ6TXdEOXBob3I0NjNkT28tZk5IcW5FUkg4YnBtUVFLY1Q5M1lzYzhrRGZOaDF6SnpnejRuM1Y3SW1xMUJmLXpJdEM0UjNHU0t5OEhoamxxLXRmWmtyOS1ud09XeGFzc3VFXzNPWWNGcXFwdHN2cVFEZ0dWdUNKbF9Lc3d6dVhPb3NLMlNEaW1xd3JPLUViYV9GTnNRPT0=
+
+# Feature SyncDelta JIRA configuration
+Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = DEV_ENC:Z0FBQUFBQm8xRnZIVUdfQTg5dXRfenlxanFvdXBCSmltQmNhRFNkUnNYai0zWlZqV1NmS212SlhsRUdwYXRRcjVvRVdqaWN0RWNrMjNYWG1VazJOaU5xWnZDWFJoWGxvTXpTbHNzbk9nTGlxa0ZZQk5WZHNHaHVDdmNQYTBRbHBQaFlvY0FYM3NzU05MUHNZU1AxaWNCM1ZTQllLdFpZX2pWektnUTF1WGRiRGtsOGE0bVdPYXp4aEhlWDVTWmYzWlRuVl84Zk5pREZJay1zMWRpTWxKYnFtYzBXM01vMzdiN0JlQl9kYXN2Sy1ZZUdnT2dYNzFuajA1QUJVbTFKS1dfUjNUMS1MSC0wOGc1ZDd2NTJESTR6M2Y2SDhsUG40OWM0b2djeDdZTS1qOVl2YlhhbGx2dG1KWkd3andQZXJVVC1zWDdBMzdHOVdRMHRJN3RabzZNWWlEY0xqcUg4Nk9RPT0=
diff --git a/env_int.env b/env_int.env
index 5677525e..e853401a 100644
--- a/env_int.env
+++ b/env_int.env
@@ -5,30 +5,32 @@ APP_ENV_TYPE = int
APP_ENV_LABEL = Integration Instance
APP_API_URL = https://gateway-int.poweron-center.net
APP_KEY_SYSVAR = CONFIG_KEY
+APP_INIT_PASS_ADMIN_SECRET = INT_ENC:Z0FBQUFBQm8xRjFaenVxWnprRDJjMUhjWk1CTnpreEd0eDh4bXBaQVdHQVlQWEQ5bERuN2tEV3pDZmlldFhPazhpRll4bjhLd0d5RXhtaldBVUdFU2ttQ3cxdmoxcFNDSkJMT2RRV2JDT2xKeGhHUXNFU2JfMEU9
+APP_INIT_PASS_EVENT_SECRET = INT_ENC:Z0FBQUFBQm8xRjFaLWp4RG5ubVR2M2lXRHRZSFpSVE91WFRSX2hwME1rTV9LdF9MX01rdHMtNkhjdWRlcGd0RDhYdWlxMUZ6VFV3akxfUTZYa3FoMGFmZ3Y5clp1X2gwV0RjNnVvdE5zY25GMGYyS0V2a0ppU2c9
# PostgreSQL Storage (new)
DB_APP_HOST=gateway-int-server.postgres.database.azure.com
DB_APP_DATABASE=poweron_app
DB_APP_USER=heeshkdlby
-DB_APP_PASSWORD_SECRET = INT_ENC:Z0FBQUFBQm8wTnBISUNVLWVobHJzX0xtS0pMcV9neXY3S05qc1F6RU9SRTdHM2F2VW1ldVlMYU9zRTU2OE9QTDBmcGRjN3ZUb1dobGZrUHZrR2EyWURtUXRYWk5MTExMVUJxY01yaFBTWFE4OTlHNHBsWHFSUnc9
+DB_APP_PASSWORD_SECRET = INT_ENC:Z0FBQUFBQm8xRjFhVHBfQnZMRi15aHBVR0o0RkV3dEptRGFEdnJqUEY2c1BONnhXb0pyUDlTYUQ4VmlOOU5POHgycnBLejI2NjAtRDNkMkNtUGxqamVTRkgyWFYwNDVkeG1FazBPSmFXUjU5RGdLSlVsZ1FFVlRnb1ZiSG1yZU0za2IzOXhSZVY3UkpfbzdjSEtWLWpqdEl1b3pONXowRGhKRU8xUXNnUmN2a1VPMXJndWdfYkZSSklGNG5DNFpOdXYxZGJkOU1zRXpsV0lMVzlLa3JHZzdWSVpSbkwwQjQ3TnVUdnJGM2FqTy1IaFY4MWtuaEdKRkVXVjJ4UWprWmR4SWN6ZkQ4TTUyRFJma3J0TkswVUNld3R5Nk9nNWxJbHVWVUE4QnV5LUpmUU9JV1FuVE5wVDA9
DB_APP_PORT=5432
# PostgreSQL Storage (new)
DB_CHAT_HOST=gateway-int-server.postgres.database.azure.com
DB_CHAT_DATABASE=poweron_chat
DB_CHAT_USER=heeshkdlby
-DB_CHAT_PASSWORD_SECRET = INT_ENC:Z0FBQUFBQm8wTnBIVmhCSEtCcDF6dXBCSkVzOTdaNUZVOUgtZ2JQQ3lMUjVKdUgxbnBkZHdPSFE5amNWTzhKNW4zcV9QSFdNakFVNXRVcDVlTnd4Tm51QjA2MTVVMVY1b3dBZHhQZXZLdUlsc3lBektKRjhIUXc9
+DB_CHAT_PASSWORD_SECRET = INT_ENC:Z0FBQUFBQm8xRjFhRzFNT3RtV1RFNDlrZnNpSTlqWXdXbnhmeDZMb2VVUG9pd1k2NmQtWlRoeDY2OURDWXVSdHJCeEo4MDBWbTN1SFN2M2dGalBKQzhtN05aS1RVSnRVZjF0T2NRa2o5c0dhRFJvRHlOektwZnZOZm5ZLXNXSEp6Y1RFNUVnSFpWQ3BTbXFGY1dNeEFKb0VEWlgxY1RWVzdLbjRraXdqSldwejY4aVpZWVFuMnNka0xhNXhDeFZvUE5idlRkQ1ZpSUJPWFN4bUZxQ20xNFNyQkRrSlBhMW1SZ2dNbnRHRGtXV0x4NHBvT3dhaWVzUFRsS0U3RVZwajA0OEZLMEVFZmNaTkFNMWdtSkdYT1d2bkZtX2JZWVNzdE9sRVV5VHR4NUZnX21haVJlMmVPTEk9
DB_CHAT_PORT=5432
# PostgreSQL Storage (new)
DB_MANAGEMENT_HOST=gateway-int-server.postgres.database.azure.com
DB_MANAGEMENT_DATABASE=poweron_management
DB_MANAGEMENT_USER=heeshkdlby
-DB_MANAGEMENT_PASSWORD_SECRET = INT_ENC:Z0FBQUFBQm8wTnBISHA2OXVrWjhaQURZM3g4WGxiTmt3WW05WXBIRGVwNFNfdmphOGdUQ0ZCMUdFTlAzZlJTM2ZFaEhVWGRqNXBtREpTalItcDNxS1BJeEZKdWc0dWxHUm41QTBMZ3VqT3pHeFVmVUtJWE1YbTA9
+DB_MANAGEMENT_PASSWORD_SECRET = INT_ENC:Z0FBQUFBQm8xRjFhN0xPc05UQ25ma1c4eUN3eU1TaHFrZGxpQk1pYzA2aUNQYW44Y3pBVlF5YUJramNCenRQRUJreERBRTB6dnNwLTN5R2dwdzlfRWI5VHFhSUswOUdqY2RmelIyVDk0RlIteGlwNGhlWHY4UmthSF95SXhJVFBjT0JwY0JrVUxPUmt1YWN1RHZmd19TTGlrWmEtdEdHSVExSDFjcFZxM1IxYkhZcVoxc21ndE9HZXVNQU9URDVGektkcktkTlFsUDlrRGNDN0U2S1djVktWMk16dzducllweHlQVW01MU9LbTZORnhkVGw3UllUUnBwNm1YQ1hvZVFjcHcyTjRldkRPM1BCUldReEZ6dm93VEItVG1sRFlrNk00czZQd3NrU09fN2NtYW45Y0stLUE9
DB_MANAGEMENT_PORT=5432
# Security Configuration
-APP_JWT_KEY_SECRET = INT_ENC:Z0FBQUFBQm8wTnBIVXVUQnhWcjhvVFhtTDl5T1M1SXZZdjZDY0tIa0hmbnRuanUweUdoQ04xNzhod3VscG44V0xlNldzY2t1MVE5UjVjUTdSRUU1N3VBUGNVN0ozU0o1akNBX0x0X1FNOGE0TE9paTh0ZEVnZmNTbGFnSjBpNTBXMTZxemJwWmRTdkJOWms4VVRieGpSM3VtaFY3Zmw0NlJTbVVfbDdwYldVYUlfbGVFUGhsajVZPQ==
+APP_JWT_KEY_SECRET = INT_ENC:Z0FBQUFBQm8xRjFhWHFFUTU2RlJKUDlrbk5xWDRqYklCUFUzVy0zTGFKdTc5X0RGcE04a0M3VkFqZm9GLXEtUmNhcHBzdDR0WHRNTERPZEYyaGtqb2FlX2RMSU5QYmpuaU5pWEFZMVJwMmFHT2NGUVZMQmdKM1FKTzRDNG9jb0ZiOXhldjMzOVZneTZZUHF3dmdyeFlxVGF3OXpvaElqY3JWekhtT1FISkFOclJsNFR4MVJTRTJNUEZjZWtQVUpVaGE3R3NtUzVMYVFBTEVBb0tCMUlSclVoVzBxQU5OVml0cWNDYzNvUWZfbHcwVEsyVkoyOWxneW1qWGxKZFZlTkdhSFRscDA3YWhjZlpJWi1LWldEVFk2ZUxxUWwwb1l4cE9SdmwzcmNuNWRDeUZBcEowTkRta2lZMnNzd0x3eFJ2VXc5dGVwLW5ZbVc4VjFmTGRXRENRZVZkRHVvNHNFeV9mQmsyTGU4U0lKdUtraV9kSEpCZnJxTWcxYXNxMzYwaWg2bm1vbFNJMnkwSWxfazMyUldpMGZfV0NfQWY3bGJFOWJ0Zzc2RnpRNnY5clF3MnJVLTgzQT0=
APP_TOKEN_EXPIRY=300
# CORS Configuration
@@ -51,29 +53,29 @@ Service_GOOGLE_REDIRECT_URI = https://gateway-int.poweron-center.net/api/google/
# OpenAI configuration
Connector_AiOpenai_API_URL = https://api.openai.com/v1/chat/completions
-Connector_AiOpenai_API_SECRET = INT_ENC:Z0FBQUFBQm8wTnBIS0RqLW13RThlbTNNYUdLa3pXeFVYVm5Mc0czREtRczRBSlVjcVJJcVpKU19kRUZTU2pqMGZFR2pHZnZ4TGdMeFJqbHl5aTYwa2pzcTlNZklnMUNIZHZwdGFuWFhGZDlkemI2cnJuRURBZVBmM3Fxbm91c0ZQai1UMGJSM29kanIzMFB4Z2x6QWcycVk2SzRHQXc2YmZRPT0=
+Connector_AiOpenai_API_SECRET = INT_ENC:Z0FBQUFBQm8xRjFheDVaRE1LYUhScWpEUFhSNVNSQzNZRGU2VmZIYUh0QXkzUFJ6QlY0Mjc2S2dTWk1rMDNLeGE0bFNNNXZST0JKZ0JKT01BVnM2eU96YUtmTlZqOTRpWDB5blJ2Y0xuTE91eUhJZFlfZE5BWVpRZk9SX3RaMldZTk9TMmdoUXZQRWFpUEh6dGcycDRqYkg0cWt3bHpnc0swT1pGbEU5U1hfelU0Ri1HUjVEWW1tNXR3eXZEOVBfWXRMMzkyeDBIVXFZOXNWQ1R2bF9aU3Q5eG1Gb3M2dlBVRTRVRURxOWJxSWRHOEQtMWVhWTZXUDRnUUhPT0d1Wl9UNmlBelhlOEFuVUFKMUplZmNNS0xVUVlWakNoQUotSk5oNnRRV0phbVJNdDNCdklPZV9jYXBkRzd6YVNCejEtVEpTdi11LUlhdzB6RElHVUxORzBPY3NScnpYM1g0VmlFYTI4QlUtc19NcjRwaTZOUDZfM0s0NWxSZ29Mc1gxRVRtUTRDOE11cTgt
Connector_AiOpenai_MODEL_NAME = gpt-4o
Connector_AiOpenai_TEMPERATURE = 0.2
Connector_AiOpenai_MAX_TOKENS = 2000
# Anthropic configuration
Connector_AiAnthropic_API_URL = https://api.anthropic.com/v1/messages
-Connector_AiAnthropic_API_SECRET = INT_ENC:Z0FBQUFBQm8wTnBIN0pPeHE3SzFWbTNySU1NRThmcURKWWNiZ3pQLTlwSXZmd0JkTUxXb2VGTVIyeUhZb2JKRzJsQ1AwTlZBWl9RYkRaQkVoR3dxQkdGYUFmd0xRdW1jUGxXdjJPbDlDVTVtT1c3aldRVVNoWmRLd09TZW5xU1JOVHp1ZE5Za0xBODR1TlhMQ1ZiaEZ4Nm00QnpPSks4RGVxYUhqaGdvMWVwMzBKSTdIUEVXSE1XM1ZNUjNBWDAzLWxwLXlib29OV0pOV21MTkFpb0ZDLU5seHMyTldxSFdIZz09
+Connector_AiAnthropic_API_SECRET = INT_ENC:Z0FBQUFBQm8xRjFhejRGeGtkQkt0Wml4NHZkY0JlV3BGVHBPRmZiQ0RrcmVVMUwxSWczZUExblA5eGFwaFRhTmdEUXVTS1Fad0daZXBWOElpS1FQUEhnT2tGU0hBSnRRdUlOTXVSWndBTlRwUFdsYVlQNWpZbHFBZURUZ19RdGNhZzVDZmF3QWhQaWV3eHFFc3BtS2dnTnI1U2oxRUtSc2V0SVdjSkNqUEVBdUJqeDhJTklJMWh2Wmk4RFBGV2d5Zk9QMGw4b1hQNm1pRnE5MFEwV0lWWFJsMEVOWHpNdWpBQWlMTXFHbmY3dVN1dUV4SVBJcnlRYU1EeV9DeFRucjlXREo4UG9DX3E0dzF2YzZKS2JGeVRtYUhFUEduR0JNTUljcHgzNHhVeVdJWC03X1NUREdJcUNwU043QkFTME45eXBkUUNzanRRNVVZT2xLWG1pcHJKNTQ3a1pjQUxXdlBjcmFRNEIySzVuNHBfSm4zRWRiM21GazdNZGNQWnUtSGN4d1JnVmZoNllabXd6dG0zQ2NKV2NxWlFBeUt3Y1puTEJ5ZkRvYXlQV3Aza3hXYzlxSFBscU85LXNyZC1NdFNNSWFmTmZSQTdiemNPQnlhYWNtaWYwQUpLU2lrLS1iZE44VGdCNzJoNGlZRHpSXzZSZWx5Y0E9
Connector_AiAnthropic_MODEL_NAME = claude-3-5-sonnet-20241022
Connector_AiAnthropic_TEMPERATURE = 0.2
Connector_AiAnthropic_MAX_TOKENS = 2000
# Agent Mail configuration
Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
-Service_MSFT_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm8wTnBIVXktVWJLTEdLSDd1MENKejQ2bzdCTUlTQ1ZELVJfSGhaeExkMjQ4N1dNVnhjZjRTMl83dlBqeEJCMHVabVpZVlQxRjhjQkRiOHdpMUNaODJqN0UtYW9GallJekY0U2RVZHpORXg4dThuc01uMy11ZGtDb01BQXc3TlE1ZXBjaU4=
+Service_MSFT_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm8xRjFhdFBRZ2NkYlUwZW5STkFWdUdCYzM3b2dLTC1CWGo0eS16Rjd6QUpTNTItZjkxbUdTYXNMejlmTGt1QWsweHlCWl9ETldNcUNwYWtmUzJIUmJSUTkwczktaXlXRjY5ZHBXRElTZXpkdlp4ZnVYc2tNRWE4S25iWlNfYkJYNU03QzdVXzY1N1ZoZ1R2ek1fYmVsQUVQVkZCb0E2N2ZPVmtSWU5Ya0NhclB3Wkt1QlRKQXI0aUVpZngtaGZiN1FGYjYxVHFjcjNzbGphVjdQM0tTQ0FNNUN1bFdYVFJfbzdWajFvVDFPQzFoRzFmYVBTbmFtT3FkT2t1WW1LVkJiVnZCd3Vxd3l6d3ZMZkdLUWtMeEtVWDhmbE5CaXFpNHBnTXJVdTBZWUNnQjJscFV0Q2g5RmtkQ3hTQ3drRjcwWFVsbUk0cGREOGRRR1FwMW5oN0V1dGRVdlRRPT0=
Service_MSFT_TENANT_ID = common
# Google Service configuration
Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
-Service_GOOGLE_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm8wTnBJeVpuNWVraERfUFBaT3BDRVk0T21KcGdrYU9zNGNyRkljNDR2TnB6R291VGJJM3d4RnBHTVVXYTRCT1F1RGFRYnNTX0xTLXFqVHVHTnN0bG9LeHdEbFpZcUNIMXFWY0dJYko4U3FNSk5vUnY2ZWRWWFJLUjR5WkJrZmpMU0pxNGI=
+Service_GOOGLE_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm8xRjFhVXpEblBSZHh0X2V6bUtTMHp2NVRfV3FOeDNTci1ycG9FeF9WVVNObnJ6VFNiSEkydjNpeExYWkVyOER5TWNSVXFXNlpBdUVNNlZ4bU9yaGdOaFo3UGxSVnRNMWM4SlRVcGZUV2hFTDFuMkxzbVF0Nk4xQVFKMWlCbDdWNG9tT3prUDAyRmkzR1h3alBPSGFMTDNqVFZWNElDeTBhT2VoamRQVnB6bmtvM3BKNWN5dDl5dnZkMVdoek1pZjk3OXFmcDl5bkU4U1BwNEV0MXRjLV9iUHh0NGFGRHFiTTlHclJJdUJoWDUyclhOdEFfY3ZKRktrTkVOZ1Z5ZzZlNVp2ZXFpcXZfQjZTbGhHN3RnSDVqZGZIUEJSclNUd3FOOTNBSXQwclU3Uy1vb0JNUzVaRHlURU4wSlhibHpWRzJqRVo5OXBrYUJaTzdVdkNhcW1VWVRkUW1nPT0=
# Tavily Web Search configuration
-Connector_WebTavily_API_KEY_SECRET = INT_ENC:Z0FBQUFBQm8wTnBJV1BlRS1UaTZmZkVYZ2hQU0lBRXVEbDl3N3BFNVI4MlBsN2JRSHdrYV95SC1vdk1pMnQzNGRaQThrRy1HcEJyT1Y2OXdQcmw2Yk9KQ3RDRzRpamx2cFpkYkN5SjkzNVVmZnVaOWJnN0MwTGZMcVdRdU1jY2kwVGhNMXZQUG9kajA=
+Connector_WebTavily_API_KEY_SECRET = INT_ENC:Z0FBQUFBQm8xRjFhY2ZWeWRfbWpSSUl6MElHNk5tVzZqeGVMXzdFZy1Ma211aHlCUm5QSTJzMW92YWg5WWZVREQxX1NJcXVlNE52QjdxekhMVkdOc0xacEZJNmg1VEVlbXJJaWpJaEpRQ0tnZHU0R0hqaGdIRUJLYmRuR28zcWR0SHR6NG9NbXR1NENLTHZpS3FlcGRQc2tPaDRvVU5USUU1QTRjeDd3Yl8zLXVZSGxvRXNqalV1SjJNX1BIRm0zVy1hQ0tNdlBENV9yZ0dmLUpCYnJRc2dzcHpsRUZCWV9Zal9mSzJUSDQyVEFJN0twaExyWDVKSnlNUGlLbTk2N1NmUC14Z1JGX05aTnV2c043Z1JYZ0x5MjZYVEdITDNTZnFuZW9KYzVRUnpDcUVTMk9HTW02TEJaUEVZZm1wYU5CdE4yWGc4UDY1bHozOVdhQUdWd0lDeWlQbS15aWFhY01RPT0=
# Google Cloud Speech Services configuration
-Connector_GoogleSpeech_API_KEY_SECRET = INT_ENC:Z0FBQUFBQm8wTnBJSDh5aW9CNE04dDVsYnBUNWdGUWhDMmFlNmY2bnl5X1llVnNZV3VGakI5RFFNYVprYm5mU0F6TVZ5NDZkYlVhMGpzM0RPNGFmNDdvV1c0Y2hUQnowYzRmREhwRk5fMVVnejlGR0Y3V1pVemtFbEZEeTFEOUptbThaSHJJeGtwWGZZQ0VLYkpaTGRXMVFxX0hRX2treG1ES2VheTdsR1U3eUxYV0xPbzExSDZzOHBQR0FSdGh2V0hXRFpRLW52ZlJyMTBDR1VkVkh4VU52MWVwMDdxYlBfbjlMeTd5M0FIVGtaT1c3WmpwZjh6Skp2djB2cXM5NkdOS29ONzdaNGk0WlpzTlJxU2cwQWxTd25XYmllbjJXemQxY0ljZkZqZFV3MEhucXNfYUR3T3diWEFyLS1WQmRiZEJXbERuQXhXanZQUDBJZFphZGk5aHFTQVRkM3B5QllYZ0Q0V19VRlRtVThEb09TWGFHVHRKc0R3eGRoYWpkT0xRbXhGb0pFYUk3MXBGekV6WDdzekMtaU1JNlNaaXdQa19keUotSDJkZDNQTVpZQjlxLWhwRWIta05YR0sxTXRVS1ZLaHRJM1IwUTMtQlUtbHU4dmVfQjdsY1Y3ODFSZXBiQUJIdFNrR3dGelkzWjhQaXR1NlFIYm1KMFVNMmlMcGQtRE1zNmx0ZTRuVUhVRUFuNUEwTnNNSTBnaTRtaVNOT0lLQTR3U01SOHNjZkdOQ2VXQXBuZ3k3Q3NjbDh1dU5fWXVkN0pvNmxZWlQyaVVLNEFEN3dxRkV5NUU1dG5kdGxieXo3WGhIalAzWjQ0TFNLRFFVZkJFRUNjQW1xNWdUUHRTOG0xVklmd09NdGd4SURxdmI2UXU3U01PVDZSM0lhbDFQUjZobkl3VC10eThuV3BSV0l3Nkw5X1dVN1RhYkdqb0ROempfQ2xjc0lQemtaSGNkMjJjR1hjN0V3NFhta2l1MVRGeG9PekdhM1V6NGpCMG5yYmZJb1BmdndyMXdpOGdSSldmRFg0UlZSX3EyTVN3ckotaGJLbU5EMG1jYnY0VmtFNk14dzdzWVloVFhWMkQxNDlmc3QwSWJZV2ZaU3J2NlNkdHlyQUVXUTNXczJZMzBua0Zmbl9MYWxSSi1QdUowQkdINWJIZFNoUlY5V2NYYjFva3A3OHZ6MEd6MGRvNkJrQjNKa21FcHI2Y3pfTWQ2TUFzUEp5M1FZazVUSUVUTnlkQ2U1RVU3OFdYYXE0S3QwT210a1c5aVlPNDhET3JBeUFHeG41MDM3aUdXVWN6TUtUMi01aGJOWGN4WTZDN29WNl9SdGRtR0gzRzJFZmhZa3p6UUpoVEMyb196aFdUVE1nTDNDSkJuN1lsSWlMWlBVS3VhclRxd1ZhWXhNUDZRa0Jlb3N5UkhiZ2pYc29ZZm55bWFZa09DZ2lvZmE4YTRoYmJJREh0ZXMtSkN3MkJBNDlJQTR1MTlEVTFQTUFJMENCQmFCeGtXYlJJVmtSUjBuNXBDa0wtVTJuVk8tVGk2dWxmeEgyV3pkOFdDU1JhRnowLU9EbWZhYWkyZVRfSDVJWDdtd0l0TF9OQ19NRi1tUnAxdHg5a0dFSDY5RzFsR1NiV0p3VG5DckNyREVjcWQ0elV1aktKNlJkNXdIVnpXY3U1bTBUbVJ5a3VucDdualg2cU1rZkEtOWpWa2tGU0puNUpNUzVaV3Y4UmhiZWhLTkdzS0h5NkxkcmNLblB2dG9lb2xYYXlqZkZiM3ZRTFVtM1VwOFFGQ1QxWFh2cUlhMFFyME5rSEJwLW5IUS1pRmNpVXVWYUR3emg0N2lDXzlFN1NnRk9ab0lLaEVvaV9FcEVfR0VBUzRkWG9KUm1sRk9DcDEtSGQzMFFXLUt3QnBpaV9fV2lQVExXSDcwc0E1ejE5SWd5c0NnQTlyWlBuVFNCeFpxN0M4M3kxaHN2RmJXekxiNy0zTEN0N2daLWpERFJ3SFFMUk11N25mRVk0MHlyMHE1d1NDbFpFaTBGSDFFcksyYVJZUEdKemtNWE9qbDBfMEpyaVVaSFdOZWpodGt4N0g5TGN1NVRQUm41cTNxRWdyTDFjd0xTSEZibUt3R0pISkpmRzlSVEYzeWNUdm5qUEZPVlVJX1d5MGpxUWNjTzFMaGlEVG9GY3RhUWZiMFpsbVR2OHNLaUs4ZkFENmNoRFRyd290a1FFZng1ckczaWxMMEVsZ3dBb0ZXdlh2YzJxTlhwZkJTV2VBandjWFB3MjJrRTR4LUUxV2lCRTdGYjNoeVhZTUx3RjFRNlFoY1VYMHRyTmlxdm9jUjAtWndLQ1RNcENDclh4TkR3ZE9tSjFOaGxIcmtWQ2ZIaHRabGNJQVI3RnZHUGtBRWt3YmpuUUhrT0VRTUxfVFZOSWZ1Z25IWUsxVDIwZEY0blQwbEdXY3hETW41UldqcW0zMUNCVHNDZkRyTGlrVlU3c0lWdFpvUzlfTGtLMGxJZUg4dUdjTU01VUtYaVQtSFBqT2F4NXhEUUlBRU1lSU54dzFhd3d2UjUxb3JXSUdQbVRyUTFlc2Z5WkNGWlNzVTc5aWllcGsxbzRmYVlFTWw4VVVtTDdkczdzQ3NFSGMzdVltSjlfY3dyNzlPaEk1cE5jdk4weDFKc3BTUXpPbDI5Y1ptblp0TGJ5UGwzVmU4VEtWUTZEQlRtemp1YnppdDdkSHpyY0c4NlZqSmI1UVBwSTJSZ1NNcEQwRTdySGRySi1XUkhWTHlaajFZSkQwc1k5NGZDZUhzRFdBZXNqSVdwU0ZsMGlNLUZPSU1OT243N0RFQ091RjVyaVRBdEUtOWlfSTdpX25laHNwVFlFU0RjWE42amhxSHlvcDlDdE13Y3JtWFNsQkZoclVFS2hFblQ3MXpxOHJsUGMwbkVmT3ZMMFZiNmpHZVRCa2k0YTJnNHM0dWpyQXJaYjU1Z1hNWGU2aU1hY3RxYWVzU1hVWWI4bjR2Y0R1MmF0NDVlc08xTmVwQ01ENThUMC1kWk9SWG9IWWFDZ1V1RGhDY0pjVEZBUGJreXh3RTRuQUlTWVR4NlVuaGozbmJVTXNzRDdrMGZaclpsb290WmFaTzF0NThITENXN1JSN25JS18yWnpJY19maVlXN21QTWE5M1FhWGJqTFQ0RTZoY3dlcEM2Q0gzQ1RtYk9jTnVkU1drVnhZc1NkMm82c25CWkpaTW5KbTd5dGJHSHhhQzR2TFk1NW9pX2pQNDdOeVhGZ0swTklYUkRZbmdsVzNabTZjWDRrX3BCMC1OT0M4R0dMTWlhQXhIMXJVT0Z0bmp2aTBFWnRDSFYySVFULXRneFpBQ0ZwdEZaTHAyeWx3bFF0Q2o5YzlfbUQ5Xy1uWjFKQXVVTE9Qa2VPNDZmQ0kzaUxBNG9EdGpyZm9VOEZhTC05V2JBMU1KTmt6RlY5aUpILVE5bExnVkdCcDlLZExBeHRSWWxQS1pkcE9BWjdXeV8tb1ltcTVoal
QwLTl5Mk5GanphVGxKUzlJUjk0Y1g2QXBSRzBSNkkzTjg3bTFlYU5XbE0wYkNacC05bzVFN3F4NXdkUFBqQU9QYWVtdUM1MWtNR1RzbzBQVHJGR1NzTEV5TzdCUXRmcXJuTUVCVV9QbXFXcG5QdmJfQWlVMXRvU1hOakpHandKbnFhZTlqUWtWbXg3MzFpS2I5SlJzN0dQN0JuaGNKSWtVX0Vxc1FPUk4zMzVTMVIzSWJYcUVYU3Jldm01bHpleU90MlU3XzhhVWI3a3pqajRjd0g3YVppY3MxNUdKU1lUZFhyUnZDYXJWcnltQ2tadEFxdG92aUI3MU96WnFBdXdLRWZkMFVtc0N0SDVJc0RYdjhJZVhVWXZfb2VDY3NxVEdZc05tRWNVZTRUVHphT1RyWlRoX25iNXlLX0pDeXZrbXR1VkxnUTlZOVpUak5GYzVUODZvVmtKNHo2QUg3b2pSZmNhY1lUUVA3WXktMjYxWjBiTE5XeWJlQ0VhY2VyVnRxd3hvUWxtSjhCSGNkOHFLRTF4QmpLc0FoYzg0b2xRZXZpVTZ5TENXNXdiTEZNV01nSXpPR2U4ZlZzXzZZeGdmSG5kN2hLeDR6WU0zWExxUXJBcXR6NmdZb0ZiZHRPSFVuRUVyZV9zOUJsN09GWE5YM25FRlNoLTlNNWVMcTctb2l1Mzdyb0Y5TDU1YjdnQVUwSFoxXzF0VFh3TGktbFl2bVQ3NzdkRHV3SlNfaEZjY1ItYUk0OHhLRk9rUXdqNUVTdGNBc1ZCY1pFQ0N0WE45MUEwUlpwejdrcDNBPT0=
+Connector_GoogleSpeech_API_KEY_SECRET = INT_ENC:Z0FBQUFBQm8xRjFhYUN6eXZFUVE1dUJRdGNVWktodUF2dUpZUmVGa1o2WlQzdVJlR1dTd0pfRFY4SVNYRWM4aWk3UGJSaEl0MGRiT19vbUdhVTNvbGFzS0NyY2pBMXdVSXFkOHVWRC0tTkpVUFdTVlhzeFd5OTVOdTVyVVFIWUJ5VkpMb2ZyaUtyWFBUeFctcGZDYXVaUEJWQUxmNlh6TDUwQV9RdEZYV09ENnNIM2JqU0xpbHcxcm56QlI2azNIMjdvRUlsZHVEckpwQnA2cnBuRC1IdTJtcUUyZE41NVBoRS01Y0hoYTZxREZZQ1ZJRndObGFmT09QN3Q4MF9sUWhEaWV1ZlJ6dVpXQW1aTk5ERW5jdDY5U2MtUUFBZGE3a0FjcUNiZ3RQY25OZGJOZEJjVG1Id0FOVE41N0ZKY0IwNVRxV2ozd3E5WExpWWFwbnAtZzZoM1M2bnU2MkN5WkFPTVJhd2E1M0lQRXpwaHFjc3cxWEt0dWVHb2NuZ2RmdHdpOW1DMDJwbXJMa25EMTVFbTBHVUhMY1Z3RG4zUjVEZzNPNHRnTjEzVnJoQzlGNDh4NDY4d1oxT0pVSERrV0NScFFfTjVPbTFwM1NHX01lcTUwS2pKMVVycXRDRTQ2aExDYXhla243MkJNWkY5elV0QUJlSVpzRmxtajc4U2RoU0ZWTmZkOVhxYWd5dUlEbWRYeGtVN3YzLWY5cG02T2kzNm1waXcxOFNaV0tNYWRWZkhSYlc1QVlaVVdCVEI5VmVpNXUyMVUyckoxSUh3N1NKczROZnZyM0owbjNlb2h5X1AxQTJ3YzU4aVZFLU81ZHRpcXJSdkhVcEwwWl9ydGxFVkt5c2h1NVM4UDdkOE9icXFoMWNJVGNYNEhWd1NlTFJua2ZZWGJXY2xWY1VlZFM5V0UxdjdBc0JWTDdkdzNuUkdUOUlZMndxeUFoR1V4SkhBT2FwYTREODNiV2dZNlg0Q3dHcm1OTU9aLXlvSElvNzktM2M3TWhoUkhYZzNULTZPeWZUVkRpQTJySERkOXRrdEVLa0plb1N0NHp6Q2Q5OVo2dHAwTXluZGlDbDJSbWNBWEN4b2g4MVlHZlpyaWVyX1hBYzQ1NV9Ba0ZCVXV2YkJPdUoxczNYQ2pBa0tPTTNZX3A1TDExczRRdjdmN0xCNlZzTGlYZzJlcUhXbkpxUTg3VHFsNlpuQ0pHOVhHSEs4eDBZaWdmVlFYcko2cHZWS0FGd19rbXBHeEtqcHVLalo3VzF1blY0bUZKdGhmbFJOdlYwQlFuVUVVUVU1Vm5FX1hqMWxJTzFrVW5FUTF6cWJpNVVCUDcwUFR4ZkZGamVmVkZncVB3OUZRODVjd0lvSU4wWXBtYzVaT1ZWN2ZyM1NQam5BRUFYVzdmMDBLaFRPZ0E0MFJGWnBJWUpwazhSX2hudVdiaGh0dnVMQTZzY3Zvems0b29FMWR2WmhWd0Frbm84R2ZTZzQxaVFfWEVUUW8tRzdFOVhjT29TNWNyMWcwRnVDem1EWHVQMTFzMUViWHpkZUVUSm9Ndkd3cVJaT1BLQlV6VlpBLUZkMWc5X0VzWkU2YzlXSElIVWVlamJqNlYtdGNIdDliYVA5cnRVcmRKczd3OHMwMDhubmdMc3RNSzB2dHJadnRPYWRuZ0pYODNpRWJnNWdyX2JCb2ZoZExQX3RzdXBtb3hLZ1VxVm5feklKMHY3VTdic3NrZzRsVGZ1bVNvVTJUSzBfV3c1RkVYX1VVLU9OWnFBYks0eFVmR2VqdWtFNDg3aU85SWFqTXFzLUNmMWo2bWhqeklBa21peFlQWW9oblBBbU9JWVEwLUdhYkhkSjF3OTVHVi0yR3pqM3RtWDRfdHd0NWhmLWtHaUJzUGx4YkZNalB2OEg4Z0lZN01VZFlndXJ1TW5USTJNY05KXzRIVkh0dXV4U0JudkRseV9wTVNjXzVfRUU2TkhVb2VuUnYtUGpLVFJLbjVoSW8zTGhFTFN1eHdENTBWbEtOcW9KdUh6eUthMmZHQmpyVXBMV05MUDNfZDdiMFpRTWx5YzZPSUVjMFJ1NlNwVWl4ZHFOTEl0VnBHQW1lcmZ0aFVaRm9sRllZcnNwbG10bXJDTnZqOHdDanFTWTJOOV9xb1I4M0lsOC1LaWMxR002d3RoZ0V1dHhOdi10QlBWakRLN0dFeEJ6Vm9yNEdNbXdrV1ZsMVZyX2VfekpSSUdmbWZYV01rWVFRa21fYnJBRmZiaVBneV9tTGdMLW80OEpDUFE0Nk9UQmNsNUlKQ1lrbzl1QzE1MS1rei1zdDZ6MXpYVEhScDV3azkwRnZIdGgyZ015WHpJQkxmeUdlMjdIcHlmOXFpUjlsMmV6V21rdm5sczUxU2F4cTh5OHNoYkRNa24wSGFJZDVOczdsTm9fVjgwY1dJNnRiMlE4ZjZRem41aUtja0J5Q0w3N29vM3p0bWRZN3ptU1VuSnVWYkE3QmQ2VDVWSkRrcEtXV29FUVU5ZVhDazRKb1dLX2pqNzBENThlVks2cUpMRlFyQ3dFa1M3ZXUyeTFrcVFmQ3E2UzB0amMxalp1Y1ZKalZoMEZwUThpaFV0VGJlS1E0eTExMGdZNlJKcnJ6WU5mT2ZDTTh2aUdDaUJ1amxlTlRKQ05STl9saERydmxCb0RsTkdqQUpDRGthaDNRT3d6Z0lmbDJybVJlc3pBSmpqYklEZUVzY25xcXRNaHhSTVNPVHQ2R0o0RU53d3NwSmNJeTIzRGV5NjFtV0V0TVVpczUtcS1wTDgyVXE1bUkzNS1EUGc1Q1NReFEwRFRjdkN0eVFYWEdFcnJGRWZQVDZCX2ZUbWJaUTYzSHRJM2JtVDVQNWZVVlZwZm5QQzJjdmk4QnBYR0pmX3BrcUVQTDkycmoxOVF3bE8yQVFDMFdYejNCWkE1SHJoT1ZuT1FFWmkwby1mUU43MDlYNUxtNXJ4S3JBWGtlMmE0T1BlZ3RDaHllYmhhWTlweFJNdjRiX0IzZWxZY0JqUnNtRjJzNWRXZGFtRUZpX1J4QW9POGEzb0RxaXFia29sZjFhTXZWWF80bEFtaEJsdWRaU05xVnhrcWIwOGViRDZjVGdJTVVYX0l6QV9GSlV1NkdmdlZQOGFOTXJwOHJQX3FXamdRQTdCSDJfMGsxbjZOYXV3NDJRUFFwd3NEaG1qREtLcmtpQWtMLTJncUZncXpaRHpvVV9IVk9Od2NybEU4elNCOGdBTVVJUW5PUjl0SE56UGV5ZG93eUJnUVFueWJsMF82Z3VsQXJxWFlVM2QxeENIYzZPc1BKM2tIRFNIZmR2eDZlMFNjbmF6V216cmZwdkR4YTRuU01pRmpZQzVLYmtySFFPSVdhZXFQdXg2a1RpWWItUXFBRFU3MGJVemJzMl9NdVAxaTZVUVN3TmhTRG56RHJ3NFJzV0FoNmMzMmw0YkdyX3d6bUZzV2xlQ0FTVC1IbWxPdUJnRDY3bjJaanptOW1NemRuaD
RqUVNUMzMzN0pNS0dOd3BJNHdDNUZjaVpyOFpkVWxiQ2RCM2h4S2dpNDV0UGV3WlYtWUFnQmVfUUw1QmpJOEZjOW1tclM3WjlLNmUwcWQ4UVJuLTlXdmYwQWNoVUJZWFVTTWh0Z2xuZlJ4OXVveVB2Wm9SWlB5SEhBaTMwaFFISGdaM3NTbHRWaFJXYlBnXzRfNWsxM0VxX1pkNVIzelRIS1Q3djQwc212Nng3dGNmWHFrTkg4ZVBydWk0VEdheHgyRG5tQVVmNmFtd0VJTHZvU3d5R2dldEFQRmU1eHY5S1NmQTE2Ry1lRXFwWjJqYUZabkJWMHFZcThwRU1XWUNPWWJ2RFRybFo0OGFTUVU1OGRtZmJ0Szd1cDV3NXd4QjN5cEozaFk1aFVVLUxxMzg2Sy04R3RiZktjRERJVTVnaWxJSG5CLVdrYjI4Y2RzMnlOcU94S0Y4RFBoSHE2bVlNdWZkN3NQMHZlTVpFNnZISW1VX2Z1NnJPM1ZjNm5zNDcxR2tVc1REWTZYcEJsbjZFOVlOVXZSWkNzOWwyZk93cDAtYklkX19uWEp3d2Z5aGg4S2dVTF9pU2YtWFo4OGtHX2FCZUpieDJ6a0pCSk1CS0JDVTh3cEhGR3VQQWNQUUdHaDdFc3FYUTJqR01UZ0lWb3JRSlpjRkNPcnVWQWxYUUlUNXdBcDdNVkNxMWtrWVplM0JZVFk2OUNXMXFlbDhGTVNTN2VCOWJLQm5aa3RqYlM2UDBrOGMyQU4zaFU3SFg3eUg0WDFqOGJTMFNGT2xIOW4td2hQaUxDSHhKNzNpWEYtTlA1SXo2WlNNdFZLN3ppRUdvYm9oM3NGTjRBd2ZEQ0ZvaW42WVFaSjNkNWhnaTF0SVp1NnFfdDd5RmVsZFpiSzhXWmxyU3l0VEswWmtmN2tuSlV5NDk0ZjJUS2ZhZ3UyWlc5NWx4YlBaalhBRmtLak5DQUNRTnk5VUYtZmx2cVFQTl8yUXFLUzJJYk5Ga0V5dmswVDc4b1BfUXd4NERrdFBaM1otSTZldkQ3SWpqR0xidGNZemNOMXhVa3RlWkkybjQ5YTJaRHFJU092TGRjSGFfTzJ1X0RWMzZkVk9MNFB3Mml2blpWQ3Q5T09Rbk9NWEtNVWs1aFpJc1pxSnF1LUlReXNiZ05JUUUzOW5CTEVseXMya3NJSFp6YVF2V1NHZlluU1M1d3p5YkpJNEFIQlN0ZkxWTGZXbjhpU3F6X3Y4UVJxZ2RxTkNLTURTZ0RUby12TEhiWG9fTkpvQnFEeXc0OURTUTFFRjQ2V0NsTm1QRkhKdGdabFllTFV4MmNpd3FBclFyMEE5czdULU5XNEg2VE9td29tZkFKZk9pdlNJTmptcENpdDU4WHhhU3JEWXlyQm4tNjE5WnlPQnV1ME84VzhjMDYyLWRfeXdidGZiNlpZQW5wazktTjNNMmc0QklDeDgzdUREdHRPWTlKWGdud1dkZFExdXJlY3lrOTNVVXhudktrNl9NclN6SzJjVFRpQXRfWHlaR3FSVzdJbWxVYTNuTU1VRVNuX0hIbTUxXzZGbnV1SXNBU1lYZjFpWlg3bmJHejdiTlZRN2RIZGQ3NGFMekxNNWpHQVpnTF9na2ZtemJKUTdxZ1QtVVJhczQ1ZTV2WGNNc2hQVHNkNnVCSEpuOE1iZ0ZCMGpLOVZzQVo0aThvS05ydC03WnJMVDJzNllIVHBSSEprbF9iN1NINnFXbzZINTg3RUdPeVhSemZHSXZINnJxUFpWYUJOMk1WT3l5SVhtUmdXZVRJLTVlZHFsSTEySXlFdVhHdnJfaXJJWFpNaGgxUVhNcUJNMk9rSTA0Qnc0b21ZUnFyNXJweGw0QzFzczJmd2x6dXI3aFhULVpyb3p6ZFNxb2hIalNCeG0tdDZDU0oyRmx2b0pWemlyTlV3RzJSNWdvWjBZekJvdjVlOEhSRGROVEN2MmV3WktfelNtQTc4M01yeTZIWGJ1bnlscFhmSlo1a1V6T3ZCaGlUcEtnY2xHRzk5LVRvdUxmOW1PejlELUJVNU90Yk4weXRlTnlTVkFqcUFoLUhMYmY1NDJGdjIwTXdWMGlSUXhCM3FmbVQ0UmV4TmdSRnlKcG5PYWxDU1RMbFhiSlNLWW1RSlJSalRDUXZ5ZjRIcVp0SzhsSG42RnhZdElsZ3RNMkU3SkJvQmdmR05mRXBWTllDSk9CSE83NE5ZdXNtcnNRMllyWm5HZ3hTV1Vxcm1veU9HVlJrUklzenVOb3ExN0FXWTJnMGJFamt6d3A1ak0xMjVqUzNpbFZpU2NLbHNmMjdPemo0dXhjSTRUdlhOSDlEUm4tal9pNkFRTEZwSWREaVdXQ0oyQWlETUxVMTVNbDFwUFhCSEZGb1hudXlMUjhOeVRCdVByR1Vfa2s2YTFpMG1id3hwRHJfNlFEb0NJV2hlZTZFYTE5R01CbXJXRzJTcGRQdzdscERyV04tcDlmb19GUUVBNVp5VGpVS2s3M1ZTUFl6TFUxcWlQNHZwaU45RVVSQzJQNEd4LXVYQ3h4M3JITFVMM3hDcTFfZUMzYmM2aVJLUzQzbVZtQWxwMzVzcXdnUXk2MTBSRmo3c2E3cU42OUo0eTdWc1NjdlBwejRGRzJCN0VrYy1WTUE0YTV4S05lVFdmTDVhRzhYWGY0RUt3NzlNZ05qMFFKTlFmekpCZ05QWktZRXdRVFJJb29CYXRqMGVUbzNSajFvRDhoQW4wY1dSY1F1bTllQmdzWFhZcFlFVzVUMFBSMFY2VVQ2bzlvc002bEZvWXJ0cDdpR3gzZklqNFNpR3hpM1cyclFqUnQtTFpqcldvb01pdUlzelo1bEVPWmpQcC1ZclRXVHdUdnV3eGRBeVFWTHI5cUNYX1doWWtqWTFsUDVLMnZKTXFyR0ptOVRqN2U0WkxZRlVCbU9OeFhuRm04WlRrbmNUVDVwcHpSUi1zODVRd3dGMF9RdkUzeldGM2Q2LTl1eFhLSVRKb0kzTTRQOXBScGE5bzhRNXhDVTFzYXRpTC0tWktfbDlWbGR4S2dXZjRyWGM0VFp1TnFPWXRRN0ZobXlLUEdOUVNXNmsxYUtrSG9oZ3I2MTdXSENORjNuc0RmQS02bEtsa1draG5idThaOFhrZkh2U3M5eE9GQ2t1eXBmZHFKM1I5bUI2MW5Hcm4xaU4yYVBST1BqYnlmeUxKR3NORmZlQVp6a1dYWXlTWUp1RHJWNVhLeU5YS01uRVJrN0NiU1ZxQnNwWThMNXFNUWppZno4OUxNenZkWnlmblF3UG9LMjZCYUtJQXg5ZTZVd0tTQUN6aDNWM3VtN244R0FCNDVmZTJ1cWVxTWdpNWlVZ01HUFJhTDEwMGFyMlNuNkd3VkJyRmRwRUV0WG1YRkNPZDhUb1VnNVZDY1hPRGhQc0s1bkNNRjBXR3hzMUV4R3QxWkhhRUJWbmJ3NktnYVltRmloS0VuY0dudVRUZGZsWWNoVGt1Uk8zSkNUaGZaOWU5Ti1CVzI2N1dhZG5mVDM4aDRtSEF3ank5cjQyOGNWU
kNyZlRydGxMeVZsU0doSThKbElXR0s3Y1ZrUF9CdkZmQVlWMGl1V1ZyekNYbDRuXzh0MzJVV0E0aFNqNDMxbksweEN5MGJMLWdScWtRNXVncXYyM3ZTTk1qN0FfbjV0cmRDNVZPMnR0TkJqclR4M20xVzBsTkRrM3B6aUZiRXB1MjE2NS05ZXdEcUNCajVfbDcxNDcyOXctVE9KYllNV2V5ZVRoVEFYbF9qdlBCZEdrNkJ5S2djUXFWT05fN1psYlg1ZGgtMFZmQm82R210OWNEMlQ3WW1YdlItcEZma3pSZER4dWtLejZOS0dFSGNCTkwxdWpoNXZxZEhzYnlsTm44cU5WMTN2V053YklrZXlaSXluVFVuWkhaLWpQZUMtOUxMNWNoeDhEUUIxdkppS3QxWlNFbE1lT2F3QVE1TnA3c2UzcjVfekJZMWp3M256SG9iWVdlRHZaajRLOHM4M216cGdHYUxjTWJSWEdkQU1UeGJzTW90VUw1U0U0RjZXcm9GMDlsVnZQUkdsYXBaOUtmR2J6VEhCOS1JWHVIVmMzX1hvbmNoMVZfeVllM1RtajhxMXYyOWlZQjFadFprc0JPN1A4QzZBOGNyVGdZTlJWVmQxYms4UzhNRnV0LWZkWXVWS1VWZDRhQTR4V2p5QVFaYmI1LURfVlBvVDRZeGJ2RGJBMFZPeWR0NTh0ekxMd1pOWlhxXzgyOGxoUlVOZFVpRmc5LVFVZzNZX3pkc3pZQ2c9PQ==
diff --git a/env_prod.env b/env_prod.env
index 62ce7253..a323ee70 100644
--- a/env_prod.env
+++ b/env_prod.env
@@ -5,30 +5,32 @@ APP_ENV_TYPE = prod
APP_ENV_LABEL = Production Instance
APP_API_URL = https://gateway.poweron-center.net
APP_KEY_SYSVAR = CONFIG_KEY
+APP_INIT_PASS_ADMIN_SECRET = PROD_ENC:Z0FBQUFBQm8xRjFlMENTTFpGT0R2SkRjX3hJWi01Tk9mOWhqeUdERVpYa2luUEtlNU9RZGZSVHN4SkpOempuRmhfeHIzV2t5dzNGeGVSaDJaWVRjUUExYU1RNFlBZjNZa3k2Yi16b2JFX2VZbXVRN1VqMXZNRDg9
+APP_INIT_PASS_EVENT_SECRET = PROD_ENC:Z0FBQUFBQm8xRjFlbW40U3Q3bGNVQWdaNDFRZ3FMN3p2UmtYbm9vVG9HcDhrSE1sZGM3elVFUXNRSmpuUTI4czBlSnhSWGJ3dm10X21VRVI5R3N5OVFGNXdGZVZtMkFsT2twcFYwQmtscGNsOFpZVnpYSWdzX3M9
# PostgreSQL Storage (new)
DB_APP_HOST=gateway-prod-server.postgres.database.azure.com
DB_APP_DATABASE=poweron_app
DB_APP_USER=gzxxmcrdhn
-DB_APP_PASSWORD_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBMd0FKLUpzaTdYT0Zia2V3VExPSktfTUx6RmRDc1hobjhYamxyMTkxakhDeGVHRTA3TmVoNC1Mamh0elFiV0h5MnA3YmpheXRzLVdhN2Ytb2R4a1NiSWY0RlFQMXlJU2hUMFY1RGJ1dEdRTFE9
+DB_APP_PASSWORD_SECRET = PROD_ENC:Z0FBQUFBQm8xRjFlQnhET1BoeDJ2aVV3OVNseXdPdzZLQUpRTFdQdmdybE5SYjhEcjdIQS1HWG1ua2EtSjlxUWwyNTRNQU1Gbk94dnVqQ25WOE5KTkRsc0RMcTFzOGRUdjNBY1h6cEdIQ1ZXclF4eGVKNHQzUXoyajBFbjFkMGE4MFVkWm9kTVRQaDMxVGczOHFVaFplblVJV1FRd3BSbGpvTTYyV2hDWXFQM3ZfUFpSM1B1U1BZZTg2eDJSZzNGRzZZMm9KRW1SZkFMUl9TX0FWTGlkdGVBMGZEMHJjTkVyQzlGMDItQXhJN1ZDdlBQRC02cEhWeXYtN0RzNUx1UHhGYnAxcmx2RFFRVFBZbGJzcnZDeFNOaWxlZGZrMVg3TW9WcFBQcEpLZllNMGVFd0dOQmxua01IWmJCS1hzM0ZMazZaODBRRk9sdVlRUTd4eDBpLUx0YUY3SE9lQzdzZ2tuTm5QNHFlTm5uR2F4RXpGaTZwOWlieHl3U0puU3RxODJWcFBFdlVWTV9pZXRXTEZPa19mNmhtRmtwMG5KdmtBZzUybjBMWnRhOW8zd2JNeEFKT1B3b014VlY2eUdKSWxFa3VzM3pnUkdobVF6RjJycng5WEFBVE5KTnVqQlRyaWRMYUpFTzhiYzlaQXhEZXFPeVhjdGNIcVZHT1pDTU5QR3JUZVFGcm5HXzR0dVJvX3N0YXJQcWN4cGlxMjNmNlZVb19Oa0NScHJLcm9JMkNBY2VEM0swblV2czhXZldfajFwRU14SGhZUjdDd1BmZzBNMmxhSEtQTkhfU1kwUXdrRGc5cVdGTC1kbldFdGFBN2dyTVlTeE1Vd3AyTjZZVGk2SlcxalppcjdaZA==
DB_APP_PORT=5432
# PostgreSQL Storage (new)
DB_CHAT_HOST=gateway-prod-server.postgres.database.azure.com
DB_CHAT_DATABASE=poweron_chat
DB_CHAT_USER=gzxxmcrdhn
-DB_CHAT_PASSWORD_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBMM3p1TEY3VTQxT0xrbW9fbHFJLXNDZHJUOVBSUHhhdURpMi1EZTZIaXQ0M1V5ZUZFQVhjSGF5SUVzTDNrWW11UlNQQVhwNEU0al9yZXQxSnRIU1U0akRDbFVIUHVvUV9SMkFkaEFGR1ZVUjA9
+DB_CHAT_PASSWORD_SECRET = PROD_ENC:Z0FBQUFBQm8xRjFla0NndGhTM1FyRkJKcDkxZmdib0M5VGxxXzkxMGZ1VG5hWHBDQXlFSWhUQUV5ZjNzeE5rSEdoUDBFam82OEdzVzJrbllQdm5QU252Q2JELW1BWThvMUJpTVRSLXdLX24tQ0FvbEI3X2FQT0hlQkowZklWRG1YRXJMbEFRZnVyM2c1M0k5SEZlakpLenlIX2laXzl1LU5FYndrUHFxekRyUjlpb05CWjM2Z1RrbTJYOGloZHlQYjdkejZ6NjBxSmdST1FaYndNNzIwWG9VUXVhbnAwemtJWGotT2RJVnhLUDJaWVU2dUZ0cmNkOERGYzN6Nzc3V2VENnk5ZUgwdzk4anpzTUg3SzNSZi1Xak5MOVNCSFNnRnZSTThzWkZPTVJYcEZkaE9WMkNHWW8wMTRyX0NNRWpZa0kyNnNBY1pqYlN6UWpmWW80SmpubWtTbG44ZElXSmljcmVUTy1Id2pGUDYyNV8yTjBEZ1FnaVNVcVBYcmFGc2hFdFctWDhtbmF5MDdhVFBRTy1WSHhyUkNYNjVHVmJ6Njk2aEs5MnQwYU5EX0FIc0VtM1AwZE1KdGRlSThIOWVqS2lwaUgxVHZ0SDlacW94RVNxa0taQ0tKQ1RKMVFUTlliYUxCR0ZmQTI0WHk3YV8yZG5mbmZKT2FEOHdiLU5hVUVrRHM4QzRpS0lZQXRSYl9NZDBWbC1IcTFRN2htUjBpV1FEdXRmYm9PRVI4RTRpUGRGRGNseGs0M0dtUUpKUkdKMXBGaF9oSXlmOEdYaUtma2NqS3FtdWkyQWtRUURTbHdTOThrcGV1Q1JOc2lhV0l3dl84YVhDRHlzZWhDU0s4T1FZdFpVQ19xbA==
DB_CHAT_PORT=5432
# PostgreSQL Storage (new)
DB_MANAGEMENT_HOST=gateway-prod-server.postgres.database.azure.com
DB_MANAGEMENT_DATABASE=poweron_management
DB_MANAGEMENT_USER=gzxxmcrdhn
-DB_MANAGEMENT_PASSWORD_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBMRDJRY19uM1hTNC1mMzhVaGNtamtScGpVYTY3RUdBTlpTTDdrUF9PdF84WkFSakRoX0VEcGhwanBPSU9OUGJNWXJDblVUS0o0Y0FBd0hMejUyTXFJTFVCaUJmTkpVYVQzWXFRSDV2d1lENHM9
+DB_MANAGEMENT_PASSWORD_SECRET = PROD_ENC:Z0FBQUFBQm8xRjFlMlFwYzQ4YUNOVFhyZktRaVg5UzNmbkhVN2RZaEtHTU1Xb1hnYnA5T0pZdFVrdHZiaXVCdVpEcnhPczJTQVYxeF85SWloSS1wdkNTWFNSZXZqM3ptNU45QVBXUmlPR0daMDZ3UzRFMHNKWnU0VUtuUEJmeVpUWXhxSnN5dDB3X19oTVJKQzFaMUpoWTJHQnZERU1NazE0WHBmRUJULV9CVnhfR1NUWVpGMEh6NTQxRXpKMnlaNGREazJ5X3ZhX3k2b1ZES2JIcXotOEdDYnltVENka0sxT3ZvUTQxdDR2eDJ2NXNIcHVpa2d5NEtOZHJBRnV1Wmt5cXdRTy1NU1BwdDFnelFBeDR0b0Z0UmlTdm4zU2s0a2ZRV3E4VGljS0prZ3RINWtnMkxlX2o3M1QxLU1od29iWmNfQU96eTRhUldGbnoyN3VKRExVdHZsR2oxdlRzRkdqLU1uUXFHb3RJX3pTa0d5WjNmMUtXc3A1YzNJS0NOR2JpeVVUOTdQNjlCbkhNRTZzTnZyU3dHRi1LemlqNlNaZTZvbmIyRVA5cUFXcUUyZ1lSTFZram5RVnRxX0dqTFBLWXcyUzNWaVlqajVuaEl0elpiaWx0YzVtNGVBRFJaeXZMRzRTTEloQzhuV1JMRkNKVmxjQUZSdGM4RjNfN0dsRFhyN21qbFhCMGFoOEIwOU50RzRUX3d2WFdXQVpodEVmck9WNDdUSGoybjJVWFJJcmF2Z215SzMtNk5IREduNG9IYWV6T1JZa3NMNjZVX3gwR085LWdLTGk3cUgyMkZrYnlLZVNqUXpMMUw0WFF4b3JtOXl0OHdjazdmeGcwZENzdkhEUjYxaXZjaA==
DB_MANAGEMENT_PORT=5432
# Security Configuration
-APP_JWT_KEY_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBMX2lyNHVQVVkzamE1eURGMkRoVmhJTTVSTEQ1c3E4XzlucExfdUNxTHNwazB2X1h4YzdUeDhsYWNCbUZ5VjJNVTZDYlY2dGhreTg5UGV2Z3A4X1FTc094XzhxdWRNSzBXd20yY3pFNkpUYzhaeml5ME9OMjFkNjZMQkdvczZnWTVYX09fR0RYQXhpVHFPQnA2cWh1T3pqTFVieXpHV1hlUjVQdWRCSEc1bk1ZPQ==
+APP_JWT_KEY_SECRET = PROD_ENC:Z0FBQUFBQm8xRjFmQzdxRmgzam9WZE9QX2RoUGlYcGU4V3A4ajExV2tNVjFGNHZaVTB6b1hCTXlmcUphbUhaT2hrb1A3UjBsVGtfUERCSVppc2JxYVJOT2s1ZGpnR3JSOUJETzBqRExYVjVSQlNTQkZGYXc5aVZnWEJWMzFHcEpMSjgxY1RUcm1Wd3JVSDVaaktpSXBZc3hBckE0SFhrOFB5d3B6X2VFdURtVDdrT2p3UUVvdU9qU0hZQ1RNVGJZZkFaTjJHc1E1SGpSTGdFdFU1bF84STRQbG9VRTIxU1BoUnBMa0FRc3VvaE4xc0NOQUI3blZLQkxkM3BldTc3RzJfbEJjQVZ2UlhTb1dMRFpjM1k4YWthUG9kemZRekFTWThoTjRVbm05V1VDeDN1dDJzckhvb2F4d1QzY1BIT0dENEgxOXpNbkNVWHluajA2LUp1MndHYW5uSHpWRjFDYlVoUWFKQnJCRTVtMUNNdjhtV1VrV2J4Q1BUektDaFFkZXpUcTJOakpKc1JoNVFEUGdLb1hqRE4zMnRMaXZJX2tCQTZnZTkyVmMtNll1TU9VOTZZX25NelhqSTV4Tm04VHhoczRFY1QwRWpsZFdEczY0dXM1Qm95dFkyNkpjeXA3VTlReGhUeGF6NXMtV0NoYloxQzFWZndlcG9oZGtSczVyUlM1bkVSelY4aXhLcmZPSmZ5WEZ5VzluTEZOWXpBbVJQc1NvYnZ2UVNzSldvZ1VEN0lyQVhybm1vRHVVVDdBczBLWXMyb1hFSVNheFc1bV9oSVdrdGJRcjYtVHBOczIyM01SZVd3VGh2aDdtZEZubVU3al9GcHNacmRDSGVXZE9VVEJ5NmNzLTFGUnJxZ01qVmU4TlBDNWhmS2ZWNlpkdXZYeXVOWHFfLTdHYVNlTTlZR0o1NWh3V2JCMjhyNWt1eUM2S1pMTHhITHhmcE9hbHpILXZEYlJ4ZmpyMFE3aWQ3cUhnV3Ruc0NZQ1dzUWNEV3lmZFJQU3l5bWxnVkpJc0ZZT0tFN1lZYUlrX1R3Rkx3ckh0cUxfbFlwc29YQXR0dnJvNWprUzRmYnNrQVdOUldxbVZuZVNlYUNwN1VKWDN6V1Vnd241NDNxRGU3a3RXSDZTb1Q2YTQzaDN3RWJ4V1dZS0JKUHY5R2xiS3VZUG1XWVYyRTA9
APP_TOKEN_EXPIRY=300
# CORS Configuration
@@ -50,29 +52,32 @@ Service_GOOGLE_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/google
# OpenAI configuration
Connector_AiOpenai_API_URL = https://api.openai.com/v1/chat/completions
-Connector_AiOpenai_API_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBMSlhwejcyRl9EUWpPX3M5bnI3QTNiRDd1QXVaVkFCczBzeUczcHhyenJvRDN0SDZGaHp6dGJqNjNiLW9oTjJPZGV1b0VxWElfT29jQ19vNWF4aG11bkRlS1JMa1VoeG82VWVmWkV0VDZUWTFmcXZXYUh6ZWs0bEswNXhhZ1ZEU1JNYk1jU0p3YVZkZmZVWmF4dURDcGR3PT0=
+Connector_AiOpenai_API_SECRET = PROD_ENC:Z0FBQUFBQm8xRjFma2p6ZWhwWDdLNzQya2E5MlFiTUt5d2lOaHlnZTNNWTdQcXh5amNTaTlWalpaMXFWQVQ0NFNkSlN3OFVOVVBGYkJkUWdqenh3WVJ6SkVLYW91SGN5VHhUeDFsSlFkUGdlWk03NmE1MGtTbHBQdDFXNng5YXY5MGluN0ZKVjFTaVIxTVNhQUZaNzA3MWJwODVFWVdwelVBX0RRN1FPOF81TEpCTnpaYjVlSzJxTXNnbVZVWlZBQklqSzkzZzB0bWJhRlJ6cnZIRVl1SDdlZ1lSWEU0eFQzdnZhbVRGSkd0UDNHR2YzVjB3d19VUlVaV05LOGk0dVVMN3V4MjVaU2xPcXB0dDdNUE54U2tfRDFMM0t5R2M0UlRVdU95YzIyU3VqSFFVNVVmNll3YnY2M3F5d3NzbnVPenlkeXB0X05IUHF5MU1hWlBZZV9ubFpZcDFwYzRNdTZVaExqeFk5SGhsTTZLRmdwQTFINTFteVdQSDhRUDZMaV83bkllb2IzYXYzWGhhQ1p0TnpQUkdvLVRwOVUxN3V4MlZfSGdGS3B5WWlDSDVzRXhxN2ljWDB4QWpkWXVXdlAtdE9nMnNNX05lVThaRkNIZHV3blEzMW9obUw3Q1FMM2piYzdTdm1MajhLVGFIbVJ3aGhaNFJteFlPNVkzemRadGhGaUhhTVc3eUltVWlXMkE0UGEtZW5MaW9NSjhlMS1CT2l3S1RXLVE4dEZjbXgtdVhJOElXZjl4LVhDMmRTanhvOEJZQ3dfYU5GVEVPNGxBTmhjNlcza2dNenEwdEQ2MHBGemRtbDV5cG11dF9qLW11VU8yekxrMnRLeVRZOEx2TVBKU3EwS0VLQUFtVWluaWQxSE5qeFphZVlDTnpSbjg3UW9VWHBDQVMtUVJxOW13anI4ZHo0YmhRZlF3SGw4STlkeTJ1ZUJVVXNWUTdpaExDOElLbXQ2YVhYZlV6NlpaTnNROGJUaWYtMzlDcnhwMmZqTVNiV2tLMHFnWjJFaUdhQkxqcW9adTJLYnF5SUw5ay1yYVJJVEZxMXNJSk1HQT09
Connector_AiOpenai_MODEL_NAME = gpt-4o
Connector_AiOpenai_TEMPERATURE = 0.2
Connector_AiOpenai_MAX_TOKENS = 2000
# Anthropic configuration
Connector_AiAnthropic_API_URL = https://api.anthropic.com/v1/messages
-Connector_AiAnthropic_API_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBMaEFnaHBDYndpTkZJSFp5OGdmY2xtNDZEZmFmbk1rUUQ2STZCQlprMjRhY3BLdkhTWWdDRlIzcm94NE5LZ2dCdlNkdWpkVVk2QnIzTzQ5TGEtX2p6a2kzeF9PR3QtNWs4aWFKX1ozUTNYT09sMkJNb1JMRk1vbTE0U0Y2eU1SUjhwY3Z2TWIyU2d4Nk1iS2d0YkRKUm0wNjNEbWNxYTg3SGNnU3FMSzVtYjhLVnhxbXd1SmZyam9QSGtna1dkSGlpeENEREZQck1tZk4tTkJvTERTcjZSdz09
+Connector_AiAnthropic_API_SECRET = PROD_ENC:Z0FBQUFBQm8xRjFmNkx4WERQNTZmUl9CS29RblR4Nm1jbnZ3LXV4ZEd5MUhvMWMzZUZqUmo2M2pCOWFkcUFjbUZleUVnUEZoZDdlMmNTcjdZRUQ3RnU3VjV6Tm0wd3RmUWIxZDZJTHlDNmtfSVNYSEVUbEFVSVVLbDFsVFlsSUNZVU56TU5tbk5ZbG4xVTZZNGVNSHR5ZWozb1k4NjZTMkRucmo3cTAzdFpVbEFlaGNtM2h1dEpYWDhJT3V1aHFsZF9xbmw1VHNpeXNWZUVyQVUwT1J5NzM2VVpuLWstNHV0UENlSmlZc3QyVGJzdXRldUhJNTFnSnJGSUUyZnh1SHJhTzcxbVpaVllOVXN2cTh5YnVxalJOZVF4V0xKTUs4Nk1yUTFSUWJJYk1zV1hTanpjZHdHdXk3a2lEcDdDOGtZNkZYMkxCRlFQY0hLcXFWVUhUZjI5dEQ3S29QenRYN1pFbG5uN0N6cDlXUFNSSEFILVZOWVpmTmdZSlFrRDVmc0Y0dEEwTHpmRmlVaE5VaDJvNVJldVE2NV9zNVJFN0VLMW9CcmZwR0VQVkk1cnF2czNxQTN4dnc3RnpvZHVGejJYUkljSGdPS2RQM1JFYy04RVVtYkhOTnNJSW5XRW9aQ0l1TzdnSU1fNUVkcm1wc2wwbGV0WEpFNlJIZmJGSmNLWVRKcVlaNzBfWVdJX2FPM3NuTzFyVHZVUTljc0N6bjJoVVYyQ1pJcXFzbDZySmNVekNGdUtqdk1lM0tJY1pfOFcwUDhzVjdtQ1h4TjFxMlJ3OGhILUUyU0g3ajU3akhiTDF5S3lzSXdUbUVDMGVLY1NrY0VKVE1pd1R1cmtrVnZEX1hYUm8wcUtPVWtxR0twVFdMYmYwX0R6UXRiX0FkNlpoX0xlUDk5VXRweHBUb2JXQ21YTlB1b2kyck1KcHRIMDBZdjVtNW9sb2g2a2cwNW5sQURhTnlhNThKUGZhQmY5blN2LVNIRHdhN1ZDRGY0U29iM1hQaW9HSTlQUXRCN0ZKMUlsU1JtRkowTktCbnFweE9GcG1NOGZ6eDg5OGNaTEdKdDZoTGlwb180Sm43OVcyMGI4WHp5RDAxMF9Xa3hOZGZYMm9fSzNwTEtfcUZzSWVJUVhmWHJxTnRJUFNOZFgydDY2VmQ3VE9vRU10dVFNYm5LVmNHZElDRGhRRk1fRHBzVDdERk9ab09sRTBKMG8ydUxGZFoyMWFLNFJ3MnhkSm9oUjNIQ3ItbllLM1pqczdlQUE1RmdaWTMwbFRneVpSZVkzNFJGODJlOC0xMi16SC1KWW1rdWdicUdPa0VVQT09
Connector_AiAnthropic_MODEL_NAME = claude-3-5-sonnet-20241022
Connector_AiAnthropic_TEMPERATURE = 0.2
Connector_AiAnthropic_MAX_TOKENS = 2000
# Agent Mail configuration
Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
-Service_MSFT_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBMMXV1OE5qODFrcGJqVEt2Zlk1TkFyQ3VoMzVad21UcTgwSXJqRjdiWmdsS0J3VWRBWWg4WWllNzE5X21ubGItMl96b0hZYTlXbVBkTmVhQVRadGlnWUlWQWdOZUV2U0pDSDdiWEhMNHJQUVllYzFpWFNJUnY0M0FpZ1ZWcExyWmk=
+Service_MSFT_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQm8xRjFmWV9mVkRraEczbjBqbHJSVFdYLVViTzNpd014RWlJRGRxVjNKdVhyVXEyS1dna3JBRlNMdTBkTVVXR09hWkNabktiZVhJdUxSUHhUQjNJN3RFeHlqdlFSRlNFWGU3NTdTcmVEYXQzZ3FzX09Fak1iUzhDVUN2NjUzNFhtaGpxX3hvZ180U21FSllxTnE5c3FKVkEtTVJFTFdraFBlLS03ZUlsY0FaRkVBRHVmNEdmdnlxcTQtSEUxLWlqakFueS1TcVBFMHhYV0tfM2tNSDFFMzMzc0NHSVNsdHNHTU9BVjhJSVFzblpzR19RMFp1VFJFZXBnLUI1VWZWMDJLVFNBSWNSVElRcVRNNk1KOGJRZVBsNjBnZGlrZFFfdzlqTmZFbmVPMWs2ZFBpaVVPQm5YbExRaE9KOUlZS3piTi0yTFhGM0kySTlYQldnQW9zVnFyeXFhYTFjSUZfN2VrY20wWUlpb0lCWnMtMTBRQV92TTE3aEM5RXZaYlNxWEZoVXJxLVNJT2RNOGg1YlFqaXI1dUh2dk1VY21hNW1KRzloZl95aTBMUVJZZ3E2VTE1S0JhenpkNVRvdHRfcHM5b29UVjd5MVdRdTV6clh5NEFudkNlelczYUN6ZkVIMWZxcy10TE9kYV9LNWRTX0dyRHBkN01EeDIzV0R3MDhmMnFtNjBDUzJYalFuRTlSMkJyMGRvVXZ1MFpqSGQxd2tTVUpQOVc1T3FTU19YSEhmUkFXalB2cG1aVEFZc1NNZ3FDQXNoS25ic1RmWjJXblQ5TnZDSkFVZXpYeVpCemFJNXBDVGRmckJ0UFl3Wnp0Q1FXT19XVW9tWnNOMGx3cVRFR3dqSmJZd1BFMERBdjEtbTlGQnhVX0dOOUhXekM2b29uZy1Qd0lrcE9uS1M4RFNfbmMwYl94ZkN4U3V5WGpiWmVIUExNM1hUalN6ck1CUXpVeENvSXFuUE53PT0=
Service_MSFT_TENANT_ID = common
# Google Service configuration
Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
-Service_GOOGLE_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBMYmNCSXM5cnRBVUxlYm83VG11MlBGZHhiV2hWOWxWYk5XRk1hSmhsTGdsX2dHSGhxYk5FWEpEbXdQM3hCNE1nRjZHNjlDb0RMWTIwb2pqczdocjFkSWxfYWlLOU9KbmtUcTl1SmZJZUh2V1RwM2kzVkZhRFIyTERsaThXYS1OVFk=
+Service_GOOGLE_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQm8xRjFmUU4yNTRwTDFvU205NF96WENmRVUyRlBER1YwbmgxVVpucXRDUzkyd3o2SVNiVmprTzFiZ3lrbHVaQjVYbVVpV09jMFVSR0lCUEUtYmpBajloaUVtcUw3QjBoQ0lUT0NkOWdOMC1zckxZX1ljZFNNRENqb3VodlZhZ3RmQkNjUTJidUFsMlNrWi15RG5xbTdWdXlmejJGT0VLUmRSWnUtUDZpSWFzTVRPbjF2cFFSZHNRdzV6cjlMaU8tTnRtSEFkRjBERTV6RS0taHhKOXQ2YzBEamN3eEVtTnVvMEY5UzB6MWd4Qk1JdUIxQ2t5UG9SbWR5NlMxV3RtbzFpRDl2bk5IMjR3XzE4bm1OdDZlVmpHTnRNdElMSUN5TUlvdW5FRjNOZzRmR2s1YzBiT1h5U2dIeTVyMVo5UzlJQktXTWNWRHpnbzM3Z1RqSFZkSXBIRlg1WS1veWczeG4yZ2xyak1yVEpQWURXblhuZ29UMUNUZXJsMTJRWE5NUVFDV252Qm5nU2NOWElPV3J5OXRYRnNqX1J5eTZUYm1OSjhrVEVpTnBtcXVWTnJ6ZDV0Y1dhR19jaTBSQjNvR3J3eTZKWVNRMnFFNEcyVFR1cTNYSzNJZzJmMk1hR3MwVGtMQTY5NUgzb2x6R053RFVnd1pOLXMyTnE4Ti1KUDk5M09pOWt4aHhRVFNpZWxBWGc1VzFDSEl3d2E0VFQ3a3M4clBPYzNFM19NRGpSa3FUblN3R1gwTE5hNEZxX0JHNFNXLTVrQnMwR0pmbVFtcnZTNWNwQWhhUG5qMEZaUG02YWpJNTY2NU5EQUNwX0xpdUx6R0w2Rl9UQWxjeE9xYUdBMmpkVGxZNTIxRlRuVGlybmJIaFZTeGMwZG9lMkpyTTA3V3c2QWV4ZTBhSE8xSG9Nb2hfTnhIRV8yU09lWUNweHRGNkwzcHBPTkg4S2NheWVmR0ExUmdKVTRBPT0=
# Tavily Web Search configuration
-Connector_WebTavily_API_KEY_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBNVU9JZEcwUWFuQ0lfRElGdDRhSFJDNVVBNUhBVzVKQlhBZXNsUDluRXYyV1NuaWw3eEJMdnhscGNZNW5KVmgtMzNfSGRfX1RMZlB5SmtHSzNTMC1RUlp1c2dqOWhSVnhuVUVGVUlaak16ZjlpWW00OFVIRFU1aEZXYzNaN3VNS1I=
+Connector_WebTavily_API_KEY_SECRET = PROD_ENC:Z0FBQUFBQm8xRjFmSmxORGhlMzd4dkpmS0tVaUVLdG5UZ3RUckp1cTg5Y1dnXzBoYWZtc0FhR21xdzRvVnhiOVY1eU5PQTM5S1R2cUhhODljLTQzUUJHMUpJRFpucjVIbC0xTDRUVDV3UzQ0V3gxQjI0SkNLWEdHYUtSM1g0UmFueWo3SUlROXRBMjFGQWR3a2p5SnAxVzFrSWxFWHVrdnNsYjFNb0RvMmdmY3ZCUmxfd0FRS3BQUnpNUEZEMlZCSFZfY1VXa3F1Y2V2QW1aNG5uN2ctTnZvMGw1Vlp0eV9VejlZNjRtcEJPYWxtVUdVb0xpbHN6VzZnR0hZTk8wMC1YQS1DMVA3UkY4dV91OTN6dXYtczhKeGxLa3F5UTlROW5GOW9qZS05akRvZ2lfWVY1ZFk4Y21fS3AwMDU5NjY0ekdwd2pFRE9vYXFSMF9PcXRrcWwzTEZrN29VSXVIenJ5MGJ0TGVydTVUR3RVbWItd3dkX00yQTIyeUdFZktGNHJBZkNpbjQ0VjJrd09nQkx4Q3d0eTdIYVZQbU44VGE2d0g2bFd6czRWcG1pSmhhSkxSYnJ4bnRENGdqWHBKVkJHa0h3LW4yU0VCQWYxZ21kdTlqc3g5bktJN3pycFhGNDZoU2R0eU1sSVVCc19LdFIyd3MwYVEwb2tJdXBpc1Y3VXRWVUd6SnozMWhrNVFZLTk3R1FzUDlTMWgwNDFaWHctM3NjOWJiY1g5cTE5SUlIZmUzcnFQaXdJdUozMjZGYmdKTTN3UEQ2ZXZITWM4MU5tYzAxaFRrOTdWWW9iTnJBREJSYXFVZnpCRjVFc2lyam1Bd3g0cVpidV9ISU5qOEdqQWcyaTN4Z08zWUdhX1EzNTc0UXZBWVUzQTdCRWxLWW1tYzJxeXU2NkdOSVhwVVpGV0x6NTNIcERtQWdXbHNhQW1LcVE4eDJGd0E2U2FjTjVuekRiaWJ4aGpEdlNCb1Z3PT0=
# Google Cloud Speech Services configuration
-Connector_GoogleSpeech_API_KEY_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBNR2pBeUR6NmFTcFZkQmZnSmlweEpfQ1RSNnFobHZ2N0Z5Q1luRVJWSmx1WFJTMlloX2hROVQ2TjlvRTJQWjVuT1F4WjJmUVdCbTEyYTQxY2tjSXA2S2Q2NTN6RnRiQmxTTWdBeC1GdnNZUV9KWU1IQVFaQnc2VmJFSHVPVDFhaF9VYUJjWS1MRTlNSEkzUl9lTDZjX016MHZKSVE0bUNjZ3Q3Ni0zOXNfMmQxYUhTcnRzSDdQSUlyUExjUHpYQTF2cG9CS2dZajJLUzZUS09JeGRDRkdPd3Y4VFhWaS1DY3FOc0hQWXhNNVc1LTI4RjdKYjktWXc1X3hVUmg5VnhQX3BuQzJzZ0ZnbUVLNHZHWTRqNjI3VDlrNDlMLWNFVl9sVFd5cm0xbjlXa3VMOEthdXdWTkFPR2ZjQm5ReGNqbFN4NTg5NTVqcmlETGoteWhqLVEwSTNBQTN0Y2ptc0JBd1BabjUwaml0NVllOF90ejhObkR0VHdIU28yRmY1QWk1VktURW1DNWZPR2FnQ2dqS3lWSU8yT3ZDN3J5Y2FzRmJOOFNTWGZhUXNzbmFKOXdxakRfekFyZEFSQlF1QnJIMF9idktTQlp5MWI3eklrOHJPUmVxOFRyWmJNSGVNXzhDSTkwd29tSTJhTVM0T3lMYWhQZzEyd0RYX21NVVFTMm5JMHVpOFFfSHdkc3RZX1A2czVtRGwzU2RZUTVYaEp1TGpfejNwLXdZY0pZQmotOXVGbGRxcnFNdU9XUEZKODJvWnA5TE1mYktjUGUtRkJJbHZuTF81em0wVTdVWi1QRGk1dG45cWR6QkZmRHd0WWRqZE9xR0FCRXktenhLYmNpY2pfYTU0bUtPYk5KWFVRc1E2M0dsSWVFWWJucXc4QnBBanRjZk5GbzVKXzVMX0ZzTFZNYUZXTm8zNjMtb1dEU0VkTmtaR2xPZmZXam5qTVIzMXJpeG5raXVJOHdBZXhXeTA1SjBJU0xHZjZubDk1Z2k3ZFltdzRQbzZZNlJfcHVHWjRzWXJIVlAyRVU1eVVYVzd0R0JoaU5DWEF6ZkdDeGZXdDFiMXpiTkEtTUtuMEt6ZFJyMG0yN2NFbDFna2V6dEl5cmJkS2liOHV5OUhBVGJRX29vVmk5VnptUEhoT01oOTlHeHZaUTVCbTQ3Z1ZkY3J0NnpPcl9JLXVYOXBCZVh6blNSVFJFVUJBT0lOckNyQjI4SlpBV283TUtBYWpZMWVpV1p2czQxZHlBYnJ6d2JUQjZ1WXRGdEdkYy1keHFjRG51N0NSN1c1ZEttTXRfSGZEbzhsV0p3aW1rbHJpS0pEaV9IYTRiVi1WLWF5TUJGMnhVZ3c4dUxiY2ljTXE0S0JJUWM2M21NemwtdlhxcTJta1ROYUpUTmVaLThyQWRxS2NpM0QtaVJyQVU3WWQ5V1kwdUp6dzlpbUxzekxwVGgtQl83MTdWeFoweUZLcFZwNnQ4cVdfMkw2MHBZUU1CSXJNMjk3YThpczRqWTBuZjlRSUJfMXQtdk1uZUVTMzVvNFNYekFmRFVJSG1ib3RzMHZiQXVqa0VWNzNid3RfTzNiTDJrU1VBY0xoWDV3Y21OMWpzUW5IZ25RRWZRT3ZwLVVESG8wUGNBcXFfWGZKUVZPLWVnNlRjaFRibmlrLTgwUmhRRWVNNFRUWjBjbjNHSFUydGY3NWNQSTc3NWlXY2s5U0lOQ2hUeWZiVXdVOV8wRjdFTXNzN25nSE9JMTJqNmxEMTN2U0N1TnRDWWJpMm9WM3FRbWY3bWZGY2huUURlQmdIdkRBNGhLWEl4M1hqNkotY1FBWm9xX2FJZVRBekZoaWx0R2k4eEF2T042dkV3cmVhMG42Q3NTM3dILTI3NEVadUpiUjU2cmxVeExMTlFpVzBfWmxmZVRXSU4xWmVhdTZqaVpmeWhwUjB1VE15SVFtMFhqUTVLOHd4dEVkQ2hiSk5nczJ5aUV1Vk85OW05YWJMYkdMSXRCZV9WLWxudEtUX0FKR0hDNjdMcEREMUlWMHJaV3RUcUpMLXlEU0Q2ZUhETVJycENlTjV2VktibXhTelJDcUFYWWRwV1VHbldtOFdHRlZJcDc2dzBXZGlJTnlSeU9mUzZHRzJNd25WS1FfdUpCRUd0NDdGaFoxVU41Qi1pM1ZQdHZ1THJBdkRMeVYwNGpkRWl2WWtrbnpoRWVFMzc2ME9WVUNNYXEtRnJKWnprekhxcHJVRVRTdHNlSE1ZdWFtLVRjWDJvVVFBTS01NXBjbGlNNC1Dcm1NaUdiUk1uUDFDdlJ0UFRlSkR0eDJwOWladzFxOFpoY1lUc3o1MmUzWm1MYUs3MjYzdk9KbWtrOWxDVWhlOTY5TlVEM210YnlmZXhnUzR6bTVHbi1IS3ZyX0lSc0FtOVBJV1BBeldJNDk1UUxaSW90UG5IY1hJZVhhWHQ3ZzQyd29YOU9DNEthWXdnRkx0aDB1LTBUN0VOUlBGZ2ZPLWY1UXdTQVRCRmhJQ090a2xUbzN6YXhuTmNqaWJpajR2Mm1Rd2lxelc3UjRSRm11dFBlVlRsTlRoRUtPYkJVbzh2Q2Y2MUhqZnNsb3E5cUhLV0hFRTlQVzcwVm5DZ283ekJPNDRxSi1neTZYR3E3UEdXT2kwLWVaZGxFUnlaazRHa01TSEtyRy15S3QydHBHcnlMZWx5Z2xqY1ZkTEVIMERIajl1dW5JZzY2NWVjV0JSb1NzSG5OeXNheFR6QmoybjlBeThHVkJtNHduaXlJYkVySlNYTDMzNWpneHlXeklvYWRvQzAzV1lDa2lzaU5CUEFNNVdLMEpWZ1BVbUhhNm1NX0k0ZVM1dFNFOXFaSkVuTXhFUHYtdF82bGxfcmFMMG9kTUtMOG9adWF3VFlYVnBTSjREbWRMX0pLR2ZHRHpIdGMzYW5OVUtzVGp3OXc5WDJfd0l2T0lVN2xvMjBzSHZSaWY1cDhTUmhVdDR0dWgtMGp4V2V2bDRQSC05cFdyNHBYaHF2dldwTVVNWExYZEZFQTlmaHFMald2LTdJX3lfRi1WbXhfXzRtUzNXVXBHUTBocFRST1h0akVZc1BleDc0aGxidS0zTjl1cTA1aEdWRXUzaXlNQUR4N0ctdkhISE9DSGl2amdyTFZ5bTFnakFrVndhQ0ZTYi1GcHM2TG5aU0xXUGhjMVRBaTRNYzBlcXdtYkdsQ0xwTmxmRzJpV3NnUkdMMDA5czFaUnR0R01TSmZqeHB3MGM0WkxpNzE2SXpSaFhENzh0OWU2M01JZ1pCWHhZdGN0aUJmS3lSWHNMWXhZNnVZSHlrYkFkNHlaVm1FcEwyTzRYN3dhdGItVEh6TS1NU2R6YkNhNjEwSEpwdzF1WUdtWldJQ2ZkUEhqM3VhTFpvWjhxQ2dpNlhpY0NTcTEycm9GQk9NM3Bla1F2REJRLV9ZYW1Dc19JZ1NFOW5yLUxCX0tvakE4ODhhOEh0SXA5anJjRGJ6d3dOb
zVfU3FYbW1kLWVCSThIWnl5TEh1OFlVTml5QTRDVlJPaU9mZTB0NjhSZjQtT0lpa1piWmwybUFQOURlOHBtQW9KdE5SZTJzR1A3MkJVS0xuZTRKSmYtcW5OT2M4Y3BWaUVYQUJTblpsR0xYckx1eklwNzh4ckpBNUxaMWlKd1dINjQ0UWUzejNPcnpMWHZHZTg4NlhyTnlhdXZSQ093QWktTnNLcU40aTBGUFFNUWFKcU1EZWRQalVxbEtIcENTWXl4NXRRLUkxYXhGeDkzY3pfcVgyUlFWa0dVVXNHU3dvOXRDZC1TYnZVS1d4SjdKT3NKTjRMbFZqUlJXSkRQQnNUM2VkbWRhaG1qMFBVQ1VxX0FNYnJmbFI1RjgxSjE4c25VNm1EOXpaanhQLXVJcXEyN2VaTExZMURzYThwdzh5NEVUY3Rab0plT0tWRU5rQmJvOTlWRlp3elRQSWd0LW5laGpQaFFtUzdZVzExSjQ3ZVowck82TjZjektjZzZ4NElGa2pIYjJHa29FR1BsSG5HZGlmd0xJU0RSUW96emZVSUNzSzNCTlNUcGNLQzVSaTRacWtOUWxwOTBLdkt5Q19PWDRONGp4bVNXZHd5ZmtHSVFfX0lCNFhrY2NVU0N1MllJOGtmcVFXQXpJRkdiX2dSVWRuakdldHZ5bEo2U21jQmVtcHpTaHlVZ0k4ZF9xemxMOU92bHllM2VVd1FPUDVRNC1DTFZ1U25yQnlrNm4yUi1qNTRtOU9OaVVYVzZ6Z28wT3lubDk1SF95Zm9rSWZsMVg5NnNDQTl1YzhRPT0=
+Connector_GoogleSpeech_API_KEY_SECRET = PROD_ENC:gAAAAABo1F1fdPSqCVU7KJEMZMLJE_0SxyoglHMeWt8fUgatnu7mqtxrfQ55gmYUcS0ayJDxtBv16JOiIGpEf08PYAlg40_DPGJrpQotQot3m0oFwQcUuQVNfDsnhj_0TEa_GOuh9ABk78NwLpbuwDZ9ixiFLCcUm6kjYJfXZ6KQ6D22nbNx03x-Bfi55lJKcuuncoINfegpUaGlEvc1ghMVA3dODVmw97iPnmSeQyElrnJ-aqABOCIbGL2TTM88KGQEYooP6IQ0U13vk3nSMKHsiV9SqC4dd1Eq4SOnuIPHEJmKVi5Xyr1zYei3__ysf4dthDs-lZAgLCJ9huZjBUh9hPTQceewXHhCPf8IUHEaMjXvGBL6GLGMXz2BED8hAH4_mU6V0Z0A6NA5RGqIZEs5lx9AuJW4peIzZ8WbZHeLtCpeaNDg32jeKg77xfKje1a4nGcfQzR5yQwCSQgAQ_noYOeoUB04nik5tQfGFUL58qt0wplZj2lysEtBpf9hXWWkErvwFQ_fdfDNiovOiRTzaOPF6cYdoAz8uNmNIJ0gxtWGhVHMZVIubhZ-B8v1rZgRzxGp553k0-MR2OxyBxBr-9hPgf_-4jELziNguhUojfAAEz5n-mc1TiWTWiB6j78TceuoKE1vwFL8A5vlni_I8evAxhfKmpb0g0m7PjxPs_3jotzCalL_6cIuewdmFaiYoaNU1KKoTXfNdcqunWC1Og-1QRwC6NtwSFcp-uG546qJYysGfz00o3-1ROBVM0WR658s00NUash5zXM60ZTHiVUrkDmliHcGsAGaWd775y8t6e2uMcI6awqN9DHSnvZaJRuOoikfV_FfuRhmzpCo4Zlxt9pb-69zB0kTKq1A8Mg3HuLeDNOmUNLgxGFsaAPKpXX0H5uh4W7Mag4DTK4SLUboMhoRQSa7oJPAlT2Tkjbt72hBLhYVTvUmZsQ8cCuiCgL-N3nfjoSev54bp4RkbdrNQeUu214SydAm4OVHvHZaxfdlx9t5tYWdoAUVl00wx_cFdUHHyjdG3m8kolhMOlZ5nosQi7lq2-wFV2W9vx2LmQxg9FNvHO5Kce46I-ZahwJxiToBa2Hni5FENc3j796Qh9MAGxIHLLarD_VhVa0O2mJMXiAAbjveFqM6426Qlra55S7n1eaByQ4xd8ZHfctJuDQahbSoE9fQ5C0Fv3zjo9OrVkliv0HDeI45EMrwgNnahJzDZpG-HqyJbVt0riwJT2Mgwf3YEI9YAy09uyxii1SnHS1uXI-XybOM7IbJpKf2ceyYG7nreBiJwXAR8lgwvpoG_yZlTIXdYVC44COJT3HbnOD0d-0GuzogipiNOWaH98erOisCbqQRfgdC9meKG8Q0nhCFfYTxWduBrpeTUaYARUA2E95qthLqwBJh4woY07S79icno4uO_ROKtVqrKckmgCt5BJzicwwX0BYs_HGNyzeUTyUtdyxTRi7wsoQjoo5sexdSRAYLIQd_kKlvZRjKTYX-77ZZ8mNwYJXGCytztR6GhHezgc1UZrEpRRyffTCVsJz-fRkODFha98PSkd_BF9Y1Z2ljtDTvf3iSaYssvXql4liYI4Ot79wDtcj89ULshq1kCI-5EqFenmFfI5vonrWQ6tu2f45eCz27l9tJ6Sn1ILKYTfyVpQipXZDZ-Wziaz8fbX4L_a4nXZ89CQFJygVciUwQaf-ZD6CHqjLMGgSZB5U9_u7eoG13rdyvjRe1gPGwaQzkGqNY5b19OobfDOL3tB7aM2uJp9MfYR9EgJgL5ofC4uN2wOU-7MT9WJqkB2g4yEO04WZvll6wdyKx9FjxbyMp7USUNUdokxKMuXP7cRWPyaG7BbGPS6s1gEk4hu9ZQeHCj_a6AJnVfJtVn2rFJ6wjnw2NWY5d3mFa_9pNUBPYixuht8_MLhBjC1s6LCvjZM-5QY2oZ2MOeKQZISYwW4cCLTKonN4a_FHkgrPopso4iswkr2ZiWul7eBtMKnZ4k9U17G99a04cUu7YM2jWxlfbQUFwqH7n1L9n0ZrsKaX8yQczCVMy-_gswsaH8Qgdhc2D6V1B26_6OQ623b5_h9cosogNxnzZdeBp-7Bp2vOaXq-EOTPQOvVBuJR2EC_Z76KHomQr-FJkYlCdjBEgFLKkMyRrF5sn700FzRFBClHWgVr9Vj3ayNzIAfCpvm-2lpMWJg849I9zdOVB8r-s2BlFPGMvRRAdmQ-Ip3t2eSTFvGUgYUVpEA2-xLHO4apsCykNCBGeh50J7OPrLdezvCFCOYp-LCVzLFlGzbXy1aljmtQDcVozagsI6jygiTnagvYT-Tz12MMq81oFd_Hz6WrRgEyG6WpvCD0uMZDEmXtrbnFcBcUfeSs4TrvrTvz9Bu6nlgVBB8xjDIpmxJbC4c__rSe-UjM3rMyXiChy69bquMWvtsverhGS5r_kwhBRdx5pGrVuDA0iHkSJAPkuTQ19clFtkME1r1yNT2VJ1xdoYcIweZpEfCswhoVRD88gHBbFWwoLMhVHiVKDtQuxngQgvCb-owbG7mT-MVqq3Mbkjn9CzihVQMh77LW6lwqLRkfEP5ytrEPf01qtbaTT32OljfKuhIaWWTqLfhetgRFZIUD_mG0IdluFzdu9DoKpx5ZoSn5oPOM6NRTv21ZsNygGgxUXpLdUHK-K-gC-CFdWCGwGTr6-ULhNBp8jDLGrtySzzi6ESOMW7Ho2F8jt9BRZ7AZm0Da1i_XnnOD6R1_oUNtauuqPT5w2MB-c2Stc67zTUuwJrQlEVM-o1ll3NX4X7_StW-zD9Lkjvcdt4b0afN1so4ZVG_hweCBcZaei61NbinNIOJfoF-_DWl_Ntb_PKxgmga8QtoqB23Efcyyh0jG2EeS7WtEFFWd7zLXUydyWVAxwnLLXBCi6YreHq_yluUV4StaX_C-36kkfGSh2H3VLhoz3KUFVDXVoSttZbBaiufZy9NoHCZn9-fT9BrB6wzMB4SI5Ptpo7akaUpyPzvq78dz70M7wS3urzA_iinUKIdrCjXlDrSBGuOu1KH9n4S2MZp0LaB2Qe5-IyZRfcuVEhdBKwVFqP0cyJToOJ7bY8qW2y0cWKnGSzy_ISR4y12H4X3rnqlWa7EJn_vRi1qvhDkebbfmQ8rDBDqSQqJLE73_ynuWLFDn58MipDk3hBmR6icA2fBhwRS2HP3o9st3ihJyEL5MbfvlMcqtLjby72eO78mvHYR0fHJIipH-XaVYODFgMLnfSwtAx3Dvhrt5VKoPvDSgTRIihcExuENSupVFWZ3nRbJxIPmB5YE071jhqVoqd9HQhuParX-RdnMVX_bvS6-Ay9XRPbL157RurM2R8pSqqhKIArQDRcsonOlLjZ5rve7U-treQfy4zGhMzlOSv_-fQWJVGUsoFkmeIQLqg9fICosMbx8aVhanozSNpsg3ZClrjwwi7KngyB9sMJe8mcZ6u2MdDSQ1OmGc-hWYh8lfte8p0kqKq83RF7oW3JTZRX
8k104zA93c17TMFEYDcTikKtcIaGtg8MjsObdiRm96yv9Xm9C2lC0UtQcwl6ZioVn7UHfwRhXgLWN8a-86Km8vzRosIRyuU4t9oq8QYgwl2aQH1dQdU7ifnq1Ewi1sr6y_Qz5TxLgHFNzTc-7_K4Mh92EfD08DUP4GfjOfD-mB34BFvHc4RMN0UMkuj9Hi8seBwN6lnUFH8rKUAJ2t7yqdebGfPM-7eZGi0VhgEEGAAPvuFxkcPXecF6_cwhaBLPxeRGkunrXGS-jW3VYca0Cd8TRFlv3jZf8mcufuJVzzCIm4bgHHAnL8nF_FczFziIrHpF8FWlNvgLHnjvFE5eHc8MFrsnu3MHfbwbpG_IVCfmKRW9WXGrYBqontB8SaV_BHMuy2ypnxlWT7BzHyzRq7nQ7xRfs30m-85Znm7he-f05vptyIBYo4r7WD3okVdj6UTXb15EaNDrAanM7rhCc7Qg2k1X_WMqb30nfQfMf-TGv9FPuCRS0fy4ppUDMd1MWXmT4mS0brG1U6Kztt_n0hxfXXn8aoUHVfCkjo-CissSrTGagepZR_-UvVn89vprB1vs1DjKmDgZ8fwbjpx0rK5ET3e32sKA4YC7wNhW2s1tbhjqp675hHn0RtSPqzJX9NMf8Wp_on3juXirSvSvNGEdVSasd8_-the89PdFga285C88boe-1wYUlG8JmAbaT8ruGaM9hReodGh01krgETNrE88NMD7U0rtLAsU2-5g2v-dnfalvTRfkGnPyVtIjQrMZ-cGjMJqOI_S7lzFfsx7dbehZbDF4huRsmYCcnhfUfExVCJogGt9mKaEhK13EJMrV6jrEqf3k2gh515g4aYtAwLELZpJXGWdXLavk5Bt-yi9Jpi1O_SI2NQ7-wnIE0oFJei1_jTt_YeNQGej02oS1IO5hiKipAUG58xj4XvnJ77GhxGZ4RcpnDzfLlhrclSJEDxdmNZsaAHApOp0XzEaml6HN3iUsC9kjw134r7I9ZkRDWSsdl1uGC8g8-P5OUjfMwFhUm6TET0Bi6yxwMgeQ_QRZ8EBS0hMn8eFMqz1tZHjgO-nJLrpdN7joC66ZWUCama3hqTtElvyqsoWeYnbBjzm00-40GCxUs0xVZwVcW41KN9M3QNDrjopSuTMYXe33EqUq9Hozg_CuZh7Or5XketITk9zk55eKFoogog7-kl54hrGOsSmBM8JChAnH1pEAI8Y-WAUhfdQaEl_6xLjQnUTeuC5M08qEVz5KMgpSunj3WQF1X6AW0tqTNmIhjUwfb0CUOHVNaID_IY-OWSClnwUxf9r5qLb3qGlqM_uxxmr_-2kIJ4GIe8Mq-GMc2slJLK4uC36zbPmajnxUxeGBcfmAVoy_YuC4HvmlkLlRlS52ODoQ6VQ_2DTx3JOgWrJrSuCL993KXIeXaKY3fm8zC429_SMutJCGt9uOihRWka9W3qh3NOZUTcAlPDdCLY3rFPosYMu02qh-Mv0DfevNfngJHnsBojNztKjpEL75Xr7Xl_FQL1pzh5pkK548Nlip8BUdYuTZwaFMvEUcwPkeAbNtoo5evWzgVb1XK_ycUW-TEraqFvo9Ke7tSG88S9PweMbIDIJw5J4XeSRXPpZx4nbd7SIb8yAlhiP1ScJ6TJGf1GNqgU_pNYpFCHasxS-xQvPMsnHun5KcqLb8dgwe16mu1hu2jkYSgH7AYuoyHTdYwt852BOk1_9f6i0vZrtP4hmeZUykEZNfpu1VVW7XTTTu-MlYtnu4pluB06QMwNLAeZ3ZDijzW9PsrzO2_MkPtw7UruM4upWpWIjDghwdOdkcdLBO_kGfUJsfMsvF1sVEuMorwvZpxnLHCvSCoYocTDQdGI88kHgwM-czlpcI8isOE6uqO9iKvTFQFXIUgn2rMC1PtKMBbA1KRC-548ofmTHXRr6rv8gm_XDU0S6UZ4wxubSu9IbvgIptPL9U26GdQFV30hybTDsz4eWwdkXBzA6xbqq1p_YBSLYONm9L0BYJJbFkEnDoUQbCvubHHiu0f0Gi38e3aooQ2LkWTbpyE1ZipdrW-0FZmEctIDq7JNZol_TOWXP3_12YyY-_E7ZMtobabuowZrBE_DYPtM-kcu3v2Jr1Zc59WM9bjcwJHca3CcvbQl6I39d3UZMrDH8lm_aMY837jtFv0cfnJZ6wsLPoEJbHgE1hGmdVeuEeoNWYKi_BaMUI-4GiCOosEkXbh8lBfTYJAt1Ob3G_BLNCg0xMmHmlaGl40PxntNypfBZ8oq-GzlQa9X77OGMnsCgDAv4R262ESVbxi14Yc21lsC0J_VQH50l7d9o67-Na_5Vixcft6PbX3ax5Jt7n6njZYqrdD8CtowWJkNpt4dMOhsxMyNYk5D0aEOpa8ZA_6koRWHirDIgX6ByFtIS7bxWqIhTT2pAmqAWjPqf_SPj2N4sy--rBqkZKwwmbY3gOZGBzFqfhfP98PxFWn38OuRzAgq8I2ftsrhFTNpTRNmK5bzNFX-EIwZBLggJcuvicWiIU-6uDQNIwpFEVhqLDSmesmzvbznGwXpSjnkVJZ_ZwWjNj98ABdTdoZJQTBWGaenInuec7rTbIU1wvO275UuFI-hlf2SgqkXFoUr-SgMnO2Z797m3ziulLwJpxOSSAYfykAmPAghBHZcLYX-t10vs4vWgRiZnQ6Ao9fUd67b9RGRiAjDfIBLCCH9GjLAePLiyDCgMzI5HIOpsVO89ZhaKv9wieuVk4RKkFHmPfA8JteWzEv-b6SU3ZipWliQ19u4FkPduQiXklX8Njri7HXV-bs-aud4r_MMyg6kGDuvnm6KIRvk7-q0ioD_ZsLytN5lz0TD0a_Ef-liagWu2sMCbnVUdxiO7Y3wKOrvwRCFlkJ_vufneohDV4H2PMxGt564xdZq8lQNNUwk6H_lOenZzf5eTxIkiGDDhvUfqIq6cMqNJ0AAm6HKxsuh21_ApMFzz1Xr8jz8_vCM-jqfKNQEZe-M6SsU45a_nJTL22mG8yk9hPM-wVZmbV-a6roIlK-wwmLfsjjC-7hV_ArOgI7CN-oi-aTcHqWAGZ-L7o4DSNA5q_gb4NnNZa0PELH5cmemLSYkY1oER_8oe52jxzbsmnIgt-fwZ4366BicPRrpzTyCkPzgUss2YxwcrVDIo-rHJtDdn8wabFhPB4_61nFMgmezBl-FO9Kdo1ILd32E_fAzjDTjKvbiLIX_wxDI8WFzFZbabXtmhOnoL4ebBYXBGbk5E-RNoT2-8JVq-z3nG-tt1qNGnTovmOef5H_PODbO1COp7fOfMZQUK0gw5IoD1PWu6BIf4-UavhHMZX2JITo10GvmgG8K5m2TxXqSXkiVTJGMDxtHkAEuSpd2CFuQeM_5tjdwkrE19yP06a03oH8xPjjc7TXG3et280kXe4KPyXil4-i3BDB06bWFiNK6C8EhmtLgOrQAcrmQReirzo6yIAPZheDIwu5raGKLrnU0i5Y1ErWfF_0ANFnvxicIA4P1RhsKPC2HvQpLB4HttnNTu0gt5L9vcwJ33lh_zUsfsOdjqMc6a
vOmM3CUF2UIGg2OTjsYCWNwwVGZ9zYm28JUYHiQGB9DEraW_CKVRCHyfe_yjLqajBhLX737VgxcbYEdIVbcy5S5Qy8Raf1JLriFIYukt8qsh455ar0MXgnr5TlHo8BtKUeZOw1BxdgvHE49QSudqd4s22uRLPUmv7IbmxK6eFUpcT1YyCS6IeoHe8lBnrXnsC3RsWmeSdHxHAP2TYy3khnPB-f2U1jqmQD2EwJgTwmch9cFgoYPt3LrSn4KaPDJuO0DwReMJJaSfomQE7zTMclefZByJW-rgLBJAMynvtkDg8WvljuNwk0rNjDwEeTQc7uzv0MbHb3bAX5tAhF8ZMSD18aniOz_C91TSGkv6vtVi4RRd1XzpFmOpkRHQw5ypN70HNMCWZU78jiTWVZDTx2fGxGx9iTK9Hp705SitAc9-IeTo7_N3Ofl85OQeZp-VPliFqZMoQ5D13B54XcD1XnmYHldOk_GB7GEpgFkmqJYsC9gkadn-_pypAgf-_szO34dti61gyb_67B9wliJ8x04VartfKNa8p1citQUt_KeDtUacgTuKABJva-LCukKEHOEh8zicFdURhPuTB14xsLJQ7Mwk59hVr-uAFHhbbrejUT3TFpu7Pk-lrHgWEoI8JrfZJ8YCoAMA8SQdfEoYAFnKjDhoBlqExSrwOVQbgfvYHprNAU-rIqiiMCKmc1yuVq_i2qa-UlWgIKqe05zYbLovKPpU8uzc46ugQaG5KsyiYZ90pI4YMcpwccLCY2oysp-uGFuIQ6LTzizY41wgM5y9dBkEETRDwNl9OKuArKYXV-tzO-NW2BwAmyCGNALkwuGbx09BLxZnOBT74ADO-or4SsmV9Vdw2zrpIXmOfCxX02637R6RjsvPQZkeeMJfI8_L3gZeCf0_qlM1KuNLfdUjfWN7MT2oqBh1dj5DDS0XcFhtHyXrmq-VL4j9MAk8saiwWssOnlmstS59zCXai60O2_4QUFcSmnyFkhMlTalIkpd1YBepbMKuNluP_3YZOpIDNt9cTYKRY8RJaUT0pm_p-C3wRTx6YmpzEpzt_i1cpnnvDYns3H1sY4YnG5FL9gUzlRGgOZ_t0TIA5qQr7EQLBU0K-r3dmd_oIlZhEsuVR4hKA4Tj-OXuMgmKKYqdXXDS3331mOzEaYEEnuSjvOuMU_nzKdtJ2Wfj6pLO9ZDV8dR6V1aXGFn6UDXyPGbxgvm7AKmV68jxfj5GTCLmKpi8tFLBklgwvT3VXLOyQJbdi3_3FrexIdEjuHG0vPhDAUpcL3yVORNbPEzrv_Hc4oGJp5e8QyHVGdoLDX4e-JSrmXZe0H0SQr51nWotqpu700pQcyQr_-hs0J894boqsuHNgbCqGlW5NEUmMDDyIe2XqXhf0vQ3BW8AnS1-A25liexO8JoextvjtO8zZ2Xf85SCdmmX4J9NU1xlqMYa6rjQRj8aZkK5GT-QEy6S67L6dRAd0jRIfqds3RmxgjG3myET4TAqcZiMO2p8jFaauobQ9kWQa15kdBN95iCEOkwRsnHTrElkysU5viWhvZ16RaNz2yK8GNcyxoF-mgtOalRD17YjXTR7jxlfgO_Z8fmvQjhJgN3wHw0dXNa-xuPvrKd4xiIExvXZJ6F1zu_r7QJsQCAXGpJM5pw6cKxOue6oC8Afa6Y4r6VKO39EjrWzGF9kSjjZVWuhKzGq3RkSgrrEv4fe4tGBklXxjsdOst4p0XaB1oTEbEqMnJryA2TSRCzrIOi4F2SWNikAEh6o0V1XBjk5wUirAggHl05zgavCf7k-t3x860By2AGPk0q-haRqM9zmQED-TzToODLXygZe22X_wGmGmzBsjiJpYshF4-u8BMnVhWEnb0Q0CTxgs4BM08Q_NSNjGAEbWmD-EQJTtlXfjF31gIzbffz7BV_3gx2t3I661WNYnuT2-bUcgqVcPFwDv9Eb71Nz6Fgfbuz6Aj_t1-INEjDcaX24yDOcPECVcoEMisMBbzMY4wb7jKKcrwcs3WBPyRL3iR8e-ALz7UBofB8Z_IkGBsXGoeFG2gDoecVvKQdZtgrgvV5zGKn4v4_du5O2bH106FCBFNF02Q1-xbh01CZ7ZueduToNWk6Ii5EE9JvbTzwo7OJC9WTeMomtu9kqSsKuF5E5RYIR5utBK6n0yus6ZvBCUpAfXo3cM8svA7ulUvHXoMX_j_ypHt_Z1dMjPeZCzGshAaUNKcGePfTQXA2hn6YszV2rYg94YwlF2Dx2K5Hui0A56lEcjd_LOY-KvqEppP_xdn8YPhFtQDZw6PyGeJifgTEl2gjQkfqvWy62dt0gizI-GJ2Ovwkh76iPdnV2gTRW48DLqEe0-z-55WSH1FK211fWQSPWc0OnWsKDwUTzE1wUfdDx8FeW50K9T9QOaDAFTa0aBkftvACIuYdvfKM4tv2ff0ymThGYhYNG9eHNIoy18bfaV18ZgnfacNEXtSU6BWxXqlay0Z6lJiqMZi2VutUMVR8f5asoFg8cLpTaf9mrJ4tnpVBmZLVNSIr4mVLrVudm6yicFw3B1iun3XkcF4JrkNRoWyrz6v_lu87wkpDl07PEUNa7BXizLnsQhMeR-2B83r0QZC16J2EaG9yPmeOmM1YKJca1P1P0NcMwYCONuz7x45WoO6IEAjxN4W87tFutQlxeM4ut7BVoBM19iRgXI2HlCjbOeuQ3zCZR9khbGNyOkPn-Hi7qLMRbolDjbEYDdYX4eM1LlJB-LQZiUGsa2kXNiJ2y6WwDmDsbZQ6fmIGfPPnl7tto3qszYxHwoRMxzJFMJqRg3zjpdQRsTA4hSCYkZLC4ow7q1fAv-DSgXlEIiCNzQ-aaudjUkmpvPfHUe8zhX9i74z38LNZ6uw00yfxy7mDbTPC-Rr1ZPpK2R2UUP4SjF_H87rQPLRgVsVwKgP5tJ8jA6lBbuwKdPlrUFOYicwhQpPQQ0QLAzlS2ve8t2zyYpwK5Xr4AJlFP3rXs9cZstsLfi-0SOemNETfcuwzmPMcQZyg0Pu1dwER1LtEdu8Uz0oKvpVWVr_t-eMvpOzcSRRo6ALA-f48cQMvfkWuSJHyZoTNdk_3sok1cbWoLTzK3_JVVD1ML_rl1JLmVekvWclI5X4a3bJPIMmEYBJ_M9dDEzPj8p9LeRUb9mn2JNF7OAtfSDInuoal8-JCPOe6c9Hs2B3ulTqMO9d3cLDk-h9LR3LwSlSCTCRcXY1DR0Cwqmf4V1Q9QCGJgzdh9D0Hioh72WdHlhkhn11JqCMRFQR0guIXvTaIx0aXB2ytNWo-2zFQEqNVM2NsB2KKIMHpjuRkO13pWT3m4Thy2liJFHMwKgGkoCcmTxVYT051fpWZJCnaiZqNwPheNfCY2nDTAOv5r8Fnd1nB2KXNcD9CycBv2tQ5Wn5LQeXbyM_hNauEBjM2i9qyYqGDSSMIjjkXeeJhd1u3k0F_CIwWBaPCV384NgW1sCQ==
+
+# Feature SyncDelta JIRA configuration
+Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = PROD_ENC:Z0FBQUFBQm8xRjFmUHNhOGsxT0lOaHhLTUxNUElMRlZtNWl0TlhKRWxQNWxTT0ljX3N4VEVUNnZGUzFfMy1qZVFlZzl6Z3FIT2NXdXdZYXpyUnNGUXFUMF9oZVIzdXpqRVlBWlp6TUhkMURnb2E2R1R4dGtRZ0J3VHFaM2w5STBEaW5TN0JtRS01UXdUd0xZbjd2LXc2U0Zpc3duem1SMmJCUjg0cGxPaHA0YXhITFJrZmIyUjYxWm9PZ2ZVUGNHYXl4ZkVkZ21KakdJNzhMNkRMWGFzeUNWcGhTaUM2UWdjTHZVemJ5WnJ0M3hPbndrLURYYV9SSVJwM190NEd0M3V3SzNyaW1vLWZJdHc5QlZwLXNUV2hJdHNKbTZiREY1YTVUclVpYTVkSFAxYVR1ek9JZ3VyZzVFRzQyYUpiZ3psdFlZbHdpblZVNF85Y1lwWEdTT1hOajNBUGpMMi1zOWdhNElBa0cwMFFGeTZpV25rSGJRRjZWenZPRXdWdGczSGNfM2lPUFdsYUcxWHRrTlFzRGViY1hWWU1LMG1EWUl0cnJvVHRCZnNNZGZ0MGFQUDF6TGpqTlRKcjV2YjF2RDBEVktaWmF1SkE4ZVJWeDJ3TWlVSmpCeEtXdlNsOXY5RDY3d2JPWUtINHRZVmJoaEUwYnpyZFExYWlKV3JGeDZSd20yTlE4LU14YzlYNUlIWG9NbXhqbWJUQ0hJTzhRbTlnenRxQ2V0dEhDcWpEN2lETjR1TkhRMFdpYjBuSEN3MzlVZ1dQbld1YXlhSDdfV3FCdXVkRHZMZDVUZkpjdHdoazl2OFNQc0VHM2dWdEZLaVh4Y2FsWFRwci1GQ3pxTkhoNk9uR0dDVTdndG9UMmR5SlFKOUxqXzR1YVpKNjBfd2FCY0xsMDk1bHpFd05LV3NjT3FFOFZPUEdnbElUdDZpZ3JhNFMtalVWT09PbWxvMWlMUTVHdU5JZnZTbjNLek1nPT0=
diff --git a/modules/connectors/connectorDbPostgre.py b/modules/connectors/connectorDbPostgre.py
index ce1cfe9b..c17fa2c3 100644
--- a/modules/connectors/connectorDbPostgre.py
+++ b/modules/connectors/connectorDbPostgre.py
@@ -6,19 +6,34 @@ import logging
from typing import List, Dict, Any, Optional, Union, get_origin, get_args
from datetime import datetime
import uuid
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
import threading
import time
-from modules.shared.attributeUtils import to_dict
+from modules.shared.attributeUtils import to_dict, ModelMixin
from modules.shared.timezoneUtils import get_utc_timestamp
from modules.shared.configuration import APP_CONFIG
-from modules.interfaces.interfaceAppModel import SystemTable
logger = logging.getLogger(__name__)
# No mapping needed - table name = Pydantic model name exactly
+class SystemTable(BaseModel, ModelMixin):
+ """Data model for system table entries"""
+ table_name: str = Field(
+ description="Name of the table",
+ frontend_type="text",
+ frontend_readonly=True,
+ frontend_required=True
+ )
+ initial_id: Optional[str] = Field(
+ default=None,
+ description="Initial ID for the table",
+ frontend_type="text",
+ frontend_readonly=True,
+ frontend_required=False
+ )
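+    # Example instance (illustrative values only): SystemTable(table_name="User", initial_id=None)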
+
def _get_model_fields(model_class) -> Dict[str, str]:
"""Get all fields from Pydantic model and map to SQL types."""
if not hasattr(model_class, '__fields__'):
diff --git a/modules/connectors/connectorTicketsClickup.py b/modules/connectors/connectorTicketsClickup.py
new file mode 100644
index 00000000..7e07a830
--- /dev/null
+++ b/modules/connectors/connectorTicketsClickup.py
@@ -0,0 +1,145 @@
+"""ClickUp connector for CRUD operations (compatible with TicketInterface)."""
+
+from dataclasses import dataclass
+from typing import Optional
+import logging
+import aiohttp
+
+from modules.interfaces.interfaceTicketModel import (
+ TicketBase,
+ TicketFieldAttribute,
+ Task,
+)
+
+
+logger = logging.getLogger(__name__)
+
+
+class ConnectorTicketClickup(TicketBase):
+ def __init__(
+ self,
+ *,
+ apiToken: str,
+ teamId: str,
+ listId: Optional[str] = None,
+ apiUrl: str = "https://api.clickup.com/api/v2",
+ ) -> None:
+ self.apiToken = apiToken
+ self.teamId = teamId
+ self.listId = listId
+ self.apiUrl = apiUrl
+
+ def _headers(self) -> dict:
+ return {
+ "Authorization": self.apiToken,
+ "Content-Type": "application/json",
+ }
+
+ async def read_attributes(self) -> list[TicketFieldAttribute]:
+ """Fetch field attributes. Uses list custom fields if listId provided; else basic fields."""
+ attributes: list[TicketFieldAttribute] = []
+ try:
+ async with aiohttp.ClientSession() as session:
+ if self.listId:
+ url = f"{self.apiUrl}/list/{self.listId}/field"
+ async with session.get(url, headers=self._headers()) as response:
+ if response.status != 200:
+ logger.warning(f"ClickUp fields fetch status: {response.status}")
+ else:
+ data = await response.json()
+ for field in data.get("fields", []):
+ fieldId = field.get("id")
+ fieldName = field.get("name", fieldId)
+ if fieldId:
+ attributes.append(TicketFieldAttribute(fieldName=fieldName, field=fieldId))
+
+ # Add common top-level fields
+ core_fields = [
+ ("ID", "id"),
+ ("Name", "name"),
+ ("Status", "status.status"),
+ ("Assignees", "assignees"),
+ ("DateCreated", "date_created"),
+ ("DueDate", "due_date"),
+ ]
+ for name, fid in core_fields:
+ attributes.append(TicketFieldAttribute(fieldName=name, field=fid))
+ except Exception as e:
+ logger.error(f"ClickUp read_attributes error: {e}")
+ return attributes
+
+ async def read_tasks(self, *, limit: int = 0) -> list[Task]:
+ """Read tasks from ClickUp, always returning full task records.
+        If listId is set, read from that list; otherwise read from the team.
+ """
+ tasks: list[Task] = []
+ try:
+ async with aiohttp.ClientSession() as session:
+ page = 0
+ pageSize = 100
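+            # Assumption: ClickUp returns at most 100 tasks per page, so a short page
+            # signals the last page (see the break below).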
+ while True:
+ if self.listId:
+ url = f"{self.apiUrl}/list/{self.listId}/task?subtasks=true&page={page}&order_by=created&reverse=true"
+ else:
+ # Team-level search for open tasks
+ url = f"{self.apiUrl}/team/{self.teamId}/task?subtasks=true&page={page}&order_by=created&reverse=true"
+
+ # Request with parameters to include all fields where possible
+ async with session.get(url, headers=self._headers()) as response:
+ if response.status != 200:
+ errorText = await response.text()
+ logger.error(f"ClickUp read_tasks failed: {response.status} {errorText}")
+ break
+
+ data = await response.json()
+ items = data.get("tasks", [])
+ for item in items:
+ tasks.append(Task(data=item))
+ if limit and len(tasks) >= limit:
+ return tasks
+
+ if len(items) < pageSize:
+ break
+ page += 1
+ except Exception as e:
+ logger.error(f"ClickUp read_tasks error: {e}")
+ return tasks
+
+ async def write_tasks(self, tasklist: list[Task]) -> None:
+ """Update tasks in ClickUp. Expects Task.data to contain {'ID' or 'id' or 'task_id', 'fields': {...}}"""
+ try:
+ async with aiohttp.ClientSession() as session:
+ for task in tasklist:
+ data = task.data
+ taskId = data.get("ID") or data.get("id") or data.get("task_id")
+ fields = data.get("fields", {})
+ if not taskId or not isinstance(fields, dict) or not fields:
+ continue
+
+ # Map generic fields to ClickUp payload
+ payload: dict = {}
+ for fieldId, value in fields.items():
+ # Heuristics: map common field ids
+ if fieldId in ("name", "summary"):
+ payload["name"] = value
+ elif fieldId in ("status",):
+ payload["status"] = value
+ elif fieldId.startswith("customfield_") or fieldId.startswith("cf_"):
+ # ClickUp custom fields need separate endpoint; attempt inline update if supported
+ if "custom_fields" not in payload:
+ payload["custom_fields"] = []
+ payload["custom_fields"].append({"id": fieldId, "value": value})
+ else:
+ # Best-effort assign to description for unknown text fields
+ if isinstance(value, str) and value:
+ payload.setdefault("description", value)
+
+ url = f"{self.apiUrl}/task/{taskId}"
+ async with session.put(url, headers=self._headers(), json=payload) as response:
+ if response.status not in (200, 204):
+ err = await response.text()
+ logger.error(f"ClickUp update failed for {taskId}: {response.status} {err}")
+ except Exception as e:
+ logger.error(f"ClickUp write_tasks error: {e}")
+
+
diff --git a/modules/connectors/connectorTicketJira.py b/modules/connectors/connectorTicketsJira.py
similarity index 87%
rename from modules/connectors/connectorTicketJira.py
rename to modules/connectors/connectorTicketsJira.py
index 508a357a..ffeed2ca 100644
--- a/modules/connectors/connectorTicketJira.py
+++ b/modules/connectors/connectorTicketsJira.py
@@ -1,47 +1,32 @@
-"""Jira connector for CRUD operations."""
+"""Jira connector for CRUD operations (neutralized to generic ticket interface)."""
from dataclasses import dataclass
-import os
import logging
import aiohttp
import asyncio
import json
-from modules.interfaces.interfaceTicketModel import (
- TicketBase,
- TicketFieldAttribute,
- Task,
-)
-
+from modules.interfaces.interfaceTicketModel import (TicketBase, TicketFieldAttribute, Task, )
logger = logging.getLogger(__name__)
-@dataclass
class ConnectorTicketJira(TicketBase):
- jira_username: str
- jira_api_token: str
- jira_url: str
- project_code: str
- issue_type: str
-
- @classmethod
- async def create(
- cls,
+ def __init__(
+ self,
*,
- jira_username: str,
- jira_api_token: str,
- jira_url: str,
- project_code: str,
- issue_type: str,
- ):
- return ConnectorTicketJira(
- jira_username=jira_username,
- jira_api_token=jira_api_token,
- jira_url=jira_url,
- project_code=project_code,
- issue_type=issue_type,
- )
+ apiUsername: str,
+ apiToken: str,
+ apiUrl: str,
+ projectCode: str,
+ ticketType: str,
+ ) -> None:
+ self.apiUsername = apiUsername
+ self.apiToken = apiToken
+ self.apiUrl = apiUrl
+ self.projectCode = projectCode
+ self.ticketType = ticketType
+
async def read_attributes(self) -> list[TicketFieldAttribute]:
"""
@@ -52,22 +37,22 @@ class ConnectorTicketJira(TicketBase):
list[TicketFieldAttribute]: List of field attributes with names and IDs
"""
# Build JQL dynamically; allow empty or '*' issue_type to mean "all types"
- if self.issue_type and self.issue_type != "*":
- jql_query = f"project={self.project_code} AND issuetype={self.issue_type}"
+ if self.ticketType and self.ticketType != "*":
+ jql_query = f"project={self.projectCode} AND issuetype={self.ticketType}"
else:
- jql_query = f"project={self.project_code}"
+ jql_query = f"project={self.projectCode}"
# Prepare the request URL (use JQL search endpoint)
- url = f"{self.jira_url}/rest/api/3/search/jql"
+ url = f"{self.apiUrl}/rest/api/3/search/jql"
# Prepare authentication
- auth = aiohttp.BasicAuth(self.jira_username, self.jira_api_token)
+ auth = aiohttp.BasicAuth(self.apiUsername, self.apiToken)
try:
async with aiohttp.ClientSession() as session:
headers = {"Content-Type": "application/json"}
payload = {
- "jql": jql_query,
+ "jql": jql_query,
"maxResults": 1
# Don't specify fields to get all available fields
}
@@ -100,9 +85,9 @@ class ConnectorTicketJira(TicketBase):
fields = issue.get("fields", {})
for field_id, value in fields.items():
- field_name = field_names.get(field_id, field_id)
+ fieldName = field_names.get(field_id, field_id)
attributes.append(
- TicketFieldAttribute(field_name=field_name, field=field_id)
+ TicketFieldAttribute(fieldName=fieldName, field=field_id)
)
logger.info(
@@ -122,8 +107,8 @@ class ConnectorTicketJira(TicketBase):
async def _read_all_fields_via_fields_api(self) -> list[TicketFieldAttribute]:
"""Fallback: use Jira fields API to list all fields with id->name mapping."""
- auth = aiohttp.BasicAuth(self.jira_username, self.jira_api_token)
- url = f"{self.jira_url}/rest/api/3/field"
+ auth = aiohttp.BasicAuth(self.apiUsername, self.apiToken)
+ url = f"{self.apiUrl}/rest/api/3/field"
try:
async with aiohttp.ClientSession() as session:
async with session.get(url, auth=auth) as response:
@@ -138,10 +123,10 @@ class ConnectorTicketJira(TicketBase):
attributes: list[TicketFieldAttribute] = []
for field in data:
field_id = field.get("id")
- field_name = field.get("name", field_id)
+ fieldName = field.get("name", field_id)
if field_id:
attributes.append(
- TicketFieldAttribute(field_name=field_name, field=field_id)
+ TicketFieldAttribute(fieldName=fieldName, field=field_id)
)
logger.info(
f"Successfully retrieved {len(attributes)} field attributes via fields API"
@@ -162,10 +147,10 @@ class ConnectorTicketJira(TicketBase):
list[Task]: List of tasks with their data
"""
# Build JQL dynamically; allow empty or '*' issue_type to mean "all types"
- if self.issue_type and self.issue_type != "*":
- jql_query = f"project={self.project_code} AND issuetype={self.issue_type}"
+ if self.ticketType and self.ticketType != "*":
+ jql_query = f"project={self.projectCode} AND issuetype={self.ticketType}"
else:
- jql_query = f"project={self.project_code}"
+ jql_query = f"project={self.projectCode}"
# Initialize variables for pagination (cursor-based /search/jql)
max_results = 100
@@ -176,8 +161,8 @@ class ConnectorTicketJira(TicketBase):
seen_issue_ids: set[str] = set()
# Prepare authentication
- auth = aiohttp.BasicAuth(self.jira_username, self.jira_api_token)
- url = f"{self.jira_url}/rest/api/3/search/jql"
+ auth = aiohttp.BasicAuth(self.apiUsername, self.apiToken)
+ url = f"{self.apiUrl}/rest/api/3/search/jql"
try:
async with aiohttp.ClientSession() as session:
@@ -286,7 +271,7 @@ class ConnectorTicketJira(TicketBase):
tasklist: List of Task objects containing task data to update
"""
headers = {"Accept": "application/json", "Content-Type": "application/json"}
- auth = aiohttp.BasicAuth(self.jira_username, self.jira_api_token)
+ auth = aiohttp.BasicAuth(self.apiUsername, self.apiToken)
try:
async with aiohttp.ClientSession() as session:
@@ -346,7 +331,7 @@ class ConnectorTicketJira(TicketBase):
update_data = {"fields": processed_fields}
# Make the update request
- url = f"{self.jira_url}/rest/api/3/issue/{task_id}"
+ url = f"{self.apiUrl}/rest/api/3/issue/{task_id}"
async with session.put(
url, json=update_data, headers=headers, auth=auth
@@ -365,3 +350,5 @@ class ConnectorTicketJira(TicketBase):
except Exception as e:
logger.error(f"Unexpected error while updating Jira tasks: {str(e)}")
raise
+
+
diff --git a/modules/connectors/connectorWebTavily.py b/modules/connectors/connectorWebTavily.py
index 59eb1396..6270d10f 100644
--- a/modules/connectors/connectorWebTavily.py
+++ b/modules/connectors/connectorWebTavily.py
@@ -35,20 +35,7 @@ from modules.shared.configuration import APP_CONFIG
logger = logging.getLogger(__name__)
-# Configuration loading functions
-def get_web_crawl_timeout() -> int:
- """Get web crawl timeout from configuration"""
- return int(APP_CONFIG.get("Web_Crawl_TIMEOUT", "30"))
-
-
-def get_web_crawl_max_retries() -> int:
- """Get web crawl max retries from configuration"""
- return int(APP_CONFIG.get("Web_Crawl_MAX_RETRIES", "3"))
-
-
-def get_web_crawl_retry_delay() -> int:
- """Get web crawl retry delay from configuration"""
- return int(APP_CONFIG.get("Web_Crawl_RETRY_DELAY", "2"))
+# Cached configuration values are loaded into the connector instance on creation
@dataclass
@@ -66,13 +53,26 @@ class TavilyCrawlResult:
@dataclass
class ConnectorTavily(WebSearchBase, WebCrawlBase, WebScrapeBase):
client: AsyncTavilyClient = None
+ # Cached settings loaded at initialization time
+ crawl_timeout: int = 30
+ crawl_max_retries: int = 3
+ crawl_retry_delay: int = 2
@classmethod
async def create(cls):
api_key = APP_CONFIG.get("Connector_WebTavily_API_KEY_SECRET")
if not api_key:
raise ValueError("Tavily API key not configured. Please set Connector_WebTavily_API_KEY_SECRET in config.ini")
- return cls(client=AsyncTavilyClient(api_key=api_key))
+ # Load and cache web crawl related configuration
+ crawl_timeout = int(APP_CONFIG.get("Web_Crawl_TIMEOUT", "30"))
+ crawl_max_retries = int(APP_CONFIG.get("Web_Crawl_MAX_RETRIES", "3"))
+ crawl_retry_delay = int(APP_CONFIG.get("Web_Crawl_RETRY_DELAY", "2"))
+ return cls(
+ client=AsyncTavilyClient(api_key=api_key),
+ crawl_timeout=crawl_timeout,
+ crawl_max_retries=crawl_max_retries,
+ crawl_retry_delay=crawl_retry_delay,
+ )
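+
+    # Typical usage (sketch): connector = await ConnectorTavily.create(); crawl settings
+    # are then read from the cached instance attributes instead of APP_CONFIG on each call.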
async def search_urls(self, request: WebSearchRequest) -> WebSearchActionResult:
"""Handles the web search request.
@@ -240,9 +240,9 @@ class ConnectorTavily(WebSearchBase, WebCrawlBase, WebScrapeBase):
"""Calls the Tavily API to extract text content from URLs with retry logic."""
import asyncio
- max_retries = get_web_crawl_max_retries()
- retry_delay = get_web_crawl_retry_delay()
- timeout = get_web_crawl_timeout()
+ max_retries = self.crawl_max_retries
+ retry_delay = self.crawl_retry_delay
+ timeout = self.crawl_timeout
for attempt in range(max_retries + 1):
try:
diff --git a/modules/features/init.py b/modules/features/init.py
new file mode 100644
index 00000000..e1ddec8d
--- /dev/null
+++ b/modules/features/init.py
@@ -0,0 +1,18 @@
+# Launch features as events
+
+import asyncio
+import logging
+from modules.interfaces.interfaceAppObjects import getRootInterface
+
+# GET EVENT USER
+
+logger = logging.getLogger(__name__)
+eventUser = getRootInterface().getUserByUsername("event")
+if not eventUser:
+ logger.error("Event user not found")
+
+# LAUNCH FEATURES
+
+from modules.features.syncDelta.mainSyncDelta import ManagerSyncDelta, performSync
+managerSyncDelta = ManagerSyncDelta(eventUser)
+asyncio.create_task(performSync(eventUser))
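+
+# Note: asyncio.create_task requires a running event loop; this module is assumed to be
+# imported while the application's loop is already active.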
diff --git a/modules/features/neutralizePlayground/mainNeutralizePlayground.py b/modules/features/neutralizePlayground/mainNeutralizePlayground.py
index 4b48a495..256b50c2 100644
--- a/modules/features/neutralizePlayground/mainNeutralizePlayground.py
+++ b/modules/features/neutralizePlayground/mainNeutralizePlayground.py
@@ -1,8 +1,8 @@
import logging
from typing import Any, Dict, List, Optional
-from modules.interfaces.interfaceAppModel import User
-from modules.services.serviceNeutralization.mainNeutralization import NeutralizationService
+from modules.interfaces.interfaceAppModel import User, DataNeutralizerAttributes, DataNeutraliserConfig
+from modules.services.serviceNeutralization.mainServiceNeutralization import NeutralizationService
logger = logging.getLogger(__name__)
@@ -75,6 +75,53 @@ class NeutralizationPlayground:
'error': str(e),
}
+ # Additional methods needed by the route
+ def get_config(self) -> Optional[DataNeutraliserConfig]:
+ """Get neutralization configuration"""
+ return self.service.getConfig()
+
+ def save_config(self, config_data: Dict[str, Any]) -> DataNeutraliserConfig:
+ """Save neutralization configuration"""
+ return self.service.saveConfig(config_data)
+
+ def neutralize_text(self, text: str, file_id: str = None) -> Dict[str, Any]:
+ """Neutralize text content"""
+ return self.service.processText(text)
+
+ def resolve_text(self, text: str) -> str:
+ """Resolve UIDs in neutralized text back to original text"""
+ return self.service.resolveText(text)
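+
+    # Illustrative round trip: neutralize_text() replaces sensitive values with UID tokens;
+    # resolve_text() maps those tokens back to the original text.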
+
+ def get_attributes(self, file_id: str = None) -> List[DataNeutralizerAttributes]:
+ """Get neutralization attributes, optionally filtered by file ID"""
+ if not self.service.app_interface:
+ return []
+ try:
+ all_attributes = self.service._getAttributes()
+ if file_id:
+ return [attr for attr in all_attributes if attr.fileId == file_id]
+ return all_attributes
+ except Exception as e:
+ logger.error(f"Error getting attributes: {str(e)}")
+ return []
+
+ async def process_sharepoint_files(self, source_path: str, target_path: str) -> Dict[str, Any]:
+ """Process files from SharePoint source path and store neutralized files in target path"""
+ return await self.processSharepointFiles(source_path, target_path)
+
+ def batch_neutralize_files(self, files_data: List[Dict[str, Any]]) -> Dict[str, Any]:
+ """Process multiple files for neutralization"""
+ file_ids = [file_data.get('fileId') for file_data in files_data if file_data.get('fileId')]
+ return self.processFiles(file_ids)
+
+ def get_processing_stats(self) -> Dict[str, Any]:
+ """Get neutralization processing statistics"""
+ return self.getStats()
+
+ def cleanup_file_attributes(self, file_id: str) -> bool:
+ """Clean up neutralization attributes for a specific file"""
+ return self.cleanAttributes(file_id)
+
# Internal SharePoint helper module separated to keep feature logic tidy
class SharepointProcessor:
diff --git a/modules/features/syncDelta/mainSyncDelta.py b/modules/features/syncDelta/mainSyncDelta.py
index 3fac4f7b..a54c504d 100644
--- a/modules/features/syncDelta/mainSyncDelta.py
+++ b/modules/features/syncDelta/mainSyncDelta.py
@@ -1,22 +1,17 @@
"""
-Delta Group JIRA-SharePoint Sync Manager
+Delta Group Sync Manager
-This module handles the synchronization of JIRA tickets to SharePoint using the new
+This module handles the synchronization of tickets to SharePoint using the new
Graph API-based connector architecture.
"""
import logging
import os
-import csv
import io
from datetime import datetime, UTC
from typing import Dict, Any, List, Optional
-from modules.services.serviceSharepoint.mainSharepoint import SharepointService
-from modules.connectors.connectorTicketJira import ConnectorTicketJira
-from modules.interfaces.interfaceAppObjects import getRootInterface
-from modules.interfaces.interfaceAppModel import UserInDB
-from modules.interfaces.interfaceTicketObjects import TicketSharepointSyncInterface
-from modules.shared.timezoneUtils import get_utc_timestamp
+from modules.services import getInterface as getServices
+# Removed direct import - now using services.ticket
from modules.shared.configuration import APP_CONFIG
logger = logging.getLogger(__name__)
@@ -25,165 +20,16 @@ logger = logging.getLogger(__name__)
APP_ENV_TYPE = APP_CONFIG.get("APP_ENV_TYPE", "dev")
-def convert_adf_to_text(adf_data):
- """Convert Atlassian Document Format (ADF) to plain text.
-
- Based on Atlassian Document Format specification for JIRA fields.
- Handles paragraphs, lists, text formatting, and other ADF node types.
-
- Args:
- adf_data: ADF object or None
-
- Returns:
- str: Plain text content, or empty string if None/invalid
- """
- if not adf_data or not isinstance(adf_data, dict):
- return ""
-
- if adf_data.get("type") != "doc":
- return str(adf_data) if adf_data else ""
-
- content = adf_data.get("content", [])
- if not isinstance(content, list):
- return ""
-
- def extract_text_from_content(content_list, list_level=0):
- """Recursively extract text from ADF content with proper formatting."""
- text_parts = []
- list_counter = 1
-
- for item in content_list:
- if not isinstance(item, dict):
- continue
-
- item_type = item.get("type", "")
-
- if item_type == "text":
- # Extract text content, preserving formatting
- text = item.get("text", "")
- marks = item.get("marks", [])
-
- # Handle text formatting (bold, italic, etc.)
- if marks:
- for mark in marks:
- if mark.get("type") == "strong":
- text = f"**{text}**"
- elif mark.get("type") == "em":
- text = f"*{text}*"
- elif mark.get("type") == "code":
- text = f"`{text}`"
- elif mark.get("type") == "link":
- attrs = mark.get("attrs", {})
- href = attrs.get("href", "")
- if href:
- text = f"[{text}]({href})"
-
- text_parts.append(text)
-
- elif item_type == "hardBreak":
- text_parts.append("\n")
-
- elif item_type == "paragraph":
- paragraph_content = item.get("content", [])
- if paragraph_content:
- paragraph_text = extract_text_from_content(paragraph_content, list_level)
- if paragraph_text.strip():
- text_parts.append(paragraph_text)
-
- elif item_type == "bulletList":
- list_content = item.get("content", [])
- for list_item in list_content:
- if list_item.get("type") == "listItem":
- list_item_content = list_item.get("content", [])
- for list_paragraph in list_item_content:
- if list_paragraph.get("type") == "paragraph":
- list_paragraph_content = list_paragraph.get("content", [])
- if list_paragraph_content:
- indent = " " * list_level
- bullet_text = extract_text_from_content(list_paragraph_content, list_level + 1)
- if bullet_text.strip():
- text_parts.append(f"{indent}• {bullet_text}")
-
- elif item_type == "orderedList":
- list_content = item.get("content", [])
- for list_item in list_content:
- if list_item.get("type") == "listItem":
- list_item_content = list_item.get("content", [])
- for list_paragraph in list_item_content:
- if list_paragraph.get("type") == "paragraph":
- list_paragraph_content = list_paragraph.get("content", [])
- if list_paragraph_content:
- indent = " " * list_level
- ordered_text = extract_text_from_content(list_paragraph_content, list_level + 1)
- if ordered_text.strip():
- text_parts.append(f"{indent}{list_counter}. {ordered_text}")
- list_counter += 1
-
- elif item_type == "listItem":
- # Handle nested list items
- list_item_content = item.get("content", [])
- if list_item_content:
- text_parts.append(extract_text_from_content(list_item_content, list_level))
-
- elif item_type == "embedCard":
- # Handle embedded content (videos, etc.)
- attrs = item.get("attrs", {})
- url = attrs.get("url", "")
- if url:
- text_parts.append(f"[Embedded Content: {url}]")
-
- elif item_type == "codeBlock":
- # Handle code blocks
- code_content = item.get("content", [])
- if code_content:
- code_text = extract_text_from_content(code_content, list_level)
- if code_text.strip():
- text_parts.append(f"```\n{code_text}\n```")
-
- elif item_type == "blockquote":
- # Handle blockquotes
- quote_content = item.get("content", [])
- if quote_content:
- quote_text = extract_text_from_content(quote_content, list_level)
- if quote_text.strip():
- text_parts.append(f"> {quote_text}")
-
- elif item_type == "heading":
- # Handle headings
- heading_content = item.get("content", [])
- if heading_content:
- heading_text = extract_text_from_content(heading_content, list_level)
- if heading_text.strip():
- level = item.get("attrs", {}).get("level", 1)
- text_parts.append(f"{'#' * level} {heading_text}")
-
- elif item_type == "rule":
- # Handle horizontal rules
- text_parts.append("---")
-
- else:
- # Handle unknown types by trying to extract content
- if "content" in item:
- content_text = extract_text_from_content(item.get("content", []), list_level)
- if content_text.strip():
- text_parts.append(content_text)
-
- return "\n".join(text_parts)
-
- result = extract_text_from_content(content)
- return result.strip()
-
-
class ManagerSyncDelta:
- """Manages JIRA to SharePoint synchronization for Delta Group.
+ """Manages Tickets to SharePoint synchronization for Delta Group.
Supports two sync modes:
- CSV mode: Uses CSV files for synchronization (default)
- Excel mode: Uses Excel (.xlsx) files for synchronization
- To change sync mode, use the set_sync_mode() method or modify SYNC_MODE class variable.
+    To change the sync mode, use the setSyncMode() method or modify the SYNC_MODE class variable.
"""
- SHAREPOINT_SITE_ID = "02830618-4029-4dc8-8d3d-f5168f282249"
+
SHAREPOINT_SITE_NAME = "SteeringBPM"
SHAREPOINT_SITE_PATH = "SteeringBPM"
SHAREPOINT_HOSTNAME = "deltasecurityag.sharepoint.com"
@@ -192,24 +38,22 @@ class ManagerSyncDelta:
SHAREPOINT_AUDIT_FOLDER = "/General/50 Docs hosted by SELISE/SyncHistory"
SHAREPOINT_USER_ID = "patrick.motsch@delta.ch"
- # Sync mode: "csv" or "xlsx"
SYNC_MODE = "xlsx" # Can be "csv" or "xlsx"
-
# File names for different sync modes
SYNC_FILE_CSV = "DELTAgroup x SELISE Ticket Exchange List.csv"
SYNC_FILE_XLSX = "DELTAgroup x SELISE Ticket Exchange List.xlsx"
- # JIRA connection parameters (hardcoded for Delta Group)
+    # Ticket system connection parameters
JIRA_USERNAME = "p.motsch@valueon.ch"
- JIRA_API_TOKEN = "ATATT3xFfGF0d973nNb3R1wTDI4lesmJfJAmooS-4cYMJTyLfwYv4himrE6yyCxyX3aSMfl34NHcm2fAXeFXrLHUzJx0RQVUBonCFnlgexjLQTgS5BoCbSO7dwAVjlcHZZkArHbooCUaRwJ15n6AHkm-nwdjLQ3Z74TFnKKUZC4uhuh3Aj-MuX8=2D7124FA"
+ JIRA_API_TOKEN = APP_CONFIG.get("Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET", "")
JIRA_URL = "https://deltasecurity.atlassian.net"
JIRA_PROJECT_CODE = "DCS"
JIRA_ISSUE_TYPE = "Task"
- # Task sync definition for field mapping (like original synchronizer)
+ # Task sync definition for field mapping
TASK_SYNC_DEFINITION={
- #key=excel-header, [get:jira>excel | put: excel>jira, jira-xml-field-list]
+        # key = Excel header; value = [direction, field path]: 'get' copies ticket -> Excel, 'put' copies Excel -> ticket
'ID': ['get', ['key']],
'Module Category': ['get', ['fields', 'customfield_10058', 'value']],
'Summary': ['get', ['fields', 'summary']],
@@ -226,25 +70,108 @@ class ManagerSyncDelta:
'SELISE Comments': ['put', ['fields', 'customfield_10168']],
}
- def __init__(self):
- """Initialize the sync manager with hardcoded Delta Group credentials."""
- self.root_interface = getRootInterface()
- self.jira_connector = None
- self.sharepoint_connector = None
- self.target_site = None
- # Initialize centralized services with root user
- from modules.services import getInterface as getServices
- root_user = self.root_interface.getUserByUsername("admin")
- self.services = getServices(root_user, None)
-
- def get_sync_file_name(self) -> str:
+ def __init__(self, eventUser=None):
+ self.targetSite = None
+ self.services = None
+ self.sharepointConnection = None
+ self.eventUser = eventUser
+ self.sync_audit_log = [] # Store audit log entries in memory
+
+ try:
+ if not eventUser:
+ logger.error("Event user not found - SharePoint connection required")
+ self._log_audit_event("SYNC_INIT", "FAILED", "Event user not found")
+ else:
+ self.services = getServices(eventUser, None)
+ # Resolve SharePoint connection for the configured user id
+ self.sharepointConnection = self.services.workflowService.getUserConnectionByExternalUsername(
+ "msft", self.SHAREPOINT_USER_ID
+ )
+ if not self.sharepointConnection:
+ logger.error(
+ f"No SharePoint connection found for user: {self.SHAREPOINT_USER_ID}"
+ )
+ self._log_audit_event("SYNC_INIT", "FAILED", f"No SharePoint connection for user: {self.SHAREPOINT_USER_ID}")
+ else:
+ # Configure SharePoint service token and set connector reference
+ if not self.services.sharepoint.setAccessToken(
+ self.sharepointConnection, self.services.interfaceApp
+ ):
+ logger.error("Failed to set SharePoint token from UserConnection")
+ self._log_audit_event("SYNC_INIT", "FAILED", "Failed to set SharePoint token")
+ else:
+ logger.info(
+ f"SharePoint token configured for connection: {self.sharepointConnection.id}"
+ )
+ self._log_audit_event("SYNC_INIT", "SUCCESS", f"SharePoint token configured for connection: {self.sharepointConnection.id}")
+ except Exception as e:
+ logger.error(f"Initialization error in ManagerSyncDelta.__init__: {e}")
+ self._log_audit_event("SYNC_INIT", "ERROR", f"Initialization error: {str(e)}")
+
+ def _log_audit_event(self, action: str, status: str, details: str):
+ """Log audit events for sync operations to memory."""
+ try:
+ timestamp = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S UTC")
+ user_id = str(self.eventUser.id) if self.eventUser else "system"
+ log_entry = f"{timestamp} | {user_id} | {action} | {status} | {details}"
+ self.sync_audit_log.append(log_entry)
+ logger.info(f"Sync Audit: {log_entry}")
+ except Exception as e:
+ logger.warning(f"Failed to log audit event: {str(e)}")
+
+ def _log_sync_changes(self, merge_details: dict, sync_mode: str):
+ """Log detailed field changes for sync operations."""
+ try:
+ # Log summary statistics
+ summary = f"Sync {sync_mode} - Updated: {merge_details['updated']}, Added: {merge_details['added']}, Unchanged: {merge_details['unchanged']}"
+ self._log_audit_event("SYNC_CHANGES_SUMMARY", "INFO", summary)
+
+ # Log individual field changes
+ for change in merge_details['changes']:
+ self._log_audit_event("SYNC_FIELD_CHANGE", "INFO", f"{sync_mode}: {change}")
+
+ except Exception as e:
+ logger.warning(f"Failed to log sync changes: {str(e)}")
+
+ async def _save_audit_log_to_sharepoint(self):
+ """Save the sync audit log to SharePoint."""
+ try:
+ if not self.sync_audit_log or not self.targetSite:
+ return False
+
+ # Generate log filename with current timestamp
+ timestamp = datetime.now(UTC).strftime("%Y%m%d_%H%M%S")
+ log_filename = f"log_{timestamp}.log"
+
+ # Create log content
+ log_content = "\n".join(self.sync_audit_log)
+ log_bytes = log_content.encode('utf-8')
+
+ # Upload to SharePoint audit folder
+ await self.services.sharepoint.upload_file(
+ site_id=self.targetSite['id'],
+ folder_path=self.SHAREPOINT_AUDIT_FOLDER,
+ file_name=log_filename,
+ content=log_bytes
+ )
+
+ logger.info(f"Sync audit log saved to SharePoint: {log_filename}")
+ self._log_audit_event("AUDIT_LOG_SAVE", "SUCCESS", f"Audit log saved to SharePoint: {log_filename}")
+ return True
+
+ except Exception as e:
+ logger.error(f"Failed to save audit log to SharePoint: {str(e)}")
+ self._log_audit_event("AUDIT_LOG_SAVE", "FAILED", f"Failed to save audit log: {str(e)}")
+ return False
+
+ def getSyncFileName(self) -> str:
"""Get the appropriate sync file name based on the sync mode."""
if self.SYNC_MODE == "xlsx":
return self.SYNC_FILE_XLSX
else: # Default to CSV
return self.SYNC_FILE_CSV
- def set_sync_mode(self, mode: str) -> bool:
+ def setSyncMode(self, mode: str) -> bool:
"""Set the sync mode to either 'csv' or 'xlsx'.
Args:
@@ -261,60 +188,19 @@ class ManagerSyncDelta:
logger.error(f"Invalid sync mode: {mode}. Must be 'csv' or 'xlsx'")
return False
- async def initialize_connectors(self) -> bool:
- """Initialize JIRA and SharePoint connectors."""
+ async def initializeInterface(self) -> bool:
+ """Initialize SharePoint connector; tickets connector is created by interface on demand."""
try:
- logger.info("Initializing JIRA connector with hardcoded credentials")
-
- # Initialize JIRA connector using class constants
- self.jira_connector = await ConnectorTicketJira.create(
- jira_username=self.JIRA_USERNAME,
- jira_api_token=self.JIRA_API_TOKEN,
- jira_url=self.JIRA_URL,
- project_code=self.JIRA_PROJECT_CODE,
- issue_type=self.JIRA_ISSUE_TYPE
- )
-
- # Use the admin user for SharePoint connection
- adminUser = self.root_interface.getUserByUsername("admin")
- if not adminUser:
- logger.error("Admin user not found - SharePoint connection required")
+ # Validate init-prepared members
+ if not self.services or not self.sharepointConnection or not self.services.sharepoint:
+ logger.error("Service or SharePoint connection not initialized")
return False
-
- logger.info(f"Using admin user for SharePoint: {adminUser.id}")
-
- # Get SharePoint connection for admin user
- user_connections = self.root_interface.getUserConnections(adminUser.id)
- sharepoint_connection = None
-
- for connection in user_connections:
- if connection.authority == "msft" and connection.externalUsername == self.SHAREPOINT_USER_ID:
- sharepoint_connection = connection
- break
-
- if not sharepoint_connection:
- logger.error(f"No SharePoint connection found for user: {self.SHAREPOINT_USER_ID}")
- return False
-
- logger.info(f"Found SharePoint connection: {sharepoint_connection.id}")
-
- # Get fresh SharePoint token for this connection
- from modules.security.tokenManager import TokenManager
- sharepoint_token = TokenManager().getFreshToken(self.root_interface, sharepoint_connection.id)
- if not sharepoint_token:
- logger.error("No SharePoint token found for Delta Group user connection")
- return False
-
- logger.info(f"Found SharePoint token: {sharepoint_token.id}")
-
- # Initialize SharePoint connector with Graph API
- self.sharepoint_connector = SharepointService(access_token=sharepoint_token.tokenAccess)
# Resolve the site by hostname + site path to get the real site ID
logger.info(
f"Resolving site ID via hostname+path: {self.SHAREPOINT_HOSTNAME}:/sites/{self.SHAREPOINT_SITE_PATH}"
)
- resolved = await self.sharepoint_connector.find_site_by_url(
+ resolved = await self.services.sharepoint.find_site_by_url(
hostname=self.SHAREPOINT_HOSTNAME,
site_path=self.SHAREPOINT_SITE_PATH
)
@@ -325,7 +211,7 @@ class ManagerSyncDelta:
)
return False
- self.target_site = {
+ self.targetSite = {
"id": resolved.get("id"),
"displayName": resolved.get("displayName", self.SHAREPOINT_SITE_NAME),
"name": resolved.get("name", self.SHAREPOINT_SITE_NAME)
@@ -333,14 +219,14 @@ class ManagerSyncDelta:
# Test site access by listing root of the drive
logger.info("Testing site access using resolved site ID...")
- test_result = await self.sharepoint_connector.list_folder_contents(
- site_id=self.target_site["id"],
+ test_result = await self.services.sharepoint.list_folder_contents(
+ site_id=self.targetSite["id"],
folder_path=""
)
if test_result is not None:
logger.info(
- f"Site access confirmed: {self.target_site['displayName']} (ID: {self.target_site['id']})"
+ f"Site access confirmed: {self.targetSite['displayName']} (ID: {self.targetSite['id']})"
)
else:
logger.error("Could not access site drive - check permissions")
@@ -352,14 +238,16 @@ class ManagerSyncDelta:
logger.error(f"Error initializing connectors: {str(e)}")
return False
- async def sync_jira_to_sharepoint(self) -> bool:
- """Perform the main JIRA to SharePoint synchronization using sophisticated sync logic."""
+ async def syncTicketsOverSharepoint(self) -> bool:
+ """Perform Tickets to SharePoint synchronization using list-based interface and local CSV/XLSX handling."""
try:
logger.info(f"Starting JIRA to SharePoint synchronization (Mode: {self.SYNC_MODE})")
+ self._log_audit_event("SYNC_START", "INFO", f"Starting JIRA to SharePoint sync (Mode: {self.SYNC_MODE})")
- # Initialize connectors
- if not await self.initialize_connectors():
+ # Initialize interface
+ if not await self.initializeInterface():
logger.error("Failed to initialize connectors")
+ self._log_audit_event("SYNC_INTERFACE", "FAILED", "Failed to initialize connectors")
return False
# Dump current Jira fields to text file for reference
@@ -375,203 +263,539 @@ class ManagerSyncDelta:
logger.warning(f"Failed to dump JIRA data (non-blocking): {str(e)}")
# Get the appropriate sync file name based on mode
- sync_file_name = self.get_sync_file_name()
+ sync_file_name = self.getSyncFileName()
logger.info(f"Using sync file: {sync_file_name}")
- # Create the sophisticated sync interface
- sync_interface = await TicketSharepointSyncInterface.create(
- connector_ticket=self.jira_connector,
- connector_sharepoint=self.sharepoint_connector,
- task_sync_definition=self.TASK_SYNC_DEFINITION,
- sync_folder=self.SHAREPOINT_MAIN_FOLDER,
- sync_file=sync_file_name,
- backup_folder=self.SHAREPOINT_BACKUP_FOLDER,
- audit_folder=self.SHAREPOINT_AUDIT_FOLDER,
- site_id=self.target_site['id']
+ # Create list-based ticket interface (initialize connector by type)
+ sync_interface = await self.services.ticket.createTicketInterfaceByType(
+ taskSyncDefinition=self.TASK_SYNC_DEFINITION,
+ connectorType="Jira",
+ connectorParams={
+ "apiUsername": self.JIRA_USERNAME,
+ "apiToken": self.JIRA_API_TOKEN,
+ "apiUrl": self.JIRA_URL,
+ "projectCode": self.JIRA_PROJECT_CODE,
+ "ticketType": self.JIRA_ISSUE_TYPE,
+ },
)
# Perform the sophisticated sync based on mode
if self.SYNC_MODE == "xlsx":
- logger.info("Performing JIRA to Excel sync...")
- await sync_interface.sync_from_jira_to_excel()
- logger.info("Performing Excel to JIRA sync...")
- await sync_interface.sync_from_excel_to_jira()
+ # Export tickets to list
+ data_list = await sync_interface.exportTicketsAsList()
+ self._log_audit_event("SYNC_EXPORT", "INFO", f"Exported {len(data_list)} tickets from JIRA")
+ # Read existing Excel headers/content
+ existing_data = []
+ existing_headers = {"header1": "Header 1", "header2": "Header 2"}
+ try:
+ file_path = f"{self.SHAREPOINT_MAIN_FOLDER}/{sync_file_name}"
+ excel_content = await self.services.sharepoint.download_file_by_path(
+ site_id=self.targetSite['id'], file_path=file_path
+ )
+ existing_data, existing_headers = self.parseExcelContent(excel_content)
+ except Exception:
+ pass
+ # Merge and write
+ merged_data, merge_details = self.mergeJiraWithExistingDetailed(data_list, existing_data)
+
+ # Log detailed changes for Excel mode
+ self._log_sync_changes(merge_details, "EXCEL")
+
+ await self.backupSharepointFile(filename=sync_file_name)
+ excel_bytes = self.createExcelContent(merged_data, existing_headers)
+ await self.services.sharepoint.upload_file(
+ site_id=self.targetSite['id'],
+ folder_path=self.SHAREPOINT_MAIN_FOLDER,
+ file_name=sync_file_name,
+ content=excel_bytes,
+ )
+ # Import back to tickets
+ try:
+ excel_content = await self.services.sharepoint.download_file_by_path(
+ site_id=self.targetSite['id'], file_path=file_path
+ )
+ excel_rows, _ = self.parseExcelContent(excel_content)
+ self._log_audit_event("SYNC_IMPORT", "INFO", f"Importing {len(excel_rows)} Excel rows back to tickets")
+ except Exception as e:
+ excel_rows = []
+ self._log_audit_event("SYNC_IMPORT", "WARNING", f"Failed to download Excel for import: {str(e)}")
+ await sync_interface.importListToTickets(excel_rows)
else: # CSV mode (default)
- logger.info("Performing JIRA to CSV sync...")
- await sync_interface.sync_from_jira_to_csv()
- logger.info("Performing CSV to JIRA sync...")
- await sync_interface.sync_from_csv_to_jira()
+ # Export tickets to list
+ data_list = await sync_interface.exportTicketsAsList()
+ self._log_audit_event("SYNC_EXPORT", "INFO", f"Exported {len(data_list)} tickets from JIRA")
+ # Prepare headers by reading existing CSV if present
+ existing_headers = {"header1": "Header 1", "header2": "Header 2"}
+ existing_data: list[dict] = []
+ try:
+ file_path = f"{self.SHAREPOINT_MAIN_FOLDER}/{sync_file_name}"
+ csv_content = await self.services.sharepoint.download_file_by_path(
+ site_id=self.targetSite['id'], file_path=file_path
+ )
+ csv_lines = csv_content.decode('utf-8').split('\n')
+ if len(csv_lines) >= 2:
+ existing_headers["header1"] = csv_lines[0].rstrip('\r\n')
+ existing_headers["header2"] = csv_lines[1].rstrip('\r\n')
+ # Parse existing CSV rows after the two header lines
+ import pandas as pd
+ df_existing = pd.read_csv(io.BytesIO(csv_content), skiprows=2, quoting=1, escapechar='\\', on_bad_lines='skip', engine='python')
+ existing_data = df_existing.to_dict('records')
+ except Exception:
+ pass
+ await self.backupSharepointFile(filename=sync_file_name)
+ merged_data, _ = self.mergeJiraWithExistingDetailed(data_list, existing_data)
+ csv_bytes = self.createCsvContent(merged_data, existing_headers)
+ await self.services.sharepoint.upload_file(
+ site_id=self.targetSite['id'],
+ folder_path=self.SHAREPOINT_MAIN_FOLDER,
+ file_name=sync_file_name,
+ content=csv_bytes,
+ )
+ # Import from CSV
+ try:
+ csv_content = await self.services.sharepoint.download_file_by_path(
+ site_id=self.targetSite['id'], file_path=file_path
+ )
+ import pandas as pd
+ df = pd.read_csv(io.BytesIO(csv_content), skiprows=2, quoting=1, escapechar='\\', on_bad_lines='skip', engine='python')
+ csv_rows = df.to_dict('records')
+ self._log_audit_event("SYNC_IMPORT", "INFO", f"Importing {len(csv_rows)} CSV rows back to tickets")
+ except Exception as e:
+ csv_rows = []
+ self._log_audit_event("SYNC_IMPORT", "WARNING", f"Failed to download CSV for import: {str(e)}")
+ await sync_interface.importListToTickets(csv_rows)
logger.info(f"JIRA to SharePoint synchronization completed successfully (Mode: {self.SYNC_MODE})")
+ self._log_audit_event("SYNC_COMPLETE", "SUCCESS", f"JIRA to SharePoint sync completed successfully (Mode: {self.SYNC_MODE})")
+
+ # Save audit log to SharePoint
+ await self._save_audit_log_to_sharepoint()
+
return True
except Exception as e:
logger.error(f"Error during JIRA to SharePoint synchronization: {str(e)}")
+ self._log_audit_event("SYNC_ERROR", "FAILED", f"Error during sync: {str(e)}")
+
+ # Save audit log to SharePoint even on error
+ await self._save_audit_log_to_sharepoint()
+
return False
+
+ async def backupSharepointFile(self, *, filename: str) -> bool:
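+        """Copy the sync file into the backup folder under a timestamped name; a missing
+        source file is treated as a non-fatal skip."""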
+ try:
+ timestamp = datetime.now(UTC).strftime("%Y%m%d_%H%M%S")
+ backup_filename = f"backup_{timestamp}_{filename}"
+ await self.services.sharepoint.copy_file_async(
+ site_id=self.targetSite['id'],
+ source_folder=self.SHAREPOINT_MAIN_FOLDER,
+ source_file=filename,
+ dest_folder=self.SHAREPOINT_BACKUP_FOLDER,
+ dest_file=backup_filename,
+ )
+ self._log_audit_event("SYNC_BACKUP", "SUCCESS", f"Backed up file: {filename} -> {backup_filename}")
+ return True
+ except Exception as e:
+ if "itemNotFound" in str(e) or "404" in str(e):
+ self._log_audit_event("SYNC_BACKUP", "SKIPPED", f"File not found for backup: {filename}")
+ return True
+ logger.warning(f"Backup failed: {e}")
+ self._log_audit_event("SYNC_BACKUP", "FAILED", f"Backup failed for {filename}: {str(e)}")
+ return False
+
+ def mergeJiraWithExistingDetailed(self, jira_data: list[dict], existing_data: list[dict]) -> tuple[list[dict], dict]:
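+        """Merge exported ticket rows into existing sheet rows, keyed by 'ID'.
+
+        'get' fields from tickets overwrite the sheet values; rows only present in the
+        sheet are kept unchanged. Returns (merged_rows, details) where details holds
+        updated/added/unchanged counts plus per-field change descriptions for auditing.
+        """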
+ existing_lookup = {row.get("ID"): row for row in existing_data if row.get("ID")}
+ merged_data: list[dict] = []
+ changes: list[str] = []
+ updated_count = added_count = unchanged_count = 0
+ for jira_row in jira_data:
+ jira_id = jira_row.get("ID")
+ if jira_id and jira_id in existing_lookup:
+ existing_row = existing_lookup[jira_id].copy()
+ row_changes: list[str] = []
+ for field_name, field_config in self.TASK_SYNC_DEFINITION.items():
+ if field_config[0] == 'get':
+ old_value = "" if existing_row.get(field_name) is None else str(existing_row.get(field_name))
+ new_value = "" if jira_row.get(field_name) is None else str(jira_row.get(field_name))
+ if old_value != new_value:
+ row_changes.append(f"{field_name}: '{old_value}' → '{new_value}'")
+ existing_row[field_name] = jira_row.get(field_name)
+ merged_data.append(existing_row)
+ if row_changes:
+ updated_count += 1
+ changes.append(f"Row ID {jira_id} updated: {', '.join(row_changes)}")
+ else:
+ unchanged_count += 1
+ del existing_lookup[jira_id]
+ else:
+ merged_data.append(jira_row)
+ added_count += 1
+ changes.append(f"Row ID {jira_id} added as new record")
+ for remaining in existing_lookup.values():
+ merged_data.append(remaining)
+ unchanged_count += 1
+ details = {"updated": updated_count, "added": added_count, "unchanged": unchanged_count, "changes": changes}
+ return merged_data, details
+
+ def createCsvContent(self, data: list[dict], existing_headers: dict | None = None) -> bytes:
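+        """Render merged rows as CSV bytes, preserving the two metadata header rows and
+        stamping the current UTC timestamp into the second cell of the second header row."""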
+ import pandas as pd
+ from io import StringIO
+ timestamp = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S UTC")
+ if existing_headers is None:
+ existing_headers = {"header1": "Header 1", "header2": "Header 2"}
+ if not data:
+ cols = list(self.TASK_SYNC_DEFINITION.keys())
+ df = pd.DataFrame(columns=cols)
+ else:
+ df = pd.DataFrame(data)
+ for column in df.columns:
+ df[column] = df[column].astype("object").fillna("")
+ df[column] = df[column].astype(str).str.replace('\n', '\\n', regex=False).str.replace('"', '""', regex=False)
+ import csv as csv_module
+ header1_row = next(csv_module.reader([existing_headers.get("header1", "Header 1")]), [])
+ header2_row = next(csv_module.reader([existing_headers.get("header2", "Header 2")]), [])
+ if len(header2_row) > 1:
+ header2_row[1] = timestamp
+ header_row1 = pd.DataFrame([header1_row + [""] * (len(df.columns) - len(header1_row))], columns=df.columns)
+ header_row2 = pd.DataFrame([header2_row + [""] * (len(df.columns) - len(header2_row))], columns=df.columns)
+ table_headers = pd.DataFrame([df.columns.tolist()], columns=df.columns)
+ final_df = pd.concat([header_row1, header_row2, table_headers, df], ignore_index=True)
+ out = StringIO()
+ final_df.to_csv(out, index=False, header=False, quoting=1, escapechar='\\')
+ return out.getvalue().encode('utf-8')
+
+ def createExcelContent(self, data: list[dict], existing_headers: dict | None = None) -> bytes:
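+        """Render merged rows as an .xlsx workbook (openpyxl) using the same
+        two-header-row layout as createCsvContent."""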
+ import pandas as pd
+ from io import BytesIO
+ timestamp = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S UTC")
+ if existing_headers is None:
+ existing_headers = {"header1": "Header 1", "header2": "Header 2"}
+ if not data:
+ cols = list(self.TASK_SYNC_DEFINITION.keys())
+ df = pd.DataFrame(columns=cols)
+ else:
+ df = pd.DataFrame(data)
+ for column in df.columns:
+ df[column] = df[column].astype("object").fillna("")
+ df[column] = df[column].astype(str).str.replace('\n', '\\n', regex=False).str.replace('"', '""', regex=False)
+ import csv as csv_module
+ header1_row = next(csv_module.reader([existing_headers.get("header1", "Header 1")]), [])
+ header2_row = next(csv_module.reader([existing_headers.get("header2", "Header 2")]), [])
+ if len(header2_row) > 1:
+ header2_row[1] = timestamp
+ header_row1 = pd.DataFrame([header1_row + [""] * (len(df.columns) - len(header1_row))], columns=df.columns)
+ header_row2 = pd.DataFrame([header2_row + [""] * (len(df.columns) - len(header2_row))], columns=df.columns)
+ table_headers = pd.DataFrame([df.columns.tolist()], columns=df.columns)
+ final_df = pd.concat([header_row1, header_row2, table_headers, df], ignore_index=True)
+ buf = BytesIO()
+ final_df.to_excel(buf, index=False, header=False, engine='openpyxl')
+ return buf.getvalue()
+
+ def parseExcelContent(self, excel_content: bytes) -> tuple[list[dict], dict]:
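+        """Parse the sync workbook: rows 1-2 are metadata headers, row 3 holds the column
+        names, and the remaining rows are data. Returns (records, headers) where headers
+        carries the two metadata rows as comma-joined strings."""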
+ import pandas as pd
+ from io import BytesIO
+ df = pd.read_excel(BytesIO(excel_content), engine='openpyxl', header=None)
+ header_row1 = df.iloc[0:1].copy()
+ header_row2 = df.iloc[1:2].copy()
+ table_headers = df.iloc[2:3].copy()
+ df_data = df.iloc[3:].copy()
+ df_data.columns = table_headers.iloc[0]
+ df_data = df_data.reset_index(drop=True)
+ for column in df_data.columns:
+ df_data[column] = df_data[column].astype('object').fillna('')
+ data = df_data.to_dict(orient='records')
+ headers = {
+ "header1": ",".join([str(x) if pd.notna(x) else "" for x in header_row1.iloc[0].tolist()]),
+ "header2": ",".join([str(x) if pd.notna(x) else "" for x in header_row2.iloc[0].tolist()]),
+ }
+ return data, headers
+ def convertAdfToText(self, adf_data):
+ """Convert Atlassian Document Format (ADF) to plain text.
+
+ Based on Atlassian Document Format specification for JIRA fields.
+ Handles paragraphs, lists, text formatting, and other ADF node types.
+
+ Args:
+ adf_data: ADF object or None
+
+ Returns:
+ str: Plain text content, or empty string if None/invalid
+ """
+ if not adf_data or not isinstance(adf_data, dict):
+ return ""
+
+ if adf_data.get("type") != "doc":
+ return str(adf_data) if adf_data else ""
+
+ content = adf_data.get("content", [])
+ if not isinstance(content, list):
+ return ""
+
+ def extract_text_from_content(content_list, list_level=0):
+ """Recursively extract text from ADF content with proper formatting."""
+ text_parts = []
+ list_counter = 1
+
+ for item in content_list:
+ if not isinstance(item, dict):
+ continue
+
+ item_type = item.get("type", "")
+
+ if item_type == "text":
+ # Extract text content, preserving formatting
+ text = item.get("text", "")
+ marks = item.get("marks", [])
+
+ # Handle text formatting (bold, italic, etc.)
+ if marks:
+ for mark in marks:
+ if mark.get("type") == "strong":
+ text = f"**{text}**"
+ elif mark.get("type") == "em":
+ text = f"*{text}*"
+ elif mark.get("type") == "code":
+ text = f"`{text}`"
+ elif mark.get("type") == "link":
+ attrs = mark.get("attrs", {})
+ href = attrs.get("href", "")
+ if href:
+ text = f"[{text}]({href})"
+
+ text_parts.append(text)
+
+ elif item_type == "hardBreak":
+ text_parts.append("\n")
+
+ elif item_type == "paragraph":
+ paragraph_content = item.get("content", [])
+ if paragraph_content:
+ paragraph_text = extract_text_from_content(paragraph_content, list_level)
+ if paragraph_text.strip():
+ text_parts.append(paragraph_text)
+
+ elif item_type == "bulletList":
+ list_content = item.get("content", [])
+ for list_item in list_content:
+ if list_item.get("type") == "listItem":
+ list_item_content = list_item.get("content", [])
+ for list_paragraph in list_item_content:
+ if list_paragraph.get("type") == "paragraph":
+ list_paragraph_content = list_paragraph.get("content", [])
+ if list_paragraph_content:
+ indent = " " * list_level
+ bullet_text = extract_text_from_content(list_paragraph_content, list_level + 1)
+ if bullet_text.strip():
+ text_parts.append(f"{indent}• {bullet_text}")
+
+ elif item_type == "orderedList":
+ list_content = item.get("content", [])
+ for list_item in list_content:
+ if list_item.get("type") == "listItem":
+ list_item_content = list_item.get("content", [])
+ for list_paragraph in list_item_content:
+ if list_paragraph.get("type") == "paragraph":
+ list_paragraph_content = list_paragraph.get("content", [])
+ if list_paragraph_content:
+ indent = " " * list_level
+ ordered_text = extract_text_from_content(list_paragraph_content, list_level + 1)
+ if ordered_text.strip():
+ text_parts.append(f"{indent}{list_counter}. {ordered_text}")
+ list_counter += 1
+
+ elif item_type == "listItem":
+ # Handle nested list items
+ list_item_content = item.get("content", [])
+ if list_item_content:
+ text_parts.append(extract_text_from_content(list_item_content, list_level))
+
+ elif item_type == "embedCard":
+ # Handle embedded content (videos, etc.)
+ attrs = item.get("attrs", {})
+ url = attrs.get("url", "")
+ if url:
+ text_parts.append(f"[Embedded Content: {url}]")
+
+ elif item_type == "codeBlock":
+ # Handle code blocks
+ code_content = item.get("content", [])
+ if code_content:
+ code_text = extract_text_from_content(code_content, list_level)
+ if code_text.strip():
+ text_parts.append(f"```\n{code_text}\n```")
+
+ elif item_type == "blockquote":
+ # Handle blockquotes
+ quote_content = item.get("content", [])
+ if quote_content:
+ quote_text = extract_text_from_content(quote_content, list_level)
+ if quote_text.strip():
+ text_parts.append(f"> {quote_text}")
+
+ elif item_type == "heading":
+ # Handle headings
+ heading_content = item.get("content", [])
+ if heading_content:
+ heading_text = extract_text_from_content(heading_content, list_level)
+ if heading_text.strip():
+ level = item.get("attrs", {}).get("level", 1)
+ text_parts.append(f"{'#' * level} {heading_text}")
+
+ elif item_type == "rule":
+ # Handle horizontal rules
+ text_parts.append("---")
+
+ else:
+ # Handle unknown types by trying to extract content
+ if "content" in item:
+ content_text = extract_text_from_content(item.get("content", []), list_level)
+ if content_text.strip():
+ text_parts.append(content_text)
+
+ return "\n".join(text_parts)
+
+ result = extract_text_from_content(content)
+ return result.strip()
-
-# Utility: dump all Jira fields (name -> field id) to a text file
-async def dump_jira_fields_to_file(filepath: str = "delta_sync_fields.txt") -> bool:
- """Write all available JIRA fields for the configured project/issue type to a text file.
-
- The output format matches the legacy fields.txt, e.g.:
- 'Summary': ['get', ['fields', 'summary']]
-
- Args:
- filepath: Target text file path to write.
-
- Returns:
- True on success, False otherwise.
- """
+# Utility: dump all ticket fields (name -> field id) to a text file (generic)
+async def dumpTicketFieldsToFile(
+ *,
+ filepath: str = "ticket_sync_fields.txt",
+ connectorType: str = "Jira",
+ connectorParams: dict | None = None,
+ taskSyncDefinition: dict | None = None,
+ ) -> bool:
+ """Write available ticket fields (name -> field id) to a text file (generic)."""
try:
- # Initialize Jira connector with the hardcoded credentials/constants
- jira = await ConnectorTicketJira.create(
- jira_username=ManagerSyncDelta.JIRA_USERNAME,
- jira_api_token=ManagerSyncDelta.JIRA_API_TOKEN,
- jira_url=ManagerSyncDelta.JIRA_URL,
- project_code=ManagerSyncDelta.JIRA_PROJECT_CODE,
- issue_type=ManagerSyncDelta.JIRA_ISSUE_TYPE,
+ connectorParams = connectorParams or {}
+ taskSyncDefinition = taskSyncDefinition or ManagerSyncDelta.TASK_SYNC_DEFINITION
+ ticket_interface = await createTicketInterfaceByType(
+ taskSyncDefinition=taskSyncDefinition,
+ connectorType=connectorType,
+ connectorParams=connectorParams,
)
-
- attributes = await jira.read_attributes()
+ attributes = await ticket_interface.connector_ticket.read_attributes()
if not attributes:
- logger.warning("No JIRA attributes returned; nothing to write.")
+ logger.warning("No ticket attributes returned; nothing to write.")
return False
-
- # Ensure directory exists if a directory part is provided
dir_name = os.path.dirname(filepath)
if dir_name:
os.makedirs(dir_name, exist_ok=True)
-
- # Write in the expected mapping format
with open(filepath, "w", encoding="utf-8") as f:
for attr in attributes:
- # attr.field_name (human name), attr.field (Jira field id)
f.write(f"'{attr.field_name}': ['get', ['fields', '{attr.field}']]\n")
-
- logger.info(f"Wrote {len(attributes)} JIRA fields to {filepath}")
+ logger.info(f"Wrote {len(attributes)} ticket fields to {filepath}")
return True
except Exception as e:
- logger.error(f"Failed to dump JIRA fields: {str(e)}")
+ logger.error(f"Failed to dump ticket fields: {str(e)}")
return False
-# Utility: dump actual JIRA data for debugging
-async def dump_jira_data_to_file(filepath: str = "delta_sync_data.txt") -> bool:
- """Write actual JIRA ticket data to a text file for debugging field mapping.
-
- Args:
- filepath: Target text file path to write.
-
- Returns:
- True on success, False otherwise.
- """
+# Utility: dump actual ticket data for debugging (generic)
+async def dumpTicketDataToFile(
+ *,
+ filepath: str = "ticket_sync_data.txt",
+ connectorType: str = "Jira",
+ connectorParams: dict | None = None,
+ taskSyncDefinition: dict | None = None,
+ sampleLimit: int = 5,
+ ) -> bool:
+ """Write actual ticket data to a text file for debugging field mapping (generic)."""
try:
- # Initialize Jira connector with the hardcoded credentials/constants
- jira = await ConnectorTicketJira.create(
- jira_username=ManagerSyncDelta.JIRA_USERNAME,
- jira_api_token=ManagerSyncDelta.JIRA_API_TOKEN,
- jira_url=ManagerSyncDelta.JIRA_URL,
- project_code=ManagerSyncDelta.JIRA_PROJECT_CODE,
- issue_type=ManagerSyncDelta.JIRA_ISSUE_TYPE,
+ connectorParams = connectorParams or {}
+ taskSyncDefinition = taskSyncDefinition or ManagerSyncDelta.TASK_SYNC_DEFINITION
+ ticket_interface = await createTicketInterfaceByType(
+ taskSyncDefinition=taskSyncDefinition,
+ connectorType=connectorType,
+ connectorParams=connectorParams,
)
-
- # Get a few sample tickets to see the actual data structure
- tickets = await jira.read_tasks(limit=5)
+ tickets = await ticket_interface.connector_ticket.read_tasks(limit=sampleLimit)
if not tickets:
- logger.warning("No JIRA tickets returned; nothing to write.")
+ logger.warning("No tickets returned; nothing to write.")
return False
-
- # Ensure directory exists if a directory part is provided
dir_name = os.path.dirname(filepath)
if dir_name:
os.makedirs(dir_name, exist_ok=True)
-
- # Write the actual ticket data
with open(filepath, "w", encoding="utf-8") as f:
- f.write("=== JIRA TICKET DATA DEBUG ===\n\n")
+ f.write("=== TICKET DATA DEBUG ===\n\n")
for i, ticket in enumerate(tickets):
f.write(f"--- TICKET {i+1} ---\n")
- f.write(f"Raw ticket data:\n")
+ f.write("Raw ticket data:\n")
f.write(f"{ticket.data}\n\n")
-
- # Also show the specific fields we're trying to map
f.write("Field mapping analysis:\n")
- for field_name, field_path in ManagerSyncDelta.TASK_SYNC_DEFINITION.items():
- if field_path[0] == 'get': # Only analyze 'get' fields
+ for fieldName, fieldPath in taskSyncDefinition.items():
+ if fieldPath[0] == 'get':
try:
- # Navigate through the field path
value = ticket.data
- for key in field_path[1]:
+ for key in fieldPath[1]:
if isinstance(value, dict) and key in value:
value = value[key]
else:
value = f"KEY_NOT_FOUND: {key}"
break
-
- # Convert ADF fields to text
- if field_name in ['Description', 'References', 'DELTA Comments', 'SELISE Comments']:
- if isinstance(value, dict) and value.get("type") == "doc":
- value = convert_adf_to_text(value)
- elif value is None:
- value = ""
-
- f.write(f" {field_name}: {value}\n")
+ if isinstance(value, dict) and value.get("type") == "doc":
+                            pass  # ADF ("doc") payloads are intentionally dumped raw here (no text conversion)
+ elif value is None:
+ value = ""
+ f.write(f" {fieldName}: {value}\n")
except Exception as e:
- f.write(f" {field_name}: ERROR - {str(e)}\n")
+ f.write(f" {fieldName}: ERROR - {str(e)}\n")
f.write("\n" + "="*50 + "\n\n")
-
- logger.info(f"Wrote JIRA data for {len(tickets)} tickets to {filepath}")
+ logger.info(f"Wrote ticket data for {len(tickets)} tickets to {filepath}")
return True
except Exception as e:
- logger.error(f"Failed to dump JIRA data: {str(e)}")
+ logger.error(f"Failed to dump ticket data: {str(e)}")
return False
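
The 'get' entries in taskSyncDefinition are walked key by key, exactly as in the loop above; the traversal in isolation:

    from typing import Any

    def getByPath(data: dict, path: list[str]) -> Any:
        # Walk a nested dict along path; mirror the KEY_NOT_FOUND marker used in the dump.
        value: Any = data
        for key in path:
            if isinstance(value, dict) and key in value:
                value = value[key]
            else:
                return f"KEY_NOT_FOUND: {key}"
        return value

    ticket = {"fields": {"summary": "Fix login flow", "customfield_10067": "2025-01-31"}}
    print(getByPath(ticket, ["fields", "summary"]))        # Fix login flow
    print(getByPath(ticket, ["fields", "missing_field"]))  # KEY_NOT_FOUND: missing_field
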
-# Global sync function for use in app.py
-async def perform_sync_jira_delta_group() -> bool:
- """Perform JIRA to SharePoint synchronization for Delta Group.
+# Main entry point: the scheduled ticket-to-SharePoint sync
+async def performSync(eventUser=None) -> bool:
+ """Perform tickets to SharePoint synchronization
This function is called by the scheduler and can be used independently.
+
+    Args:
+ eventUser: Optional event user to use for synchronization
+
Returns:
bool: True if synchronization was successful, False otherwise
"""
try:
- if APP_ENV_TYPE != "prod" and APP_ENV_TYPE != "tst":
- logger.info("JIRA to SharePoint synchronization: TASK to run only in PROD")
- return True
-
- logger.info("Starting Delta Group JIRA sync...")
-
-
- sync_manager = ManagerSyncDelta()
- success = await sync_manager.sync_jira_to_sharepoint()
+ logger.info("Starting DG tickets sync...")
+
+        # Sync audit logging is handled by the ManagerSyncDelta instance
+
+ syncManager = ManagerSyncDelta(eventUser)
+ success = await syncManager.syncTicketsOverSharepoint()
if success:
- logger.info("Delta Group JIRA sync completed successfully")
+ logger.info("DG tickets sync completed successfully")
else:
- logger.error("Delta Group JIRA sync failed")
+ logger.error("DG tickets sync failed")
return success
except Exception as e:
- logger.error(f"Error in perform_sync_jira_delta_group: {str(e)}")
+ logger.error(f"Error in performing DG tickets sync: {str(e)}")
return False
# Register scheduled job on import using the shared event manager
try:
from modules.shared.eventManagement import eventManager
- # Schedule sync every 20 minutes (at minutes 00, 20, 40)
- eventManager.register_cron(
- job_id="jira_delta_group_sync",
- func=perform_sync_jira_delta_group,
- cron_kwargs={"minute": "0,20,40"},
- replace_existing=True,
- coalesce=True,
- max_instances=1,
- misfire_grace_time=1800,
- )
- logger.info("Registered jira_delta_group_sync via EventManagement (every 20 minutes)")
+    # Register the scheduler only in production (dev is included for local testing)
+    if APP_ENV_TYPE in ("prod", "dev"):
+ # Schedule sync every 20 minutes (at minutes 00, 20, 40)
+ eventManager.register_cron(
+ job_id="dgsync",
+ func=performSync,
+ cron_kwargs={"minute": "0,20,40"},
+ replace_existing=True,
+ coalesce=True,
+ max_instances=1,
+ misfire_grace_time=1800,
+ )
+ logger.info("Registered DG ticket sync via EventManagement (every 20 minutes)")
+ else:
+ logger.info(f"Skipping DG scheduler registration for ticket sync in env: {APP_ENV_TYPE}")
except Exception as e:
- logger.error(f"Failed to register jira_delta_group_sync: {str(e)}")
\ No newline at end of file
+ logger.error(f"Failed to register DG ticket sync: {str(e)}")
diff --git a/modules/interfaces/interfaceAppModel.py b/modules/interfaces/interfaceAppModel.py
index e8ca0caa..8d58325a 100644
--- a/modules/interfaces/interfaceAppModel.py
+++ b/modules/interfaces/interfaceAppModel.py
@@ -564,20 +564,3 @@ register_model_labels(
"patternType": {"en": "Pattern Type", "fr": "Type de modèle"}
}
)
-
-class SystemTable(BaseModel, ModelMixin):
- """Data model for system table entries"""
- table_name: str = Field(
- description="Name of the table",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=True
- )
- initial_id: Optional[str] = Field(
- default=None,
- description="Initial ID for the table",
- frontend_type="text",
- frontend_readonly=True,
- frontend_required=False
- )
-
\ No newline at end of file
diff --git a/modules/interfaces/interfaceAppObjects.py b/modules/interfaces/interfaceAppObjects.py
index 069556cf..9320b102 100644
--- a/modules/interfaces/interfaceAppObjects.py
+++ b/modules/interfaces/interfaceAppObjects.py
@@ -121,6 +121,7 @@ class AppObjects:
"""Initialize standard records if they don't exist."""
self._initRootMandate()
self._initAdminUser()
+ self._initEventUser()
def _initRootMandate(self):
"""Creates the Root mandate if it doesn't exist."""
@@ -154,7 +155,7 @@ class AppObjects:
language="en",
privilege=UserPrivilege.SYSADMIN,
authenticationAuthority="local", # Using lowercase value directly
- hashedPassword=self._getPasswordHash("The 1st Poweron Admin"), # Use a secure password in production!
+ hashedPassword=self._getPasswordHash(APP_CONFIG.get("APP_INIT_PASS_ADMIN_SECRET")),
connections=[]
)
createdUser = self.db.recordCreate(UserInDB, adminUser)
@@ -164,6 +165,27 @@ class AppObjects:
self.currentUser = createdUser
self.userId = createdUser.get("id")
+ def _initEventUser(self):
+ """Creates the Event user if it doesn't exist."""
+ # Check if event user already exists
+ existingUsers = self.db.getRecordset(UserInDB, recordFilter={"username": "event"})
+ if not existingUsers:
+ logger.info("Creating Event user")
+ eventUser = UserInDB(
+ mandateId=self.getInitialId(Mandate),
+ username="event",
+ email="event@example.com",
+ fullName="Event",
+ enabled=True,
+ language="en",
+ privilege=UserPrivilege.SYSADMIN,
+ authenticationAuthority="local", # Using lowercase value directly
+ hashedPassword=self._getPasswordHash(APP_CONFIG.get("APP_INIT_PASS_EVENT_SECRET")),
+ connections=[]
+ )
+ createdUser = self.db.recordCreate(UserInDB, eventUser)
+ logger.info(f"Event user created with ID {createdUser['id']}")
+
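
_getPasswordHash itself is outside this hunk; a common shape for such a helper, assuming passlib's bcrypt backend (an assumption, not confirmed by this patch):

    # Assumption: passlib + bcrypt; the project's _getPasswordHash may differ.
    from passlib.context import CryptContext

    pwdContext = CryptContext(schemes=["bcrypt"], deprecated="auto")

    def getPasswordHashSketch(plainPassword: str) -> str:
        # Returns a salted bcrypt hash suitable for a hashedPassword column.
        return pwdContext.hash(plainPassword)

    def verifyPasswordSketch(plainPassword: str, hashedPassword: str) -> bool:
        return pwdContext.verify(plainPassword, hashedPassword)
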
def _uam(self, model_class: type, recordset: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
Unified user access management function that filters data based on user privileges
@@ -1075,34 +1097,6 @@ def getInterface(currentUser: User) -> AppObjects:
return _gatewayInterfaces[contextKey]
-def getRootUser() -> User:
- """
- Returns the root user from the database.
- This is the user with the initial ID in the users table.
- """
- try:
- # Create a temporary interface without user context
- tempInterface = AppObjects()
-
- # Get the initial user directly
- initialUserId = tempInterface.getInitialId(UserInDB)
- if not initialUserId:
- raise ValueError("No initial user ID found in database")
-
- users = tempInterface.db.getRecordset(UserInDB, recordFilter={"id": initialUserId})
- if not users:
- raise ValueError("Initial user not found in database")
-
-
- # Convert to User model and return the model instance
- user_data = users[0]
-
- return User.parse_obj(user_data)
-
- except Exception as e:
- logger.error(f"Error getting root user: {str(e)}")
- raise ValueError(f"Failed to get root user: {str(e)}")
-
def getRootInterface() -> AppObjects:
"""
     Returns an AppObjects instance with root privileges.
@@ -1111,7 +1105,28 @@ def getRootInterface() -> AppObjects:
global _rootAppObjects
if _rootAppObjects is None:
- rootUser = getRootUser()
- _rootAppObjects = AppObjects(rootUser)
+ try:
+ # Create a temporary interface without user context to get root user
+ tempInterface = AppObjects()
+
+ # Get the initial user directly
+ initialUserId = tempInterface.getInitialId(UserInDB)
+ if not initialUserId:
+ raise ValueError("No initial user ID found in database")
+
+ users = tempInterface.db.getRecordset(UserInDB, recordFilter={"id": initialUserId})
+ if not users:
+ raise ValueError("Initial user not found in database")
+
+ # Convert to User model
+ user_data = users[0]
+ rootUser = User.parse_obj(user_data)
+
+ # Create root interface with the root user
+ _rootAppObjects = AppObjects(rootUser)
+
+ except Exception as e:
+ logger.error(f"Error getting root user: {str(e)}")
+ raise ValueError(f"Failed to get root user: {str(e)}")
return _rootAppObjects
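
getRootInterface is the module-level lazy-singleton pattern: the first caller pays the construction cost, later callers reuse the cached instance, and a failed construction leaves the cache empty so the next call retries. The skeleton:

    from typing import Optional

    class RootInterfaceSketch:
        pass  # stand-in for AppObjects(rootUser)

    _cached: Optional[RootInterfaceSketch] = None

    def getCached() -> RootInterfaceSketch:
        global _cached
        if _cached is None:
            _cached = RootInterfaceSketch()  # exceptions propagate; cache stays empty
        return _cached
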
diff --git a/modules/interfaces/interfaceTicketModel.py b/modules/interfaces/interfaceTicketModel.py
index 98329a7b..a7cff7ad 100644
--- a/modules/interfaces/interfaceTicketModel.py
+++ b/modules/interfaces/interfaceTicketModel.py
@@ -6,8 +6,8 @@ from abc import ABC, abstractmethod
class TicketFieldAttribute(BaseModel):
- field_name: str = Field(description="Human-readable field name")
- field: str = Field(description="JIRA field ID/key")
+ fieldName: str = Field(description="Human-readable field name")
+ field: str = Field(description="Ticket field ID/key")
class Task(BaseModel):
diff --git a/modules/interfaces/interfaceTicketObjects.py b/modules/interfaces/interfaceTicketObjects.py
index 28dd3c3d..727aa6bc 100644
--- a/modules/interfaces/interfaceTicketObjects.py
+++ b/modules/interfaces/interfaceTicketObjects.py
@@ -1,705 +1,64 @@
-from dataclasses import dataclass
-from io import BytesIO, StringIO
from typing import Any, Optional
from datetime import datetime, timezone
-import pandas as pd
-import openpyxl
-from modules.shared.timezoneUtils import get_utc_now
-
-from modules.services.serviceSharepoint.mainSharepoint import SharepointService
from modules.interfaces.interfaceTicketModel import TicketBase, Task
-
-@dataclass(slots=True)
-class TicketSharepointSyncInterface:
- connector_ticket: TicketBase
- connector_sharepoint: SharepointService
- task_sync_definition: dict
- sync_folder: str
- sync_file: str
- backup_folder: str
- audit_folder: str
- site_id: str # Keep for compatibility but not used with REST API
-
- @classmethod
- async def create(
- cls,
- connector_ticket: TicketBase,
- connector_sharepoint: SharepointService,
- task_sync_definition: dict,
- sync_folder: str,
- sync_file: str,
- backup_folder: str,
- audit_folder: str,
- site_id: str,
- ) -> "TicketSharepointSyncInterface":
- return cls(
- connector_ticket=connector_ticket,
- connector_sharepoint=connector_sharepoint,
- task_sync_definition=task_sync_definition,
- sync_folder=sync_folder,
- sync_file=sync_file,
- backup_folder=backup_folder,
- audit_folder=audit_folder,
- site_id=site_id,
- )
-
- async def create_backup(self):
- """Creates a backup of the current sync file in the backup folder."""
- timestamp = get_utc_now().strftime("%Y%m%d_%H%M%S")
- backup_filename = f"backup_{timestamp}_{self.sync_file}"
-
- try:
- await self.connector_sharepoint.copy_file_async(
- site_id=self.site_id,
- source_folder=self.sync_folder,
- source_file=self.sync_file,
- dest_folder=self.backup_folder,
- dest_file=backup_filename,
- )
- except Exception as e:
- # If the source file doesn't exist (404 error), that's okay for first-time sync
- if "itemNotFound" in str(e) or "404" in str(e) or "could not be found" in str(e):
- raise Exception(f"Source file does not exist - no backup needed: {self.sync_file}")
- else:
- # Re-raise other errors
- raise
-
- async def sync_from_jira_to_csv(self):
- """Syncs tasks from JIRA to a CSV file in SharePoint."""
- start_time = get_utc_now()
- audit_log = []
-
- audit_log.append("=== JIRA TO CSV SYNC STARTED ===")
- audit_log.append(f"Start Time: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")
- audit_log.append(f"Sync File: {self.sync_file}")
- audit_log.append(f"Sync Folder: {self.sync_folder}")
- audit_log.append("")
-
- try:
- # 1. Read JIRA tickets
- audit_log.append("Step 1: Reading JIRA tickets...")
- tickets = await self.connector_ticket.read_tasks(limit=0)
- audit_log.append(f"JIRA issues read: {len(tickets)}")
- audit_log.append("")
-
- # 2. Transform tasks according to task_sync_definition
- audit_log.append("Step 2: Transforming JIRA data...")
- transformed_tasks = self._transform_tasks(tickets, include_put=True)
- jira_data = [task.data for task in transformed_tasks]
- before_count = len(jira_data)
- # Remove records without an ID to avoid blank rows
- jira_data = self._filter_empty_records(jira_data)
- after_count = len(jira_data)
- audit_log.append(f"JIRA issues transformed: {before_count}")
- audit_log.append(f"JIRA issues after ID filter: {after_count}")
- # Log a sample of IDs to diagnose empty export issues
- try:
- sample_ids = [str(row.get("ID")) for row in jira_data[:5]]
- audit_log.append(f"Sample IDs: {', '.join(sample_ids)}")
- except Exception:
- pass
- audit_log.append("")
-
- # 3. Create JIRA export file in audit folder
- audit_log.append("Step 3: Creating JIRA export file...")
- try:
- timestamp = get_utc_now().strftime("%Y%m%d_%H%M%S")
- jira_export_filename = f"jira_export_{timestamp}.csv"
- # Use default headers for JIRA export
- jira_export_content = self._create_csv_content(jira_data, {"header1": "JIRA Export", "header2": "Raw Data"})
- await self.connector_sharepoint.upload_file(
- site_id=self.site_id,
- folder_path=self.audit_folder,
- file_name=jira_export_filename,
- content=jira_export_content,
- )
- audit_log.append(f"JIRA export file created: {jira_export_filename}")
- except Exception as e:
- audit_log.append(f"Failed to create JIRA export file: {str(e)}")
- audit_log.append("")
-
- # 4. Create backup of existing sync file (if it exists)
- audit_log.append("Step 4: Creating backup...")
- backup_created = False
- try:
- await self.create_backup()
- backup_created = True
- audit_log.append("Backup created successfully")
- except Exception as e:
- audit_log.append(
- f"Backup creation failed (file might not exist): {str(e)}"
- )
- audit_log.append("")
-
- # 5. Try to read existing CSV file from SharePoint
- audit_log.append("Step 5: Reading existing CSV file...")
- existing_data = []
- existing_file_found = False
- existing_headers = {"header1": "", "header2": ""}
- try:
- file_path = f"{self.sync_folder}/{self.sync_file}"
- csv_content = await self.connector_sharepoint.download_file_by_path(
- site_id=self.site_id, file_path=file_path
- )
-
- # Read the first two lines to get headers
- csv_lines = csv_content.decode('utf-8').split('\n')
- if len(csv_lines) >= 2:
- # Store the raw first two lines as headers (preserving original formatting)
- existing_headers["header1"] = csv_lines[0].rstrip('\r\n')
- existing_headers["header2"] = csv_lines[1].rstrip('\r\n')
-
- # Try to read with robust CSV parsing (skip first 2 rows)
- df_existing = pd.read_csv(
- BytesIO(csv_content),
- skiprows=2,
- quoting=1, # QUOTE_ALL
- escapechar='\\',
- on_bad_lines='skip', # Skip malformed lines
- engine='python' # More robust parsing
- )
- existing_data = df_existing.to_dict("records")
- existing_file_found = True
- audit_log.append(
- f"Existing CSV file found with {len(existing_data)} records"
- )
- audit_log.append(f"Preserved headers: Header1='{existing_headers['header1']}', Header2='{existing_headers['header2']}'")
- except Exception as e:
- audit_log.append(f"No existing CSV file found or read error: {str(e)}")
- audit_log.append("")
-
- # 6. Merge JIRA data with existing data and track changes
- audit_log.append("Step 6: Merging JIRA data with existing data...")
- merged_data, change_details = self._merge_jira_with_existing_detailed(
- jira_data, existing_data
- )
-
- # Log detailed changes
- audit_log.append(f"Total records after merge: {len(merged_data)}")
- audit_log.append(f"Records updated: {change_details['updated']}")
- audit_log.append(f"Records added: {change_details['added']}")
- audit_log.append(f"Records unchanged: {change_details['unchanged']}")
- audit_log.append("")
-
- # Log individual changes
- if change_details["changes"]:
- audit_log.append("DETAILED CHANGES:")
- for change in change_details["changes"]:
- audit_log.append(f"- {change}")
- audit_log.append("")
-
- # 7. Create CSV with 4-row structure and write to SharePoint
- audit_log.append("Step 7: Writing updated CSV to SharePoint...")
- csv_content = self._create_csv_content(merged_data, existing_headers)
- await self.connector_sharepoint.upload_file(
- site_id=self.site_id,
- folder_path=self.sync_folder,
- file_name=self.sync_file,
- content=csv_content,
- )
- audit_log.append("CSV file successfully written to SharePoint")
- audit_log.append("")
-
- # Success summary
- end_time = get_utc_now()
- duration = (end_time - start_time).total_seconds()
- audit_log.append("=== SYNC COMPLETED SUCCESSFULLY ===")
- audit_log.append(f"End Time: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
- audit_log.append(f"Duration: {duration:.2f} seconds")
- audit_log.append(f"Total JIRA issues processed: {len(jira_data)}")
- audit_log.append(f"Total records in final CSV: {len(merged_data)}")
-
- except Exception as e:
- # Error handling
- end_time = get_utc_now()
- duration = (end_time - start_time).total_seconds()
- audit_log.append("")
- audit_log.append("=== SYNC FAILED ===")
- audit_log.append(f"Error Time: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
- audit_log.append(f"Duration before failure: {duration:.2f} seconds")
- audit_log.append(f"Error: {str(e)}")
- raise
- finally:
- # Write audit log to SharePoint
- await self._write_audit_log(audit_log, "jira_to_csv")
-
- async def sync_from_csv_to_jira(self):
- """Syncs tasks from a CSV file in SharePoint to JIRA."""
- start_time = get_utc_now()
- audit_log = []
-
- audit_log.append("=== CSV TO JIRA SYNC STARTED ===")
- audit_log.append(f"Start Time: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")
- audit_log.append(f"Sync File: {self.sync_file}")
- audit_log.append(f"Sync Folder: {self.sync_folder}")
- audit_log.append("")
-
- try:
- # 1. Read CSV file from SharePoint
- audit_log.append("Step 1: Reading CSV file from SharePoint...")
- try:
- file_path = f"{self.sync_folder}/{self.sync_file}"
- csv_content = await self.connector_sharepoint.download_file_by_path(
- site_id=self.site_id, file_path=file_path
- )
- # Try to read with robust CSV parsing
- df = pd.read_csv(
- BytesIO(csv_content),
- skiprows=2,
- quoting=1, # QUOTE_ALL
- escapechar='\\',
- on_bad_lines='skip', # Skip malformed lines
- engine='python' # More robust parsing
- )
- csv_data = df.to_dict("records")
- audit_log.append(
- f"CSV file read successfully with {len(csv_data)} records"
- )
- except Exception as e:
- audit_log.append(f"Failed to read CSV file: {str(e)}")
- audit_log.append("CSV to JIRA sync aborted - no file to process")
- return
- audit_log.append("")
-
- # 2. Read current JIRA data for comparison
- audit_log.append("Step 2: Reading current JIRA data for comparison...")
- try:
- current_jira_tasks = await self.connector_ticket.read_tasks(limit=0)
- current_jira_data = self._transform_tasks(
- current_jira_tasks, include_put=True
- )
- jira_lookup = {
- task.data.get("ID"): task.data for task in current_jira_data
- }
- audit_log.append(f"Current JIRA data read: {len(jira_lookup)} tasks")
- except Exception as e:
- audit_log.append(f"Failed to read current JIRA data: {str(e)}")
- raise
- audit_log.append("")
-
- # 3. Detect actual changes in "put" fields
- audit_log.append("Step 3: Detecting changes in 'put' fields...")
- actual_changes = {}
- records_with_changes = 0
- total_changes = 0
-
- for row in csv_data:
- task_id = row.get("ID")
- if not task_id or task_id not in jira_lookup:
- continue
-
- current_jira_task = jira_lookup[task_id]
- task_changes = {}
-
- for field_name, field_config in self.task_sync_definition.items():
- if field_config[0] == "put": # Only process "put" fields
- csv_value = row.get(field_name, "")
- jira_value = current_jira_task.get(field_name, "")
-
- # Convert None to empty string for comparison
- csv_value = "" if csv_value is None else str(csv_value).strip()
- jira_value = (
- "" if jira_value is None else str(jira_value).strip()
- )
-
- # Include if values are different (allow empty strings to clear fields like the reference does)
- if csv_value != jira_value:
- task_changes[field_name] = csv_value
-
- if task_changes:
- actual_changes[task_id] = task_changes
- records_with_changes += 1
- total_changes += len(task_changes)
-
- audit_log.append(f"Records with actual changes: {records_with_changes}")
- audit_log.append(f"Total field changes detected: {total_changes}")
- audit_log.append("")
-
- # Log detailed changes
- if actual_changes:
- audit_log.append("DETAILED CHANGES TO APPLY TO JIRA:")
- for task_id, changes in actual_changes.items():
- change_list = [
- f"{field}: '{value}'" for field, value in changes.items()
- ]
- audit_log.append(f"- Task ID {task_id}: {', '.join(change_list)}")
- audit_log.append("")
-
- # 4. Update JIRA tasks with actual changes
- if actual_changes:
- audit_log.append("Step 4: Updating JIRA tasks...")
-
- # Convert to Task objects for the connector
- tasks_to_update = []
- for task_id, changes in actual_changes.items():
- # Create task data structure expected by JIRA connector
- # Build the nested fields structure that JIRA expects
- fields = {}
- for field_name, new_value in changes.items():
- # Map back to JIRA field structure using task_sync_definition
- field_config = self.task_sync_definition[field_name]
- field_path = field_config[1]
-
- # Extract the JIRA field ID from the path
- # For "put" fields, the path is like ['fields', 'customfield_10067']
- if len(field_path) >= 2 and field_path[0] == "fields":
- jira_field_id = field_path[1]
- # Parse date fields back to JIRA format
- if self._is_date_field(field_name) and new_value:
- parsed_date = self._parse_date_from_excel(str(new_value))
- if parsed_date:
- fields[jira_field_id] = parsed_date
- else:
- fields[jira_field_id] = new_value
- else:
- fields[jira_field_id] = new_value
-
- if fields:
- task_data = {"ID": task_id, "fields": fields}
- task = Task(data=task_data)
- tasks_to_update.append(task)
-
- # Write tasks back to JIRA
- try:
- await self.connector_ticket.write_tasks(tasks_to_update)
- audit_log.append(
- f"Successfully updated {len(tasks_to_update)} JIRA tasks"
- )
- except Exception as e:
- audit_log.append(f"Failed to update JIRA tasks: {str(e)}")
- raise
- else:
- audit_log.append("Step 4: No changes to apply to JIRA")
- audit_log.append("")
-
- # Success summary
- end_time = get_utc_now()
- duration = (end_time - start_time).total_seconds()
- audit_log.append("=== SYNC COMPLETED SUCCESSFULLY ===")
- audit_log.append(f"End Time: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
- audit_log.append(f"Duration: {duration:.2f} seconds")
- audit_log.append(f"Total CSV records processed: {len(csv_data)}")
- audit_log.append(f"Records with actual changes: {records_with_changes}")
- audit_log.append(f"JIRA tasks updated: {len(actual_changes)}")
-
- except Exception as e:
- # Error handling
- end_time = get_utc_now()
- duration = (end_time - start_time).total_seconds()
- audit_log.append("")
- audit_log.append("=== SYNC FAILED ===")
- audit_log.append(f"Error Time: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
- audit_log.append(f"Duration before failure: {duration:.2f} seconds")
- audit_log.append(f"Error: {str(e)}")
- raise
- finally:
- # Write audit log to SharePoint
- await self._write_audit_log(audit_log, "csv_to_jira")
-
- async def sync_from_jira_to_excel(self):
- """Syncs tasks from JIRA to an Excel file in SharePoint."""
- start_time = get_utc_now()
- audit_log = []
-
- audit_log.append("=== JIRA TO EXCEL SYNC STARTED ===")
- audit_log.append(f"Start Time: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")
- audit_log.append(f"Sync File: {self.sync_file}")
- audit_log.append(f"Sync Folder: {self.sync_folder}")
- audit_log.append("")
-
- try:
- # 1. Read JIRA tickets
- audit_log.append("Step 1: Reading JIRA tickets...")
- tickets = await self.connector_ticket.read_tasks(limit=0)
- audit_log.append(f"JIRA issues read: {len(tickets)}")
- audit_log.append("")
-
- # 2. Transform tasks according to task_sync_definition
- audit_log.append("Step 2: Transforming JIRA data...")
- transformed_tasks = self._transform_tasks(tickets, include_put=True)
- jira_data = [task.data for task in transformed_tasks]
- audit_log.append(f"JIRA issues transformed: {len(jira_data)}")
- audit_log.append("")
-
- # 3. Create JIRA export file in audit folder
- audit_log.append("Step 3: Creating JIRA export file...")
- try:
- timestamp = get_utc_now().strftime("%Y%m%d_%H%M%S")
- jira_export_filename = f"jira_export_{timestamp}.xlsx"
- # Use default headers for JIRA export
- jira_export_content = self._create_excel_content(jira_data, {"header1": "JIRA Export", "header2": "Raw Data"})
- await self.connector_sharepoint.upload_file(
- site_id=self.site_id,
- folder_path=self.audit_folder,
- file_name=jira_export_filename,
- content=jira_export_content,
- )
- audit_log.append(f"JIRA export file created: {jira_export_filename}")
- except Exception as e:
- audit_log.append(f"Failed to create JIRA export file: {str(e)}")
- audit_log.append("")
-
- # 4. Create backup of existing Excel file (if it exists)
- audit_log.append("Step 4: Creating backup...")
- backup_created = False
- try:
- await self.create_backup()
- backup_created = True
- audit_log.append("Backup created successfully")
- except Exception as e:
- audit_log.append(
- f"Backup creation failed (file might not exist): {str(e)}"
- )
- audit_log.append("")
-
- # 5. Try to read existing Excel file from SharePoint
- audit_log.append("Step 5: Reading existing Excel file...")
- existing_data = []
- existing_file_found = False
- existing_headers = {"header1": "Header 1", "header2": "Header 2"}
- try:
- file_path = f"{self.sync_folder}/{self.sync_file}"
- excel_content = await self.connector_sharepoint.download_file_by_path(
- site_id=self.site_id, file_path=file_path
- )
-
- # Parse Excel file with 4-row structure
- existing_data, existing_headers = self._parse_excel_content(excel_content)
- existing_file_found = True
- audit_log.append(
- f"Existing Excel file found with {len(existing_data)} records"
- )
- audit_log.append(f"Preserved headers: Header1='{existing_headers['header1']}', Header2='{existing_headers['header2']}'")
- except Exception as e:
- audit_log.append(f"No existing Excel file found or read error: {str(e)}")
- audit_log.append("")
-
- # 6. Merge JIRA data with existing data and track changes
- audit_log.append("Step 6: Merging JIRA data with existing data...")
- merged_data, change_details = self._merge_jira_with_existing_detailed(
- jira_data, existing_data
- )
-
- # Log detailed changes
- audit_log.append(f"Total records after merge: {len(merged_data)}")
- audit_log.append(f"Records updated: {change_details['updated']}")
- audit_log.append(f"Records added: {change_details['added']}")
- audit_log.append(f"Records unchanged: {change_details['unchanged']}")
- audit_log.append("")
-
- # Log individual changes
- if change_details["changes"]:
- audit_log.append("DETAILED CHANGES:")
- for change in change_details["changes"]:
- audit_log.append(f"- {change}")
- audit_log.append("")
-
- # 7. Create Excel with 4-row structure and write to SharePoint
- audit_log.append("Step 7: Writing updated Excel to SharePoint...")
- # Ensure no records without ID are written
- merged_data = self._filter_empty_records(merged_data)
- excel_content = self._create_excel_content(merged_data, existing_headers)
- await self.connector_sharepoint.upload_file(
- site_id=self.site_id,
- folder_path=self.sync_folder,
- file_name=self.sync_file,
- content=excel_content,
- )
- audit_log.append("Excel file successfully written to SharePoint")
- audit_log.append("")
-
- # Success summary
- end_time = get_utc_now()
- duration = (end_time - start_time).total_seconds()
- audit_log.append("=== SYNC COMPLETED SUCCESSFULLY ===")
- audit_log.append(f"End Time: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
- audit_log.append(f"Duration: {duration:.2f} seconds")
- audit_log.append(f"Total JIRA issues processed: {len(jira_data)}")
- audit_log.append(f"Total records in final Excel: {len(merged_data)}")
-
- except Exception as e:
- # Error handling
- end_time = get_utc_now()
- duration = (end_time - start_time).total_seconds()
- audit_log.append("")
- audit_log.append("=== SYNC FAILED ===")
- audit_log.append(f"Error Time: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
- audit_log.append(f"Duration before failure: {duration:.2f} seconds")
- audit_log.append(f"Error: {str(e)}")
- raise
- finally:
- # Write audit log to SharePoint
- await self._write_audit_log(audit_log, "jira_to_excel")
-
- async def sync_from_excel_to_jira(self):
- """Syncs tasks from an Excel file in SharePoint to JIRA."""
- start_time = get_utc_now()
- audit_log = []
-
- audit_log.append("=== EXCEL TO JIRA SYNC STARTED ===")
- audit_log.append(f"Start Time: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")
- audit_log.append(f"Sync File: {self.sync_file}")
- audit_log.append(f"Sync Folder: {self.sync_folder}")
- audit_log.append("")
-
- try:
- # 1. Read Excel file from SharePoint
- audit_log.append("Step 1: Reading Excel file from SharePoint...")
- try:
- file_path = f"{self.sync_folder}/{self.sync_file}"
- excel_content = await self.connector_sharepoint.download_file_by_path(
- site_id=self.site_id, file_path=file_path
- )
- # Parse Excel file with 4-row structure
- excel_data, _ = self._parse_excel_content(excel_content)
- audit_log.append(
- f"Excel file read successfully with {len(excel_data)} records"
- )
- except Exception as e:
- audit_log.append(f"Failed to read Excel file: {str(e)}")
- audit_log.append("Excel to JIRA sync aborted - no file to process")
- return
- audit_log.append("")
-
- # 2. Read current JIRA data for comparison
- audit_log.append("Step 2: Reading current JIRA data for comparison...")
- try:
- current_jira_tasks = await self.connector_ticket.read_tasks(limit=0)
- current_jira_data = self._transform_tasks(
- current_jira_tasks, include_put=True
- )
- jira_lookup = {
- task.data.get("ID"): task.data for task in current_jira_data
- }
- audit_log.append(f"Current JIRA data read: {len(jira_lookup)} tasks")
- except Exception as e:
- audit_log.append(f"Failed to read current JIRA data: {str(e)}")
- raise
- audit_log.append("")
-
- # 3. Detect actual changes in "put" fields
- audit_log.append("Step 3: Detecting changes in 'put' fields...")
- actual_changes = {}
- records_with_changes = 0
- total_changes = 0
-
- for row in excel_data:
- task_id = row.get("ID")
- if not task_id or task_id not in jira_lookup:
- continue
-
- current_jira_task = jira_lookup[task_id]
- task_changes = {}
-
- for field_name, field_config in self.task_sync_definition.items():
- if field_config[0] == "put": # Only process "put" fields
- excel_value = row.get(field_name, "")
- jira_value = current_jira_task.get(field_name, "")
-
- # Convert None to empty string for comparison
- excel_value = "" if excel_value is None else str(excel_value).strip()
- jira_value = (
- "" if jira_value is None else str(jira_value).strip()
- )
-
- # Include if values are different (allow empty strings to clear fields like the reference does)
- if excel_value != jira_value:
- task_changes[field_name] = excel_value
-
- if task_changes:
- actual_changes[task_id] = task_changes
- records_with_changes += 1
- total_changes += len(task_changes)
-
- audit_log.append(f"Records with actual changes: {records_with_changes}")
- audit_log.append(f"Total field changes detected: {total_changes}")
- audit_log.append("")
-
- # Log detailed changes
- if actual_changes:
- audit_log.append("DETAILED CHANGES TO APPLY TO JIRA:")
- for task_id, changes in actual_changes.items():
- change_list = [
- f"{field}: '{value}'" for field, value in changes.items()
- ]
- audit_log.append(f"- Task ID {task_id}: {', '.join(change_list)}")
- audit_log.append("")
-
- # 4. Update JIRA tasks with actual changes
- if actual_changes:
- audit_log.append("Step 4: Updating JIRA tasks...")
-
- # Convert to Task objects for the connector
- tasks_to_update = []
- for task_id, changes in actual_changes.items():
- # Create task data structure expected by JIRA connector
- # Build the nested fields structure that JIRA expects
- fields = {}
- for field_name, new_value in changes.items():
- # Map back to JIRA field structure using task_sync_definition
- field_config = self.task_sync_definition[field_name]
- field_path = field_config[1]
-
- # Extract the JIRA field ID from the path
- # For "put" fields, the path is like ['fields', 'customfield_10067']
- if len(field_path) >= 2 and field_path[0] == "fields":
- jira_field_id = field_path[1]
- # Parse date fields back to JIRA format
- if self._is_date_field(field_name) and new_value:
- parsed_date = self._parse_date_from_excel(str(new_value))
- if parsed_date:
- fields[jira_field_id] = parsed_date
- else:
- fields[jira_field_id] = new_value
- else:
- fields[jira_field_id] = new_value
-
- if fields:
- task_data = {"ID": task_id, "fields": fields}
- task = Task(data=task_data)
- tasks_to_update.append(task)
-
- # Write tasks back to JIRA
- try:
- await self.connector_ticket.write_tasks(tasks_to_update)
- audit_log.append(
- f"Successfully updated {len(tasks_to_update)} JIRA tasks"
- )
- except Exception as e:
- audit_log.append(f"Failed to update JIRA tasks: {str(e)}")
- raise
- else:
- audit_log.append("Step 4: No changes to apply to JIRA")
- audit_log.append("")
-
- # Success summary
- end_time = get_utc_now()
- duration = (end_time - start_time).total_seconds()
- audit_log.append("=== SYNC COMPLETED SUCCESSFULLY ===")
- audit_log.append(f"End Time: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
- audit_log.append(f"Duration: {duration:.2f} seconds")
- audit_log.append(f"Total Excel records processed: {len(excel_data)}")
- audit_log.append(f"Records with actual changes: {records_with_changes}")
- audit_log.append(f"JIRA tasks updated: {len(actual_changes)}")
-
- except Exception as e:
- # Error handling
- end_time = get_utc_now()
- duration = (end_time - start_time).total_seconds()
- audit_log.append("")
- audit_log.append("=== SYNC FAILED ===")
- audit_log.append(f"Error Time: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
- audit_log.append(f"Duration before failure: {duration:.2f} seconds")
- audit_log.append(f"Error: {str(e)}")
- raise
- finally:
- # Write audit log to SharePoint
- await self._write_audit_log(audit_log, "excel_to_jira")
-
- def _transform_tasks(
- self, tasks: list[Task], include_put: bool = False
+# Module-level factory to create TicketInterface by connector type
+async def createTicketInterfaceByType(
+ *,
+ taskSyncDefinition: dict,
+ connectorType: str,
+ connectorParams: dict,
+) -> "TicketInterface":
+ connectorTypeLower = (connectorType or "").strip().lower()
+ if connectorTypeLower == "jira":
+ from modules.connectors.connectorTicketsJira import ConnectorTicketJira
+ connector_ticket = ConnectorTicketJira(**connectorParams)
+ elif connectorTypeLower == "clickup":
+ from modules.connectors.connectorTicketsClickup import ConnectorTicketClickup
+        # The ClickUp connector is constructed synchronously, like the Jira connector above
+ connector_ticket = ConnectorTicketClickup(**connectorParams)
+ else:
+ raise ValueError(f"Unsupported connector_type: {connectorType}")
+
+ return TicketInterface(
+ connector_ticket=connector_ticket,
+ task_sync_definition=taskSyncDefinition,
+ )
+
+
+class TicketInterface:
+ def __init__(self, *, connector_ticket: TicketBase, task_sync_definition: dict):
+ self.connector_ticket = connector_ticket
+ self.task_sync_definition = task_sync_definition
+
+    async def exportTicketsAsList(self) -> list[dict]:
+        """Read all tickets, apply the sync definition, and return only rows that have an ID."""
+ tickets = await self.connector_ticket.read_tasks(limit=0)
+ transformed_tasks = self._transformTasks(tickets, includePut=True)
+ data_list = [task.data for task in transformed_tasks]
+ return self._filterEmptyRecords(data_list)
+
+    async def importListToTickets(self, records: list[dict]) -> None:
+        """Write the 'put' fields of each row (keyed by 'ID') back to the ticket system."""
+ updates: list[Task] = []
+ for row in records:
+ task_id = row.get("ID")
+ if not task_id:
+ continue
+ fields = {}
+ for field_name, field_config in self.task_sync_definition.items():
+ if field_config[0] == "put":
+ field_path = field_config[1]
+ value = row.get(field_name, "")
+ if len(field_path) >= 2 and field_path[0] == "fields":
+ field_id = field_path[1]
+ fields[field_id] = value
+ if fields:
+ updates.append(Task(data={"ID": task_id, "fields": fields}))
+ if updates:
+ await self.connector_ticket.write_tasks(updates)
+
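
A round-trip usage sketch of the factory plus the two methods above; the empty connectorParams and the 'Status' field are placeholders, not confirmed configuration:

    import asyncio

    async def demoRoundTrip(taskSyncDefinition: dict) -> None:
        interface = await createTicketInterfaceByType(
            taskSyncDefinition=taskSyncDefinition,
            connectorType="Jira",
            connectorParams={},  # placeholder; real credentials/params go here
        )
        rows = await interface.exportTicketsAsList()  # [{field name: value, ...}, ...]
        for row in rows:
            row["Status"] = "Done"  # assumes 'Status' is declared as a 'put' field
        await interface.importListToTickets(rows)  # only 'put' fields are written back

    # asyncio.run(demoRoundTrip(someTaskSyncDefinition))
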
+ def _transformTasks(
+ self, tasks: list[Task], includePut: bool = False
) -> list[Task]:
"""Transforms tasks according to the task_sync_definition."""
transformed_tasks = []
@@ -709,13 +68,12 @@ class TicketSharepointSyncInterface:
# Process each field in the sync definition
for field_name, field_config in self.task_sync_definition.items():
- direction = field_config[0] # "get" or "put"
- field_path = field_config[1] # List of keys to navigate
+ direction = field_config[0]
+ field_path = field_config[1]
# Get the right fields
- if direction == "get" or include_put:
- # Extract value using the field path
- value = self._extract_field_value(task.data, field_path, field_name)
+ if direction == "get" or includePut:
+ value = self._extractFieldValue(task.data, field_path, field_name)
transformed_data[field_name] = value
# Create new Task with transformed data
@@ -724,8 +82,8 @@ class TicketSharepointSyncInterface:
return transformed_tasks
- def _extract_field_value(self, issue_data: dict, field_path: list[str], field_name: str = None) -> Any:
- """Extract field value from JIRA issue data using field path."""
+    def _extractFieldValue(self, issue_data: dict, field_path: list[str], field_name: Optional[str] = None) -> Any:
+ """Extract field value from ticket data using field path."""
value = issue_data
try:
for key in field_path:
@@ -747,174 +105,21 @@ class TicketSharepointSyncInterface:
):
value = value[0]["value"]
- # Apply ADF conversion for specific fields that contain ADF content
- if isinstance(value, dict) and value.get("type") == "doc":
- value = self._convert_adf_to_text(value)
-
# Apply date formatting for date fields
- if field_name and self._is_date_field(field_name):
- value = self._format_date_for_excel(value)
+ if field_name and self._isDateField(field_name):
+ value = self._formatDateForExcel(value)
return value
except (KeyError, TypeError):
return None
- def _convert_adf_to_text(self, adf_data):
- """Convert Atlassian Document Format (ADF) to plain text.
-
- Based on Atlassian Document Format specification for JIRA fields.
- Handles paragraphs, lists, text formatting, and other ADF node types.
-
- Args:
- adf_data: ADF object or None
-
- Returns:
- str: Plain text content, or empty string if None/invalid
- """
- if not adf_data or not isinstance(adf_data, dict):
- return ""
-
- if adf_data.get("type") != "doc":
- return str(adf_data) if adf_data else ""
-
- content = adf_data.get("content", [])
- if not isinstance(content, list):
- return ""
-
- def extract_text_from_content(content_list, list_level=0):
- """Recursively extract text from ADF content with proper formatting."""
- text_parts = []
- list_counter = 1
-
- for item in content_list:
- if not isinstance(item, dict):
- continue
-
- item_type = item.get("type", "")
-
- if item_type == "text":
- # Extract text content, preserving formatting
- text = item.get("text", "")
- marks = item.get("marks", [])
-
- # Handle text formatting (bold, italic, etc.)
- if marks:
- for mark in marks:
- if mark.get("type") == "strong":
- text = f"**{text}**"
- elif mark.get("type") == "em":
- text = f"*{text}*"
- elif mark.get("type") == "code":
- text = f"`{text}`"
- elif mark.get("type") == "link":
- attrs = mark.get("attrs", {})
- href = attrs.get("href", "")
- if href:
- text = f"[{text}]({href})"
-
- text_parts.append(text)
-
- elif item_type == "hardBreak":
- text_parts.append("\n")
-
- elif item_type == "paragraph":
- paragraph_content = item.get("content", [])
- if paragraph_content:
- paragraph_text = extract_text_from_content(paragraph_content, list_level)
- if paragraph_text.strip():
- text_parts.append(paragraph_text)
-
- elif item_type == "bulletList":
- list_content = item.get("content", [])
- for list_item in list_content:
- if list_item.get("type") == "listItem":
- list_item_content = list_item.get("content", [])
- for list_paragraph in list_item_content:
- if list_paragraph.get("type") == "paragraph":
- list_paragraph_content = list_paragraph.get("content", [])
- if list_paragraph_content:
- indent = " " * list_level
- bullet_text = extract_text_from_content(list_paragraph_content, list_level + 1)
- if bullet_text.strip():
- text_parts.append(f"{indent}• {bullet_text}")
-
- elif item_type == "orderedList":
- list_content = item.get("content", [])
- for list_item in list_content:
- if list_item.get("type") == "listItem":
- list_item_content = list_item.get("content", [])
- for list_paragraph in list_item_content:
- if list_paragraph.get("type") == "paragraph":
- list_paragraph_content = list_paragraph.get("content", [])
- if list_paragraph_content:
- indent = " " * list_level
- ordered_text = extract_text_from_content(list_paragraph_content, list_level + 1)
- if ordered_text.strip():
- text_parts.append(f"{indent}{list_counter}. {ordered_text}")
- list_counter += 1
-
- elif item_type == "listItem":
- # Handle nested list items
- list_item_content = item.get("content", [])
- if list_item_content:
- text_parts.append(extract_text_from_content(list_item_content, list_level))
-
- elif item_type == "embedCard":
- # Handle embedded content (videos, etc.)
- attrs = item.get("attrs", {})
- url = attrs.get("url", "")
- if url:
- text_parts.append(f"[Embedded Content: {url}]")
-
- elif item_type == "codeBlock":
- # Handle code blocks
- code_content = item.get("content", [])
- if code_content:
- code_text = extract_text_from_content(code_content, list_level)
- if code_text.strip():
- text_parts.append(f"```\n{code_text}\n```")
-
- elif item_type == "blockquote":
- # Handle blockquotes
- quote_content = item.get("content", [])
- if quote_content:
- quote_text = extract_text_from_content(quote_content, list_level)
- if quote_text.strip():
- text_parts.append(f"> {quote_text}")
-
- elif item_type == "heading":
- # Handle headings
- heading_content = item.get("content", [])
- if heading_content:
- heading_text = extract_text_from_content(heading_content, list_level)
- if heading_text.strip():
- level = item.get("attrs", {}).get("level", 1)
- text_parts.append(f"{'#' * level} {heading_text}")
-
- elif item_type == "rule":
- # Handle horizontal rules
- text_parts.append("---")
-
- else:
- # Handle unknown types by trying to extract content
- if "content" in item:
- content_text = extract_text_from_content(item.get("content", []), list_level)
- if content_text.strip():
- text_parts.append(content_text)
-
- return "\n".join(text_parts)
-
- result = extract_text_from_content(content)
- return result.strip()
-
- def _format_date_for_excel(self, date_value: Any) -> Optional[str]:
+ def _formatDateForExcel(self, date_value: Any) -> Optional[str]:
"""Format date value for Excel export.
- Handles various date formats from JIRA and converts them to a consistent format
- suitable for Excel display.
+ Handles various date formats and converts them to a consistent format suitable for Excel display.
Args:
- date_value: Date value from JIRA (string, datetime, or None)
+        date_value: Date value from the ticket system (string, datetime, or None)
Returns:
Formatted date string or None if invalid/empty
@@ -955,61 +160,7 @@ class TicketSharepointSyncInterface:
# Log error but don't fail the sync
return str(date_value) if date_value else None
- def _parse_date_from_excel(self, date_string: str) -> Optional[str]:
- """Parse date string from Excel and convert to JIRA format.
-
- Converts Excel date strings back to JIRA-compatible ISO format.
-
- Args:
- date_string: Date string from Excel
-
- Returns:
- ISO formatted date string for JIRA or None if invalid
- """
- if not date_string or not isinstance(date_string, str):
- return None
-
- try:
- # Handle various Excel date formats
- date_string = date_string.strip()
-
- # Try common Excel date formats
- formats_to_try = [
- '%Y-%m-%d %H:%M:%S UTC', # Our export format
- '%Y-%m-%d %H:%M:%S', # Standard format
- '%Y-%m-%d', # Date only
- '%d.%m.%Y', # German format
- '%m/%d/%Y', # US format
- '%d/%m/%Y', # European format
- ]
-
- for fmt in formats_to_try:
- try:
- dt = datetime.strptime(date_string, fmt)
- # Convert to UTC and format as ISO
- if dt.tzinfo is None:
- dt = dt.replace(tzinfo=timezone.utc)
- return dt.isoformat()
- except ValueError:
- continue
-
- # If no format matches, try pandas parsing
- try:
- dt = pd.to_datetime(date_string)
- if hasattr(dt, 'to_pydatetime'):
- dt = dt.to_pydatetime()
- if dt.tzinfo is None:
- dt = dt.replace(tzinfo=timezone.utc)
- return dt.isoformat()
- except:
- pass
-
- return None
-
- except Exception:
- return None
-
- def _is_date_field(self, field_name: str) -> bool:
+ def _isDateField(self, field_name: str) -> bool:
"""Check if a field is a date field based on its name.
Args:
@@ -1021,7 +172,7 @@ class TicketSharepointSyncInterface:
date_keywords = ['date', 'time', 'created', 'updated', 'due', 'deadline']
return any(keyword in field_name.lower() for keyword in date_keywords)
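
The substring heuristic above is deliberately broad; a quick check of what it classifies:

    def isDateFieldSketch(field_name: str) -> bool:
        date_keywords = ['date', 'time', 'created', 'updated', 'due', 'deadline']
        return any(keyword in field_name.lower() for keyword in date_keywords)

    print(isDateFieldSketch("Due Date"))        # True
    print(isDateFieldSketch("Last Updated"))    # True
    print(isDateFieldSketch("Summary"))         # False
    print(isDateFieldSketch("Estimated Time"))  # True -- substring matching can over-trigger
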
- def _filter_empty_records(self, records: list[dict]) -> list[dict]:
+ def _filterEmptyRecords(self, records: list[dict]) -> list[dict]:
"""Remove records that are missing an ID.
Purposefully only filter by presence of 'ID' to avoid dropping
@@ -1033,392 +184,3 @@ class TicketSharepointSyncInterface:
filtered.append(row)
return filtered
- def _merge_jira_with_existing(
- self, jira_data: list[dict], existing_data: list[dict]
- ) -> list[dict]:
- """Merge JIRA data with existing CSV data, updating only 'get' fields."""
- # Create a lookup for existing data by ID
- existing_lookup = {row.get("ID"): row for row in existing_data if row.get("ID")}
-
- merged_data = []
- for jira_row in jira_data:
- jira_id = jira_row.get("ID")
- if jira_id and jira_id in existing_lookup:
- # Update existing row with JIRA data (only 'get' fields)
- existing_row = existing_lookup[jira_id].copy()
- for field_name, field_config in self.task_sync_definition.items():
- if field_config[0] == "get": # Only update 'get' fields
- existing_row[field_name] = jira_row.get(field_name)
- merged_data.append(existing_row)
- # Remove from lookup to track processed items
- del existing_lookup[jira_id]
- else:
- # New row from JIRA
- merged_data.append(jira_row)
-
- # Add any remaining existing rows that weren't in JIRA data
- merged_data.extend(existing_lookup.values())
-
- return merged_data
-
- def _merge_jira_with_existing_detailed(
- self, jira_data: list[dict], existing_data: list[dict]
- ) -> tuple[list[dict], dict]:
- """Merge JIRA data with existing CSV data and track detailed changes."""
- # Create a lookup for existing data by ID
- existing_lookup = {row.get("ID"): row for row in existing_data if row.get("ID")}
-
- merged_data = []
- changes = []
- updated_count = 0
- added_count = 0
- unchanged_count = 0
-
- for jira_row in jira_data:
- jira_id = jira_row.get("ID")
- if jira_id and jira_id in existing_lookup:
- # Update existing row with JIRA data (only 'get' fields)
- existing_row = existing_lookup[jira_id].copy()
- row_changes = []
-
- for field_name, field_config in self.task_sync_definition.items():
- if field_config[0] == "get": # Only update 'get' fields
- old_value = existing_row.get(field_name, "")
- new_value = jira_row.get(field_name, "")
-
- # Convert None to empty string for comparison
- old_value = "" if old_value is None else str(old_value)
- new_value = "" if new_value is None else str(new_value)
-
- if old_value != new_value:
- row_changes.append(
- f"{field_name}: '{old_value}' → '{new_value}'"
- )
-
- existing_row[field_name] = jira_row.get(field_name)
-
- merged_data.append(existing_row)
-
- if row_changes:
- updated_count += 1
- changes.append(
- f"Row ID {jira_id} updated: {', '.join(row_changes)}"
- )
- else:
- unchanged_count += 1
-
- # Remove from lookup to track processed items
- del existing_lookup[jira_id]
- else:
- # New row from JIRA
- merged_data.append(jira_row)
- added_count += 1
- changes.append(f"Row ID {jira_id} added as new record")
-
- # Add any remaining existing rows that weren't in JIRA data
- for remaining_row in existing_lookup.values():
- merged_data.append(remaining_row)
- unchanged_count += 1
-
- change_details = {
- "updated": updated_count,
- "added": added_count,
- "unchanged": unchanged_count,
- "changes": changes,
- }
-
- return merged_data, change_details
-
- async def _write_audit_log(self, audit_log: list[str], operation_type: str):
- """Write audit log to SharePoint."""
- try:
- timestamp = get_utc_now().strftime("%Y%m%d_%H%M%S")
- audit_filename = f"audit_{operation_type}_{timestamp}.log"
-
- # Convert audit log to bytes
- audit_content = "\n".join(audit_log).encode("utf-8")
-
- # Debug logging
- import logging
- logger = logging.getLogger(__name__)
- logger.debug(f"Writing audit log to folder: {self.audit_folder}, file: {audit_filename}")
-
- # Write to SharePoint
- await self.connector_sharepoint.upload_file(
- site_id=self.site_id,
- folder_path=self.audit_folder,
- file_name=audit_filename,
- content=audit_content,
- )
- logger.debug("Audit log written successfully")
- except Exception as e:
- # If audit logging fails, we don't want to break the main sync process
- # Just log the error (this could be enhanced with fallback logging)
- import logging
- logger = logging.getLogger(__name__)
- logger.warning(f"Failed to write audit log: {str(e)}")
- logger.warning(f"Audit folder: {self.audit_folder}")
- logger.warning(f"Operation type: {operation_type}")
- import traceback
- logger.warning(f"Traceback: {traceback.format_exc()}")
-
- def _create_csv_content(self, data: list[dict], existing_headers: dict = None) -> bytes:
- """Create CSV content with 4-row structure matching reference code."""
- # Get current timestamp for header
- timestamp = get_utc_now().strftime("%Y-%m-%d %H:%M:%S UTC")
-
- # Use existing headers if provided, otherwise use defaults
- if existing_headers is None:
- existing_headers = {"header1": "Header 1", "header2": "Header 2"}
-
- if not data:
- # Build an empty table with the expected columns from schema
- cols = list(self.task_sync_definition.keys())
-
- df = pd.DataFrame(columns=cols)
-
- # Parse existing headers to extract individual columns
- import csv as csv_module
- header1_text = existing_headers.get("header1", "Header 1")
- header2_text = existing_headers.get("header2", "Header 2")
-
- # Parse the existing header rows
- header1_reader = csv_module.reader([header1_text])
- header2_reader = csv_module.reader([header2_text])
- header1_row = next(header1_reader, [])
- header2_row = next(header2_reader, [])
-
- # Row 1: Use existing header1 or default
- if len(header1_row) >= len(cols):
- header_row1_data = header1_row[:len(cols)]
- else:
- header_row1_data = header1_row + [""] * (len(cols) - len(header1_row))
- header_row1 = pd.DataFrame([header_row1_data], columns=cols)
-
- # Row 2: Use existing header2 and add timestamp to second column
- if len(header2_row) >= len(cols):
- header_row2_data = header2_row[:len(cols)]
- else:
- header_row2_data = header2_row + [""] * (len(cols) - len(header2_row))
- if len(header_row2_data) > 1:
- header_row2_data[1] = timestamp
- header_row2 = pd.DataFrame([header_row2_data], columns=cols)
-
- # Row 3: table headers
- table_headers = pd.DataFrame([cols], columns=cols)
-
- final_df = pd.concat(
- [header_row1, header_row2, table_headers, df], ignore_index=True
- )
- csv_text = StringIO()
- final_df.to_csv(csv_text, index=False, header=False, quoting=1, escapechar='\\')
- return csv_text.getvalue().encode("utf-8")
-
- # Create DataFrame from data
- df = pd.DataFrame(data)
-
- # Force all columns to be object (string) type to preserve empty cells
- for column in df.columns:
- df[column] = df[column].astype("object")
- df[column] = df[column].fillna("")
-
- # Clean data: replace actual line breaks with \n and escape quotes
- for column in df.columns:
- df[column] = df[column].astype(str).str.replace('\n', '\\n', regex=False)
- df[column] = df[column].str.replace('"', '""', regex=False)
-
- # Create the 4-row structure
- # Parse existing headers to extract individual columns
- import csv as csv_module
- header1_text = existing_headers.get("header1", "Header 1")
- header2_text = existing_headers.get("header2", "Header 2")
-
- # Parse the existing header rows
- header1_reader = csv_module.reader([header1_text])
- header2_reader = csv_module.reader([header2_text])
- header1_row = next(header1_reader, [])
- header2_row = next(header2_reader, [])
-
- # Row 1: Use existing header1 or default
- if len(header1_row) >= len(df.columns):
- header_row1_data = header1_row[:len(df.columns)]
- else:
- header_row1_data = header1_row + [""] * (len(df.columns) - len(header1_row))
- header_row1 = pd.DataFrame([header_row1_data], columns=df.columns)
-
- # Row 2: Use existing header2 and add timestamp to second column
- if len(header2_row) >= len(df.columns):
- header_row2_data = header2_row[:len(df.columns)]
- else:
- header_row2_data = header2_row + [""] * (len(df.columns) - len(header2_row))
- if len(header_row2_data) > 1:
- header_row2_data[1] = timestamp
- header_row2 = pd.DataFrame([header_row2_data], columns=df.columns)
-
- # Row 3: Table headers (column names)
- table_headers = pd.DataFrame([df.columns.tolist()], columns=df.columns)
-
- # Concatenate all rows: header1 + header2 + table_headers + data
- final_df = pd.concat(
- [header_row1, header_row2, table_headers, df], ignore_index=True
- )
-
- # Convert to CSV bytes with proper quoting for fields containing special characters
- csv_text = StringIO()
- final_df.to_csv(csv_text, index=False, header=False, quoting=1, escapechar='\\')
- return csv_text.getvalue().encode("utf-8")
-
- def _create_excel_content(self, data: list[dict], existing_headers: dict = None) -> bytes:
- """Create Excel content with 4-row structure matching reference code."""
- # Get current timestamp for header
- timestamp = get_utc_now().strftime("%Y-%m-%d %H:%M:%S UTC")
-
- # Use existing headers if provided, otherwise use defaults
- if existing_headers is None:
- existing_headers = {"header1": "Header 1", "header2": "Header 2"}
-
- if not data:
- # Build an empty table with the expected columns from schema
- cols = list(self.task_sync_definition.keys())
-
- df = pd.DataFrame(columns=cols)
-
- # Parse existing headers to extract individual columns
- import csv as csv_module
- header1_text = existing_headers.get("header1", "Header 1")
- header2_text = existing_headers.get("header2", "Header 2")
-
- # Parse the existing header rows
- header1_reader = csv_module.reader([header1_text])
- header2_reader = csv_module.reader([header2_text])
- header1_row = next(header1_reader, [])
- header2_row = next(header2_reader, [])
-
- # Row 1: Use existing header1 or default
- if len(header1_row) >= len(cols):
- header_row1_data = header1_row[:len(cols)]
- else:
- header_row1_data = header1_row + [""] * (len(cols) - len(header1_row))
- header_row1 = pd.DataFrame([header_row1_data], columns=cols)
-
- # Row 2: Use existing header2 and add timestamp to second column
- if len(header2_row) >= len(cols):
- header_row2_data = header2_row[:len(cols)]
- else:
- header_row2_data = header2_row + [""] * (len(cols) - len(header2_row))
- if len(header_row2_data) > 1:
- header_row2_data[1] = timestamp
- header_row2 = pd.DataFrame([header_row2_data], columns=cols)
-
- # Row 3: table headers
- table_headers = pd.DataFrame([cols], columns=cols)
-
- final_df = pd.concat(
- [header_row1, header_row2, table_headers, df], ignore_index=True
- )
-
- # Create Excel file in memory
- excel_buffer = BytesIO()
- final_df.to_excel(excel_buffer, index=False, header=False, engine='openpyxl')
- return excel_buffer.getvalue()
-
- # Create DataFrame from data
- df = pd.DataFrame(data)
-
- # Force all columns to be object (string) type to preserve empty cells
- for column in df.columns:
- df[column] = df[column].astype("object")
- df[column] = df[column].fillna("")
-
- # Clean data: replace actual line breaks with \n and escape quotes
- for column in df.columns:
- df[column] = df[column].astype(str).str.replace('\n', '\\n', regex=False)
- df[column] = df[column].str.replace('"', '""', regex=False)
-
- # Create the 4-row structure
- # Parse existing headers to extract individual columns
- import csv as csv_module
- header1_text = existing_headers.get("header1", "Header 1")
- header2_text = existing_headers.get("header2", "Header 2")
-
- # Parse the existing header rows
- header1_reader = csv_module.reader([header1_text])
- header2_reader = csv_module.reader([header2_text])
- header1_row = next(header1_reader, [])
- header2_row = next(header2_reader, [])
-
- # Row 1: Use existing header1 or default
- if len(header1_row) >= len(df.columns):
- header_row1_data = header1_row[:len(df.columns)]
- else:
- header_row1_data = header1_row + [""] * (len(df.columns) - len(header1_row))
- header_row1 = pd.DataFrame([header_row1_data], columns=df.columns)
-
- # Row 2: Use existing header2 and add timestamp to second column
- if len(header2_row) >= len(df.columns):
- header_row2_data = header2_row[:len(df.columns)]
- else:
- header_row2_data = header2_row + [""] * (len(df.columns) - len(header2_row))
- if len(header_row2_data) > 1:
- header_row2_data[1] = timestamp
- header_row2 = pd.DataFrame([header_row2_data], columns=df.columns)
-
- # Row 3: Table headers (column names)
- table_headers = pd.DataFrame([df.columns.tolist()], columns=df.columns)
-
- # Concatenate all rows: header1 + header2 + table_headers + data
- final_df = pd.concat(
- [header_row1, header_row2, table_headers, df], ignore_index=True
- )
-
- # Create Excel file in memory
- excel_buffer = BytesIO()
- final_df.to_excel(excel_buffer, index=False, header=False, engine='openpyxl')
- return excel_buffer.getvalue()
-
- def _parse_excel_content(self, excel_content: bytes) -> tuple[list[dict], dict]:
- """Parse Excel content with 4-row structure and return data and headers."""
- try:
- # Load Excel file from bytes
- df = pd.read_excel(
- BytesIO(excel_content),
- engine='openpyxl',
- header=None
- )
-
- # Extract the 4 parts:
- # Row 1: Static header row 1
- header_row1 = df.iloc[0:1].copy()
-
- # Row 2: Static header row 2
- header_row2 = df.iloc[1:2].copy()
-
- # Row 3: Table headers
- table_headers = df.iloc[2:3].copy()
-
- # Row 4+: Data rows
- df_data = df.iloc[3:].copy()
- # Set column names from row 3
- df_data.columns = table_headers.iloc[0]
- # Reset index to start from 0
- df_data = df_data.reset_index(drop=True)
-
- # Force all columns to be object (string) type and handle NaN values
- for column in df_data.columns:
- df_data[column] = df_data[column].astype('object')
- # Fill NaN values with empty string to keep cells empty
- df_data[column] = df_data[column].fillna('')
-
- # Convert DataFrame to list of dictionaries
- data = df_data.to_dict(orient='records')
-
- # Extract headers as strings (like CSV version)
- headers = {
- "header1": ",".join([str(x) if pd.notna(x) else "" for x in header_row1.iloc[0].tolist()]),
- "header2": ",".join([str(x) if pd.notna(x) else "" for x in header_row2.iloc[0].tolist()])
- }
-
- return data, headers
-
- except Exception as e:
- raise Exception(f"Failed to parse Excel content: {str(e)}")
diff --git a/modules/interfaces/interfaceWebObjects.py b/modules/interfaces/interfaceWebObjects.py
index bdd1fd53..023eb539 100644
--- a/modules/interfaces/interfaceWebObjects.py
+++ b/modules/interfaces/interfaceWebObjects.py
@@ -33,12 +33,9 @@ class WebInterface:
@classmethod
async def create(cls) -> "WebInterface":
connectorWebTavily = await ConnectorTavily.create()
-
return WebInterface(connectorWebTavily=connectorWebTavily)
- async def search(
- self, web_search_request: WebSearchRequest
- ) -> WebSearchActionResult:
+ async def search(self, web_search_request: WebSearchRequest) -> WebSearchActionResult:
# NOTE: Add connectors here
return await self.connectorWebTavily.search_urls(web_search_request)
@@ -46,9 +43,7 @@ class WebInterface:
# NOTE: Add connectors here
return await self.connectorWebTavily.crawl_urls(web_crawl_request)
- async def scrape(
- self, web_scrape_request: WebScrapeRequest
- ) -> WebScrapeActionResult:
+ async def scrape(self, web_scrape_request: WebScrapeRequest) -> WebScrapeActionResult:
# NOTE: Add connectors here
return await self.connectorWebTavily.scrape(web_scrape_request)
diff --git a/modules/routes/routeDataNeutralization.py b/modules/routes/routeDataNeutralization.py
index 322d5398..71b1db25 100644
--- a/modules/routes/routeDataNeutralization.py
+++ b/modules/routes/routeDataNeutralization.py
@@ -7,7 +7,7 @@ from modules.security.auth import limiter, getCurrentUser
# Import interfaces
from modules.interfaces.interfaceAppModel import User, DataNeutraliserConfig, DataNeutralizerAttributes
-from modules.features.neutralization.mainNeutralizationPlayground import NeutralizationService
+from modules.features.neutralizePlayground.mainNeutralizePlayground import NeutralizationPlayground
# Configure logger
logger = logging.getLogger(__name__)
@@ -33,7 +33,7 @@ async def get_neutralization_config(
) -> DataNeutraliserConfig:
"""Get data neutralization configuration"""
try:
- service = NeutralizationService(currentUser)
+ service = NeutralizationPlayground(currentUser)
config = service.get_config()
if not config:
@@ -67,7 +67,7 @@ async def save_neutralization_config(
) -> DataNeutraliserConfig:
"""Save or update data neutralization configuration"""
try:
- service = NeutralizationService(currentUser)
+ service = NeutralizationPlayground(currentUser)
config = service.save_config(config_data)
return config
@@ -97,7 +97,7 @@ async def neutralize_text(
detail="Text content is required"
)
- service = NeutralizationService(currentUser)
+ service = NeutralizationPlayground(currentUser)
result = service.neutralize_text(text, file_id)
return result
@@ -128,7 +128,7 @@ async def resolve_text(
detail="Text content is required"
)
- service = NeutralizationService(currentUser)
+ service = NeutralizationPlayground(currentUser)
resolved_text = service.resolve_text(text)
return {"resolved_text": resolved_text}
@@ -151,7 +151,7 @@ async def get_neutralization_attributes(
) -> List[DataNeutralizerAttributes]:
"""Get neutralization attributes, optionally filtered by file ID"""
try:
- service = NeutralizationService(currentUser)
+ service = NeutralizationPlayground(currentUser)
attributes = service.get_attributes(fileId)
return attributes
@@ -181,7 +181,7 @@ async def process_sharepoint_files(
detail="Both source and target paths are required"
)
- service = NeutralizationService(currentUser)
+ service = NeutralizationPlayground(currentUser)
result = await service.process_sharepoint_files(source_path, target_path)
return result
@@ -210,7 +210,7 @@ async def batch_process_files(
detail="Files data is required"
)
- service = NeutralizationService(currentUser)
+ service = NeutralizationPlayground(currentUser)
result = service.batch_neutralize_files(files_data)
return result
@@ -232,7 +232,7 @@ async def get_neutralization_stats(
) -> Dict[str, Any]:
"""Get neutralization processing statistics"""
try:
- service = NeutralizationService(currentUser)
+ service = NeutralizationPlayground(currentUser)
stats = service.get_processing_stats()
return stats
@@ -253,7 +253,7 @@ async def cleanup_file_attributes(
) -> Dict[str, str]:
"""Clean up neutralization attributes for a specific file"""
try:
- service = NeutralizationService(currentUser)
+ service = NeutralizationPlayground(currentUser)
success = service.cleanup_file_attributes(fileId)
if success:
diff --git a/modules/security/csrf.py b/modules/security/csrf.py
index 030ab665..f1c4aa5b 100644
--- a/modules/security/csrf.py
+++ b/modules/security/csrf.py
@@ -51,17 +51,19 @@ class CSRFMiddleware(BaseHTTPMiddleware):
csrf_token = request.headers.get("X-CSRF-Token")
if not csrf_token:
logger.warning(f"CSRF token missing for {request.method} {request.url.path}")
- raise HTTPException(
+ from fastapi.responses import JSONResponse
+ return JSONResponse(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing"
+ content={"detail": "CSRF token missing"}
)
# Validate CSRF token format (basic validation)
if not self._is_valid_csrf_token(csrf_token):
logger.warning(f"Invalid CSRF token format for {request.method} {request.url.path}")
- raise HTTPException(
+ from fastapi.responses import JSONResponse
+ return JSONResponse(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ content={"detail": "Invalid CSRF token format"}
)
# Additional CSRF validation could be added here:
diff --git a/modules/services/__init__.py b/modules/services/__init__.py
index 842a2176..5725742f 100644
--- a/modules/services/__init__.py
+++ b/modules/services/__init__.py
@@ -39,60 +39,44 @@ class PublicService:
class Services:
- def __init__(self, user: User, workflow: ChatWorkflow):
+ def __init__(self, user: User, workflow: ChatWorkflow = None):
self.user: User = user
self.workflow: ChatWorkflow = workflow
- # Directly expose existing service modules
+ # Initialize interfaces
+
+ from modules.interfaces.interfaceChatObjects import getInterface as getChatInterface
+ self.interfaceChat = getChatInterface(user)
+
+ from modules.interfaces.interfaceAppObjects import getInterface as getAppInterface
+ self.interfaceApp = getAppInterface(user)
+
+ from modules.interfaces.interfaceComponentObjects import getInterface as getComponentInterface
+ self.interfaceComponent = getComponentInterface(user)
+
+ # Initialize service packages
from .serviceDocument.mainServiceDocumentExtraction import DocumentExtractionService
- self.document = PublicService(DocumentExtractionService(self))
+ self.documentExtraction = PublicService(DocumentExtractionService(self))
from .serviceDocument.mainServiceDocumentGeneration import DocumentGenerationService
- self.document = PublicService(DocumentGenerationService(self))
+ self.documentGeneration = PublicService(DocumentGenerationService(self))
- from .serviceNeutralization.mainNeutralization import NeutralizationService
- self.neutralization = PublicService(NeutralizationService())
+ from .serviceNeutralization.mainServiceNeutralization import NeutralizationService
+ self.neutralization = PublicService(NeutralizationService(self))
- from .serviceSharepoint.mainSharepoint import SharePointService
- self.sharepoint = PublicService(SharePointService(self))
+ from .serviceSharepoint.mainServiceSharepoint import SharepointService
+ self.sharepoint = PublicService(SharepointService(self))
from .serviceAi.mainServiceAi import AiService
self.ai = PublicService(AiService(self))
- from .serviceWorkflows.mainServiceWorkflows import WorkflowService
+ from .serviceTicket.mainServiceTicket import TicketService
+ self.ticket = PublicService(TicketService(self))
+
+ from .serviceWorkflow.mainServiceWorkflow import WorkflowService
self.workflow = PublicService(WorkflowService(self))
-
- # Initialize chat interface for workflow operations
- from modules.interfaces.interfaceChatObjects import getInterface as getChatInterface
- self.chatInterface = getChatInterface(user)
- # Chat interface wrapper methods
- def getWorkflow(self, workflowId: str):
- return self.chatInterface.getWorkflow(workflowId)
-
- def createWorkflow(self, workflowData: dict):
- return self.chatInterface.createWorkflow(workflowData)
-
- def updateWorkflow(self, workflowId: str, workflowData: dict):
- return self.chatInterface.updateWorkflow(workflowId, workflowData)
-
- def createMessage(self, messageData: dict):
- return self.chatInterface.createMessage(messageData)
-
- def updateMessage(self, messageId: str, messageData: dict):
- return self.chatInterface.updateMessage(messageId, messageData)
-
- def createLog(self, logData: dict):
- return self.chatInterface.createLog(logData)
-
- def updateWorkflowStats(self, workflowId: str, bytesSent: int = 0, bytesReceived: int = 0, tokenCount: int = 0):
- return self.chatInterface.updateWorkflowStats(workflowId, bytesSent, bytesReceived, tokenCount)
-
- @property
- def mandateId(self):
- return self.chatInterface.mandateId
-
def getInterface(user: User, workflow: ChatWorkflow) -> Services:
return Services(user, workflow)
diff --git a/modules/services/serviceAi/mainServiceAi.py b/modules/services/serviceAi/mainServiceAi.py
index c7458756..834e39a0 100644
--- a/modules/services/serviceAi/mainServiceAi.py
+++ b/modules/services/serviceAi/mainServiceAi.py
@@ -2,7 +2,7 @@ import logging
from typing import Dict, Any, List, Optional, Tuple
from modules.interfaces.interfaceChatModel import ChatDocument
-from modules.services.serviceDocument.documentExtraction import DocumentExtractionService
+from modules.services.serviceDocument.mainServiceDocumentExtraction import DocumentExtractionService
from modules.interfaces.interfaceAiModel import AiCallRequest, AiCallOptions
from modules.interfaces.interfaceAiObjects import AiObjects
@@ -19,9 +19,15 @@ class AiService:
The concrete connector instances (OpenAI/Anthropic) are injected by the interface layer.
"""
- def __init__(self, aiObjects: AiObjects | None = None) -> None:
+ def __init__(self, serviceCenter=None) -> None:
+ """Initialize AI service with service center access.
+
+ Args:
+ serviceCenter: Service center instance for accessing other services
+ """
+ self.serviceCenter = serviceCenter
# Only depend on interfaces
- self.aiObjects = aiObjects or AiObjects()
+ self.aiObjects = AiObjects()
self.documentExtractor = DocumentExtractionService()
async def callAi(
diff --git a/modules/services/serviceDocument/mainServiceDocumentExtraction.py b/modules/services/serviceDocument/mainServiceDocumentExtraction.py
index 41340f02..3f1c9ef9 100644
--- a/modules/services/serviceDocument/mainServiceDocumentExtraction.py
+++ b/modules/services/serviceDocument/mainServiceDocumentExtraction.py
@@ -22,7 +22,7 @@ from modules.interfaces.interfaceChatModel import (
ContentItem,
ContentMetadata
)
-from modules.services.serviceNeutralization.mainNeutralization import NeutralizationService
+from modules.services.serviceNeutralization.mainServiceNeutralization import NeutralizationService
from modules.shared.configuration import APP_CONFIG
logger = logging.getLogger(__name__)
@@ -43,9 +43,8 @@ class DocumentExtractionService:
"""Initialize the document processor."""
self._neutralizer = NeutralizationService() if APP_CONFIG.get("ENABLE_CONTENT_NEUTRALIZATION", False) else None
self._serviceCenter = serviceCenter
- # Centralized services interface (for AI)
- from modules.services import getInterface as getServices
- self.services = getServices(serviceCenter.user, serviceCenter.workflow)
+ # Store service center for access to user/workflow context when needed
+        self.services = None  # Left unset here to avoid a circular import with modules.services
self.supportedTypes: Dict[str, Callable[[bytes, str, str], Awaitable[List[ContentItem]]]] = {
# Text and data files
@@ -1427,17 +1426,21 @@ class DocumentExtractionService:
"""
from modules.interfaces.interfaceChatModel import ChatDocument
image_doc = ChatDocument(fileData=chunk, fileName="image", mimeType=mimeType)
- processedContent = await self.services.ai.callAi(
- prompt=imagePrompt,
- documents=[image_doc],
- options={
- "process_type": "image",
- "operation_type": "analyse_content",
- "priority": "balanced",
- "compress_documents": True,
- "max_cost": 0.03
- }
- )
+ # Use direct import to avoid circular dependency
+ from modules.services.serviceAi.mainServiceAi import AiService
+            # AiService builds its own AiObjects internally; no serviceCenter is passed here
+            aiService = AiService()
+ processedContent = await aiService.callAi(
+ prompt=imagePrompt,
+ documents=[image_doc],
+ options={
+ "process_type": "image",
+ "operation_type": "analyse_content",
+ "priority": "balanced",
+ "compress_documents": True,
+ "max_cost": 0.03
+ }
+ )
else:
# For text content (including SVG), use text AI service
# Neutralize content if neutralizer is enabled (only for text)
@@ -1462,36 +1465,23 @@ class DocumentExtractionService:
# For code files, preserve the complete content without AI processing
processedContent = contentToProcess
else:
- if self.services and hasattr(self.services, 'ai'):
- processedContent = await self.services.ai.callAi(
- prompt=aiPrompt,
- documents=None,
- options={
- "process_type": "text",
- "operation_type": "analyse_content",
- "priority": "balanced",
- "compress_prompt": True,
- "compress_documents": False,
- "processing_mode": "advanced",
- "max_cost": 0.05,
- "max_processing_time": 30
- }
- )
- else:
- # Fallback to basic AI processing with centralized service
- processedContent = await self.services.ai.callAi(
- prompt=aiPrompt,
- documents=None,
- options={
- "process_type": "text",
- "operation_type": "analyse_content",
- "priority": "speed",
- "compress_prompt": True,
- "compress_documents": False,
- "max_cost": 0.01,
- "max_processing_time": 15
- }
- )
+ # Use direct import to avoid circular dependency
+ from modules.services.serviceAi.mainServiceAi import AiService
+                # AiService builds its own AiObjects internally; no serviceCenter is passed here
+                aiService = AiService()
+ processedContent = await aiService.callAi(
+ prompt=aiPrompt,
+ documents=None,
+ options={
+ "process_type": "text",
+ "operation_type": "analyse_content",
+ "priority": "speed",
+ "compress_prompt": True,
+ "compress_documents": False,
+ "max_cost": 0.01,
+ "max_processing_time": 15
+ }
+ )
chunkResults.append(processedContent)
except Exception as aiError:
diff --git a/modules/services/serviceNeutralization/mainNeutralization.py b/modules/services/serviceNeutralization/mainServiceNeutralization.py
similarity index 79%
rename from modules/services/serviceNeutralization/mainNeutralization.py
rename to modules/services/serviceNeutralization/mainServiceNeutralization.py
index fbd8d122..d3f7ea20 100644
--- a/modules/services/serviceNeutralization/mainNeutralization.py
+++ b/modules/services/serviceNeutralization/mainServiceNeutralization.py
@@ -8,59 +8,51 @@ Multilingual: DE, EN, FR, IT
import logging
import re
-import os
-import uuid
import json
-from typing import Dict, List, Any, Optional, Tuple
-from datetime import datetime
-from pathlib import Path
-import mimetypes
+from typing import Dict, List, Any, Optional
-from modules.interfaces.interfaceAppObjects import getInterface
-from modules.interfaces.interfaceAppModel import User, DataNeutraliserConfig, DataNeutralizerAttributes
-from modules.shared.timezoneUtils import get_utc_timestamp
+from modules.interfaces.interfaceAppModel import DataNeutraliserConfig, DataNeutralizerAttributes
# Import all necessary classes and functions for neutralization
-from modules.services.serviceNeutralization.subProcessCommon import ProcessResult, CommonUtils, NeutralizationResult, NeutralizationAttribute
+from modules.services.serviceNeutralization.subProcessCommon import CommonUtils, NeutralizationResult, NeutralizationAttribute
from modules.services.serviceNeutralization.subProcessText import TextProcessor, PlainText
from modules.services.serviceNeutralization.subProcessList import ListProcessor, TableData
-from modules.services.serviceNeutralization.subProcessBinary import BinaryProcessor, BinaryData
-from modules.services.serviceNeutralization.subParseString import StringParser
-from modules.services.serviceNeutralization.subPatterns import Pattern, HeaderPatterns, DataPatterns, TextTablePatterns
+from modules.services.serviceNeutralization.subProcessBinary import BinaryProcessor
+from modules.services.serviceNeutralization.subPatterns import HeaderPatterns, DataPatterns, TextTablePatterns
logger = logging.getLogger(__name__)
class NeutralizationService:
"""Service for handling data neutralization operations"""
- def __init__(self, current_user: User = None, names_to_parse: List[str] = None):
+ def __init__(self, serviceCenter=None, NamesToParse: List[str] = None):
"""Initialize the service with user context and anonymization processors
Args:
- current_user: User object for context (optional for basic neutralization)
- names_to_parse: List of names to parse and replace (case-insensitive)
+ serviceCenter: Service center instance for accessing other services
+ NamesToParse: List of names to parse and replace (case-insensitive)
"""
- self.current_user = current_user
- self.app_interface = getInterface(current_user) if current_user else None
+ self.serviceCenter = serviceCenter
+        self.interfaceApp = serviceCenter.interfaceApp if serviceCenter else None
# Initialize anonymization processors
- self.names_to_parse = names_to_parse or []
- self.textProcessor = TextProcessor(names_to_parse)
- self.listProcessor = ListProcessor(names_to_parse)
+ self.NamesToParse = NamesToParse or []
+ self.textProcessor = TextProcessor(NamesToParse)
+ self.listProcessor = ListProcessor(NamesToParse)
self.binaryProcessor = BinaryProcessor()
self.commonUtils = CommonUtils()
def getConfig(self) -> Optional[DataNeutraliserConfig]:
"""Get the neutralization configuration for the current user's mandate"""
- if not self.app_interface:
+ if not self.interfaceApp:
return None
- return self.app_interface.getNeutralizationConfig()
+ return self.interfaceApp.getNeutralizationConfig()
def saveConfig(self, config_data: Dict[str, Any]) -> DataNeutraliserConfig:
"""Save or update the neutralization configuration"""
- if not self.app_interface:
+ if not self.interfaceApp:
raise ValueError("User context required for saving configuration")
- return self.app_interface.createOrUpdateNeutralizationConfig(config_data)
+ return self.interfaceApp.createOrUpdateNeutralizationConfig(config_data)
# Public API: process text or file
@@ -70,18 +62,18 @@ class NeutralizationService:
def processFile(self, fileId: str) -> Dict[str, Any]:
"""Neutralize a file referenced by its fileId using app interface."""
- if not self.app_interface:
+ if not self.interfaceApp:
raise ValueError("User context is required to process a file by fileId")
# Fetch file data and metadata
fileInfo = None
try:
# getFile returns an object; fallback to dict-like
- fileInfo = self.app_interface.getFile(fileId)
+ fileInfo = self.interfaceApp.getFile(fileId)
except Exception:
fileInfo = None
fileName = getattr(fileInfo, 'fileName', None) if fileInfo else None
mimeType = getattr(fileInfo, 'mimeType', None) if fileInfo else None
- fileData = self.app_interface.getFileData(fileId)
+ fileData = self.interfaceApp.getFileData(fileId)
if not fileData:
raise ValueError(f"No file data found for fileId: {fileId}")
@@ -111,17 +103,17 @@ class NeutralizationService:
return result
def resolveText(self, text: str) -> str:
- if not self.app_interface:
+ if not self.interfaceApp:
return text
try:
placeholder_pattern = r'\[([a-z]+)\.([a-f0-9-]{36})\]'
matches = re.findall(placeholder_pattern, text)
resolved_text = text
for placeholder_type, uid in matches:
- attributes = self.app_interface.db.getRecordset(
+ attributes = self.interfaceApp.db.getRecordset(
DataNeutralizerAttributes,
recordFilter={
- "mandateId": self.app_interface.mandateId,
+ "mandateId": self.interfaceApp.mandateId,
"id": uid
}
)
@@ -194,6 +186,19 @@ class NeutralizationService:
processed_info={'type': 'error', 'error': str(e)}
).model_dump()
+ def _getAttributes(self) -> List[DataNeutralizerAttributes]:
+ """Get all neutralization attributes for the current user's mandate"""
+ if not self.interfaceApp:
+ return []
+ try:
+ return self.interfaceApp.db.getRecordset(
+ DataNeutralizerAttributes,
+ recordFilter={"mandateId": self.interfaceApp.mandateId}
+ )
+ except Exception as e:
+ logger.error(f"Error getting neutralization attributes: {str(e)}")
+ return []
+
def _getContentTypeFromMime(self, mime_type: str) -> str:
"""Determine content type from MIME type for neutralization processing"""
if mime_type.startswith('text/'):
diff --git a/modules/services/serviceNeutralization/subParseString.py b/modules/services/serviceNeutralization/subParseString.py
index fd9f54cc..5c92e110 100644
--- a/modules/services/serviceNeutralization/subParseString.py
+++ b/modules/services/serviceNeutralization/subParseString.py
@@ -11,15 +11,15 @@ from modules.services.serviceNeutralization.subPatterns import DataPatterns, fin
class StringParser:
"""Handles string parsing and replacement operations"""
- def __init__(self, names_to_parse: List[str] = None):
+ def __init__(self, NamesToParse: List[str] = None):
"""
Initialize the string parser
Args:
- names_to_parse: List of names to parse and replace (case-insensitive)
+ NamesToParse: List of names to parse and replace (case-insensitive)
"""
self.data_patterns = DataPatterns.patterns
- self.names_to_parse = names_to_parse or []
+ self.NamesToParse = NamesToParse or []
self.mapping = {}
def is_placeholder(self, text: str) -> bool:
@@ -84,7 +84,7 @@ class StringParser:
Returns:
str: Text with custom names replaced
"""
- for name in self.names_to_parse:
+ for name in self.NamesToParse:
if not name.strip():
continue
diff --git a/modules/services/serviceNeutralization/subProcessList.py b/modules/services/serviceNeutralization/subProcessList.py
index e4ac91f7..3d5e7900 100644
--- a/modules/services/serviceNeutralization/subProcessList.py
+++ b/modules/services/serviceNeutralization/subProcessList.py
@@ -22,14 +22,14 @@ class TableData:
class ListProcessor:
"""Handles structured data processing with headers for anonymization"""
- def __init__(self, names_to_parse: List[str] = None):
+ def __init__(self, NamesToParse: List[str] = None):
"""
Initialize the list processor
Args:
- names_to_parse: List of names to parse and replace
+ NamesToParse: List of names to parse and replace
"""
- self.string_parser = StringParser(names_to_parse)
+ self.string_parser = StringParser(NamesToParse)
self.header_patterns = HeaderPatterns.patterns
def anonymize_table(self, table: TableData) -> TableData:
@@ -215,7 +215,7 @@ class ListProcessor:
text = self.string_parser.mapping[text]
else:
# Check if text matches any custom names from the user list
- for name in self.string_parser.names_to_parse:
+ for name in self.string_parser.NamesToParse:
if not name.strip():
continue
if text.lower().strip() == name.lower().strip():
diff --git a/modules/services/serviceNeutralization/subProcessText.py b/modules/services/serviceNeutralization/subProcessText.py
index 7b1a7372..98d8cbde 100644
--- a/modules/services/serviceNeutralization/subProcessText.py
+++ b/modules/services/serviceNeutralization/subProcessText.py
@@ -16,14 +16,14 @@ class PlainText:
class TextProcessor:
"""Handles plain text processing for anonymization"""
- def __init__(self, names_to_parse: List[str] = None):
+ def __init__(self, NamesToParse: List[str] = None):
"""
Initialize the text processor
Args:
- names_to_parse: List of names to parse and replace
+ NamesToParse: List of names to parse and replace
"""
- self.string_parser = StringParser(names_to_parse)
+ self.string_parser = StringParser(NamesToParse)
def extract_tables_from_text(self, content: str) -> tuple:
"""
diff --git a/modules/services/serviceSharepoint/mainSharepoint.py b/modules/services/serviceSharepoint/mainServiceSharepoint.py
similarity index 89%
rename from modules/services/serviceSharepoint/mainSharepoint.py
rename to modules/services/serviceSharepoint/mainServiceSharepoint.py
index 26a68e88..7a8bb084 100644
--- a/modules/services/serviceSharepoint/mainSharepoint.py
+++ b/modules/services/serviceSharepoint/mainServiceSharepoint.py
@@ -13,18 +13,55 @@ logger = logging.getLogger(__name__)
class SharepointService:
"""SharePoint connector using Microsoft Graph API for reliable authentication."""
- def __init__(self, access_token: str):
- """Initialize with access token.
+ def __init__(self, serviceCenter=None):
+ """Initialize SharePoint service without access token.
Args:
- access_token: Microsoft Graph access token
+ serviceCenter: Service center instance for accessing other services
+
+        Call setAccessToken() to configure the access token before making API calls.
"""
- self.access_token = access_token
+ self.serviceCenter = serviceCenter
+ self.access_token = None
self.base_url = "https://graph.microsoft.com/v1.0"
+ def setAccessToken(self, userConnection, interfaceApp) -> bool:
+ """Set access token from UserConnection.
+
+ Args:
+ userConnection: UserConnection object containing token information
+ interfaceApp: InterfaceApp instance used by TokenManager to resolve the token
+
+ Returns:
+ bool: True if token was set successfully, False otherwise
+ """
+ try:
+ if not userConnection:
+ logger.error("UserConnection is required to set access token")
+ return False
+
+ # Get a fresh token for this specific connection
+ from modules.security.tokenManager import TokenManager
+ token = TokenManager().getFreshToken(interfaceApp, userConnection.id)
+ if not token:
+ logger.error(f"No token found for connection {userConnection.id}")
+ return False
+
+ self.access_token = token.tokenAccess
+ logger.info(f"Access token set for connection {userConnection.id}")
+ return True
+
+ except Exception as e:
+ logger.error(f"Error setting access token: {str(e)}")
+ return False
+
async def _make_graph_api_call(self, endpoint: str, method: str = "GET", data: bytes = None) -> Dict[str, Any]:
"""Make a Microsoft Graph API call with proper error handling."""
try:
+ if self.access_token is None:
+ logger.error("Access token is not set. Please call setAccessToken() before using the SharePoint service.")
+ return {"error": "Access token is not set. Please call setAccessToken() before using the SharePoint service."}
+
headers = {
"Authorization": f"Bearer {self.access_token}",
"Content-Type": "application/json" if data and method != "PUT" else "application/octet-stream" if data else "application/json"
@@ -280,6 +317,10 @@ class SharepointService:
async def download_file(self, site_id: str, file_id: str) -> Optional[bytes]:
"""Download a file from SharePoint."""
try:
+ if self.access_token is None:
+ logger.error("Access token is not set. Please call setAccessToken() before using the SharePoint service.")
+ return None
+
endpoint = f"sites/{site_id}/drive/items/{file_id}/content"
headers = {"Authorization": f"Bearer {self.access_token}"}
@@ -416,6 +457,10 @@ class SharepointService:
async def download_file_by_path(self, site_id: str, file_path: str) -> Optional[bytes]:
"""Download a file by its path within a site."""
try:
+ if self.access_token is None:
+ logger.error("Access token is not set. Please call setAccessToken() before using the SharePoint service.")
+ return None
+
# Clean the path
clean_path = file_path.strip('/')
endpoint = f"sites/{site_id}/drive/root:/{clean_path}:/content"
diff --git a/modules/services/serviceTicket/mainServiceTicket.py b/modules/services/serviceTicket/mainServiceTicket.py
new file mode 100644
index 00000000..5018196d
--- /dev/null
+++ b/modules/services/serviceTicket/mainServiceTicket.py
@@ -0,0 +1,41 @@
+"""Ticket service for creating ticket interfaces."""
+
+import logging
+from typing import Dict, Any, Optional
+from modules.interfaces.interfaceTicketObjects import createTicketInterfaceByType
+
+logger = logging.getLogger(__name__)
+
+
+class TicketService:
+ """Service class for ticket interface operations."""
+
+ def __init__(self, serviceCenter=None):
+ """Initialize ticket service with service center access.
+
+ Args:
+ serviceCenter: Service center instance for accessing other services
+ """
+ self.serviceCenter = serviceCenter
+
+ async def createTicketInterfaceByType(
+ self,
+ taskSyncDefinition: Dict[str, Any],
+ connectorType: str,
+ connectorParams: Optional[Dict[str, Any]] = None
+ ):
+ """Create a ticket interface by type with the given parameters.
+
+ Args:
+ taskSyncDefinition: Field mapping definition for ticket synchronization
+ connectorType: Type of connector (e.g., "Jira", "ServiceNow")
+ connectorParams: Optional parameters for the connector
+
+ Returns:
+ Ticket interface instance
+ """
+ return await createTicketInterfaceByType(
+ taskSyncDefinition=taskSyncDefinition,
+ connectorType=connectorType,
+ connectorParams=connectorParams
+ )
diff --git a/modules/services/serviceWorkflows/mainServiceWorkflows.py b/modules/services/serviceWorkflow/mainServiceWorkflow.py
similarity index 90%
rename from modules/services/serviceWorkflows/mainServiceWorkflows.py
rename to modules/services/serviceWorkflow/mainServiceWorkflow.py
index 08ff2e8f..a9440993 100644
--- a/modules/services/serviceWorkflows/mainServiceWorkflows.py
+++ b/modules/services/serviceWorkflow/mainServiceWorkflow.py
@@ -3,7 +3,7 @@ import uuid
from typing import Dict, Any, List, Optional
from modules.interfaces.interfaceAppModel import User, UserConnection
from modules.interfaces.interfaceChatModel import ChatDocument, ChatMessage, ExtractedContent
-from modules.services.serviceDocument.documentExtraction import DocumentExtractionService
+from modules.services.serviceDocument.mainServiceDocumentExtraction import DocumentExtractionService
from modules.services.serviceDocument.documentUtility import getFileExtension, getMimeTypeFromExtension, detectContentTypeFromData
from modules.shared.timezoneUtils import get_utc_timestamp
@@ -12,17 +12,13 @@ logger = logging.getLogger(__name__)
class WorkflowService:
"""Service class containing methods for document processing, chat operations, and workflow management"""
- def __init__(self, service_center):
- self.service_center = service_center
- self.user = service_center.user
- self.workflow = service_center.workflow
- self.interfaceChat = service_center.interfaceChat
- self.interfaceComponent = service_center.interfaceComponent
- self.interfaceApp = service_center.interfaceApp
- self.documentProcessor = service_center.documentProcessor
- # Centralized services interface (for AI)
- from modules.services import getInterface as getServices
- self.services = getServices(self.user, self.workflow)
+ def __init__(self, serviceCenter):
+ self.serviceCenter = serviceCenter
+ self.user = serviceCenter.user
+ self.workflow = serviceCenter.workflow
+ self.interfaceChat = serviceCenter.interfaceChat
+ self.interfaceComponent = serviceCenter.interfaceComponent
+ self.interfaceApp = serviceCenter.interfaceApp
async def summarizeChat(self, messages: List[ChatMessage]) -> str:
"""
@@ -57,8 +53,10 @@ class WorkflowService:
Please provide a comprehensive summary of this conversation."""
- # Get summary using centralized AI (speed priority)
- return await self.services.ai.callAi(
+ # Get summary using AI service directly (avoiding circular dependency)
+ from modules.services.serviceAi.mainServiceAi import AiService
+ ai_service = AiService(self)
+ return await ai_service.callAi(
prompt=prompt,
documents=None,
options={
@@ -251,6 +249,22 @@ class WorkflowService:
logger.debug(f"getConnectionReferenceFromUserConnection: Built reference: {base_ref + state_info}")
return base_ref + state_info
+
+ def getUserConnectionByExternalUsername(self, authority: str, externalUsername: str) -> Optional[UserConnection]:
+ """Fetch the user's connection by authority and external username."""
+ try:
+ if not authority or not externalUsername:
+ return None
+ user_connections = self.interfaceApp.getUserConnections(self.user.id)
+ for connection in user_connections:
+ # Normalize authority for comparison (enum vs string)
+ connection_authority = connection.authority.value if hasattr(connection.authority, 'value') else str(connection.authority)
+ if connection_authority.lower() == authority.lower() and connection.externalUsername == externalUsername:
+ return connection
+ return None
+ except Exception as e:
+ logger.error(f"Error getting connection by external username: {str(e)}")
+ return None
def getUserConnectionFromConnectionReference(self, connectionReference: str) -> Optional[UserConnection]:
"""Get UserConnection from reference string (handles both old and enhanced formats)"""
@@ -334,14 +348,34 @@ class WorkflowService:
# Recovery failed - don't continue with invalid data
raise RuntimeError(f"Document {document.id} properties are inaccessible and recovery failed. Diagnosis: {diagnosis}")
- # Process with document processor directly
- extractedContent = await self.documentProcessor.processFileData(
+ # Process with DocumentExtractionService directly (no circular dependency)
+ from modules.services.serviceDocument.mainServiceDocumentExtraction import DocumentExtractionService
+ docService = DocumentExtractionService(None) # Pass None to avoid circular dependency
+ content_items = await docService.processFileData(
fileData=fileData,
fileName=fileName,
mimeType=mimeType,
base64Encoded=False,
prompt=prompt,
- documentId=document.id
+ enableAI=True
+ )
+
+ # Convert ContentItem list to ExtractedContent
+ contents = []
+ for item in content_items:
+ contents.append({
+ 'label': item.label,
+ 'data': item.data,
+ 'metadata': {
+ 'mimeType': item.metadata.mimeType if hasattr(item.metadata, 'mimeType') else mimeType,
+ 'size': item.metadata.size if hasattr(item.metadata, 'size') else len(fileData),
+ 'base64Encoded': item.metadata.base64Encoded if hasattr(item.metadata, 'base64Encoded') else False
+ }
+ })
+
+ extractedContent = ExtractedContent(
+ id=document.id,
+ contents=contents
)
# Note: ExtractedContent model only has 'id' and 'contents' fields
diff --git a/modules/workflows/methods/methodSharepoint.py b/modules/workflows/methods/methodSharepoint.py
index accb268d..35999e17 100644
--- a/modules/workflows/methods/methodSharepoint.py
+++ b/modules/workflows/methods/methodSharepoint.py
@@ -32,7 +32,7 @@ class MethodSharepoint(MethodBase):
return datetime.now(UTC).strftime("%Y%m%d-%H%M%S")
def _getMicrosoftConnection(self, connectionReference: str) -> Optional[Dict[str, Any]]:
- """Get Microsoft connection from connection reference"""
+ """Get Microsoft connection from connection reference and configure SharePoint service"""
try:
userConnection = self.service.getUserConnectionFromConnectionReference(connectionReference)
if not userConnection:
@@ -48,47 +48,33 @@ class MethodSharepoint(MethodBase):
logger.warning(f"Connection {userConnection.id} status is not active/pending: {userConnection.status.value}")
return None
- # Get a fresh token for this specific connection
- from modules.security.tokenManager import TokenManager
- token = TokenManager().getFreshToken(self.service.interfaceApp, userConnection.id)
- if not token:
- logger.warning(f"No token found for connection {userConnection.id}")
+ # Configure SharePoint service with the UserConnection
+ if not self.service.sharepoint.setAccessToken(userConnection, self.service.interfaceApp):
+ logger.warning(f"Failed to configure SharePoint service with connection {userConnection.id}")
return None
- # Check if token is expired
- if hasattr(token, 'expiresAt') and token.expiresAt:
- current_time = get_utc_timestamp()
- if current_time > token.expiresAt:
- logger.warning(f"Token for connection {userConnection.id} is expired (expiresAt: {token.expiresAt}, current: {current_time})")
- return None
-
- logger.info(f"Successfully retrieved Microsoft connection: {userConnection.id}, status: {userConnection.status.value}, externalId: {userConnection.externalId}")
+ logger.info(f"Successfully configured SharePoint service with Microsoft connection: {userConnection.id}, status: {userConnection.status.value}, externalId: {userConnection.externalId}")
return {
"id": userConnection.id,
"userConnection": userConnection,
- "accessToken": token.tokenAccess,
- "refreshToken": token.tokenRefresh,
"scopes": ["Sites.ReadWrite.All", "Files.ReadWrite.All", "User.Read"] # SharePoint scopes
}
except Exception as e:
logger.error(f"Error getting Microsoft connection: {str(e)}")
return None
- async def _discoverSharePointSites(self, access_token: str) -> List[Dict[str, Any]]:
+ async def _discoverSharePointSites(self) -> List[Dict[str, Any]]:
"""
Discover all SharePoint sites accessible to the user via Microsoft Graph API
- Parameters:
- access_token (str): Microsoft Graph access token
-
Returns:
List[Dict[str, Any]]: List of SharePoint site information
"""
try:
# Query Microsoft Graph to get all sites the user has access to
endpoint = "sites?search=*"
- result = await self._makeGraphApiCall(access_token, endpoint)
+ result = await self._makeGraphApiCall(endpoint)
if "error" in result:
logger.error(f"Error discovering SharePoint sites: {result['error']}")
@@ -375,11 +361,14 @@ class MethodSharepoint(MethodBase):
logger.error(f"Error parsing site URL {siteUrl}: {str(e)}")
return {"hostname": "", "sitePath": ""}
- async def _makeGraphApiCall(self, access_token: str, endpoint: str, method: str = "GET", data: bytes = None) -> Dict[str, Any]:
+ async def _makeGraphApiCall(self, endpoint: str, method: str = "GET", data: bytes = None) -> Dict[str, Any]:
"""Make a Microsoft Graph API call with timeout and detailed logging"""
try:
+ if not hasattr(self.service, 'sharepoint') or not self.service.sharepoint._target.access_token:
+ return {"error": "SharePoint service not configured with access token"}
+
headers = {
- "Authorization": f"Bearer {access_token}",
+ "Authorization": f"Bearer {self.service.sharepoint._target.access_token}",
"Content-Type": "application/json" if data and method != "PUT" else "application/octet-stream" if data else "application/json"
}
@@ -436,11 +425,11 @@ class MethodSharepoint(MethodBase):
logger.error(f"Error making Graph API call: {str(e)}")
return {"error": f"Error making Graph API call: {str(e)}"}
- async def _getSiteId(self, access_token: str, hostname: str, site_path: str) -> str:
+ async def _getSiteId(self, hostname: str, site_path: str) -> str:
"""Get SharePoint site ID from hostname and site path"""
try:
endpoint = f"sites/{hostname}:/{site_path}"
- result = await self._makeGraphApiCall(access_token, endpoint)
+ result = await self._makeGraphApiCall(endpoint)
if "error" in result:
logger.error(f"Error getting site ID: {result['error']}")
@@ -482,7 +471,7 @@ class MethodSharepoint(MethodBase):
# Discover SharePoint sites - use targeted approach when site parameter is provided
if site:
# When site parameter is provided, discover all sites first, then filter
- all_sites = await self._discoverSharePointSites(connection["accessToken"])
+ all_sites = await self._discoverSharePointSites()
if not all_sites:
return ActionResult.isFailure(error="No SharePoint sites found or accessible")
@@ -492,7 +481,7 @@ class MethodSharepoint(MethodBase):
return ActionResult.isFailure(error=f"No SharePoint sites found matching '{site}'")
else:
# No site parameter - discover all sites
- sites = await self._discoverSharePointSites(connection["accessToken"])
+ sites = await self._discoverSharePointSites()
if not sites:
return ActionResult.isFailure(error="No SharePoint sites found or accessible")
@@ -535,7 +524,6 @@ class MethodSharepoint(MethodBase):
# Use global search endpoint (site-specific search not available)
unified_result = await self._makeGraphApiCall(
- connection["accessToken"],
"search/query",
method="POST",
data=json.dumps(payload).encode("utf-8")
@@ -707,7 +695,7 @@ class MethodSharepoint(MethodBase):
logger.info(f"Using search API for files with query: '{search_query}'")
# Make the search API call (files)
- search_result = await self._makeGraphApiCall(connection["accessToken"], endpoint)
+ search_result = await self._makeGraphApiCall(endpoint)
if "error" in search_result:
logger.warning(f"Search failed for site {site_name}: {search_result['error']}")
continue
@@ -942,7 +930,7 @@ class MethodSharepoint(MethodBase):
return ActionResult.isFailure(error=f"Invalid pathQuery '{pathQuery}'. This appears to be search terms, not a valid SharePoint path. Use findDocumentPath action first to search for folders, then use the returned folder path as pathQuery.")
# For pathQuery, we need to discover sites to find the specific one
- sites = await self._discoverSharePointSites(connection["accessToken"])
+ sites = await self._discoverSharePointSites()
if not sites:
return ActionResult.isFailure(error="No SharePoint sites found or accessible")
else:
@@ -975,7 +963,7 @@ class MethodSharepoint(MethodBase):
search_query = fileName.replace("'", "''") # Escape single quotes for OData
endpoint = f"sites/{site_id}/drive/root/search(q='{search_query}')"
- search_result = await self._makeGraphApiCall(connection["accessToken"], endpoint)
+ search_result = await self._makeGraphApiCall(endpoint)
if "error" in search_result:
continue
@@ -988,7 +976,7 @@ class MethodSharepoint(MethodBase):
file_endpoint = f"sites/{site_id}/drive/items/{file_id}"
# Get file metadata
- file_info_result = await self._makeGraphApiCall(connection["accessToken"], file_endpoint)
+ file_info_result = await self._makeGraphApiCall(file_endpoint)
if "error" in file_info_result:
continue
@@ -1027,7 +1015,7 @@ class MethodSharepoint(MethodBase):
# For content download, we need to handle binary data
try:
async with aiohttp.ClientSession() as session:
- headers = {"Authorization": f"Bearer {connection['accessToken']}"}
+ headers = {"Authorization": f"Bearer {self.service.sharepoint._target.access_token}"}
async with session.get(f"https://graph.microsoft.com/v1.0/{content_endpoint}", headers=headers) as response:
if response.status == 200:
content = await response.text()
@@ -1280,7 +1268,7 @@ class MethodSharepoint(MethodBase):
return ActionResult.isFailure(error=f"Invalid pathQuery '{upload_path}'. This appears to be search terms, not a valid SharePoint path. Use findDocumentPath action first to search for folders, then use the returned folder path as pathQuery.")
# For pathQuery, we need to discover sites to find the specific one
- sites = await self._discoverSharePointSites(connection["accessToken"])
+ sites = await self._discoverSharePointSites()
if not sites:
return ActionResult.isFailure(error="No SharePoint sites found or accessible")
@@ -1368,7 +1356,6 @@ class MethodSharepoint(MethodBase):
# Upload the file
upload_result = await self._makeGraphApiCall(
- connection["accessToken"],
upload_endpoint,
method="PUT",
data=file_data
@@ -1633,7 +1620,7 @@ class MethodSharepoint(MethodBase):
return ActionResult.isFailure(error=f"Invalid pathQuery '{pathQuery}'. This appears to be search terms, not a valid SharePoint path. Use findDocumentPath action first to search for folders, then use the returned folder path as pathQuery.")
# For pathQuery, we need to discover sites to find the specific one
- sites = await self._discoverSharePointSites(connection["accessToken"])
+ sites = await self._discoverSharePointSites()
if not sites:
return ActionResult.isFailure(error="No SharePoint sites found or accessible")
else:
@@ -1680,7 +1667,7 @@ class MethodSharepoint(MethodBase):
endpoint = f"sites/{site_id}/drive/root:/{folder_path_clean}:/children"
# Make the API call to list folder contents
- api_result = await self._makeGraphApiCall(connection["accessToken"], endpoint)
+ api_result = await self._makeGraphApiCall(endpoint)
if "error" in api_result:
logger.warning(f"Failed to list folder {folderPath} in site {site_name}: {api_result['error']}")
@@ -1745,7 +1732,7 @@ class MethodSharepoint(MethodBase):
subfolder_endpoint = f"sites/{site_id}/drive/items/{item['id']}/children"
logger.debug(f"Getting contents of subfolder: {item['name']}")
- subfolder_result = await self._makeGraphApiCall(connection["accessToken"], subfolder_endpoint)
+ subfolder_result = await self._makeGraphApiCall(subfolder_endpoint)
if "error" not in subfolder_result:
subfolder_items = subfolder_result.get("value", [])
logger.debug(f"Found {len(subfolder_items)} items in subfolder {item['name']}")
diff --git a/modules/workflows/processing/promptFactory.py b/modules/workflows/processing/promptFactory.py
index 68deb87e..1df21183 100644
--- a/modules/workflows/processing/promptFactory.py
+++ b/modules/workflows/processing/promptFactory.py
@@ -17,7 +17,7 @@ logger = logging.getLogger(__name__)
# Global methods catalog - moved from serviceCenter
methods = {}
-def _discoverMethods(service_center):
+def _discoverMethods(serviceCenter):
"""Dynamically discover all method classes and their actions in modules methods package"""
try:
# Import the methods package
@@ -36,7 +36,7 @@ def _discoverMethods(service_center):
issubclass(item, MethodBase) and
item != MethodBase):
# Instantiate the method
- methodInstance = item(service_center)
+ methodInstance = item(serviceCenter)
# Discover actions from public methods
actions = {}
@@ -83,11 +83,11 @@ def _discoverMethods(service_center):
except Exception as e:
logger.error(f"Error discovering methods: {str(e)}")
-def getMethodsList(service_center) -> List[str]:
+def getMethodsList(serviceCenter) -> List[str]:
"""Get list of available methods with their signatures in the required format"""
# Initialize methods if not already done
if not methods:
- _discoverMethods(service_center)
+ _discoverMethods(serviceCenter)
methodList = []
for methodName, method in methods.items():
@@ -99,10 +99,10 @@ def getMethodsList(service_center) -> List[str]:
methodList.append(signature)
return methodList
-def getEnhancedDocumentContext(service_center) -> str:
+def getEnhancedDocumentContext(serviceCenter) -> str:
"""Get enhanced document context formatted for action planning prompts with proper docList and docItem references"""
try:
- document_list = service_center.getDocumentReferenceList()
+ document_list = serviceCenter.getDocumentReferenceList()
# Build technical context string for AI action planning
context = "AVAILABLE DOCUMENTS:\n\n"
@@ -114,7 +114,7 @@ def getEnhancedDocumentContext(service_center) -> str:
# Generate docList reference for the exchange (using message ID and label)
# Find the message that corresponds to this exchange
message_id = None
- for message in service_center.workflow.messages:
+ for message in serviceCenter.workflow.messages:
if hasattr(message, 'documentsLabel') and message.documentsLabel == exchange.documentsLabel:
message_id = message.id
break
@@ -143,7 +143,7 @@ def getEnhancedDocumentContext(service_center) -> str:
# Generate docList reference for the exchange (using message ID and label)
# Find the message that corresponds to this exchange
message_id = None
- for message in service_center.workflow.messages:
+ for message in serviceCenter.workflow.messages:
if hasattr(message, 'documentsLabel') and message.documentsLabel == exchange.documentsLabel:
message_id = message.id
break
diff --git a/modules/workflows/workflowManager.py b/modules/workflows/workflowManager.py
index bebaa7f8..597725d7 100644
--- a/modules/workflows/workflowManager.py
+++ b/modules/workflows/workflowManager.py
@@ -32,7 +32,7 @@ class WorkflowManager:
currentTime = get_utc_timestamp()
if workflowId:
- workflow = self.services.getWorkflow(workflowId)
+ workflow = self.services.workflow.getWorkflow(workflowId)
if not workflow:
raise ValueError(f"Workflow {workflowId} not found")
@@ -43,11 +43,11 @@ class WorkflowManager:
logger.info(f"Stopping running workflow {workflowId} before processing new prompt")
workflow.status = "stopped"
workflow.lastActivity = currentTime
- self.services.updateWorkflow(workflowId, {
+ self.services.workflow.updateWorkflow(workflowId, {
"status": "stopped",
"lastActivity": currentTime
})
- self.services.createLog({
+ self.services.workflow.createLog({
"workflowId": workflowId,
"message": "Workflow stopped for new prompt",
"type": "info",
@@ -57,17 +57,17 @@ class WorkflowManager:
await asyncio.sleep(0.1)
newRound = workflow.currentRound + 1
- self.services.updateWorkflow(workflowId, {
+ self.services.workflow.updateWorkflow(workflowId, {
"status": "running",
"lastActivity": currentTime,
"currentRound": newRound
})
- workflow = self.services.getWorkflow(workflowId)
+ workflow = self.services.workflow.getWorkflow(workflowId)
if not workflow:
raise ValueError(f"Failed to reload workflow {workflowId} after update")
- self.services.createLog({
+ self.services.workflow.createLog({
"workflowId": workflowId,
"message": f"Workflow resumed (round {workflow.currentRound})",
"type": "info",
@@ -85,7 +85,7 @@ class WorkflowManager:
"currentAction": 0,
"totalTasks": 0,
"totalActions": 0,
- "mandateId": self.services.mandateId,
+ "mandateId": self.services.user.mandateId,
"messageIds": [],
"workflowMode": workflowMode,
"maxSteps": 5 if workflowMode == "React" else 1, # Set maxSteps for React mode
@@ -99,12 +99,12 @@ class WorkflowManager:
}
}
- workflow = self.services.createWorkflow(workflowData)
+ workflow = self.services.workflow.createWorkflow(workflowData)
logger.info(f"Created workflow with mode: {getattr(workflow, 'workflowMode', 'NOT_SET')}")
logger.info(f"Workflow data passed: {workflowData.get('workflowMode', 'NOT_IN_DATA')}")
workflow.currentRound = 1
- self.services.updateWorkflow(workflow.id, {"currentRound": 1})
- self.services.updateWorkflowStats(workflow.id, bytesSent=0, bytesReceived=0)
+ self.services.workflow.updateWorkflow(workflow.id, {"currentRound": 1})
+ self.services.workflow.updateWorkflowStats(workflow.id, bytesSent=0, bytesReceived=0)
# Add workflow to services
self.services.workflow = workflow
@@ -120,17 +120,17 @@ class WorkflowManager:
async def workflowStop(self, workflowId: str) -> ChatWorkflow:
"""Stops a running workflow."""
try:
- workflow = self.services.getWorkflow(workflowId)
+ workflow = self.services.workflow.getWorkflow(workflowId)
if not workflow:
raise ValueError(f"Workflow {workflowId} not found")
workflow.status = "stopped"
workflow.lastActivity = get_utc_timestamp()
- self.services.updateWorkflow(workflowId, {
+ self.services.workflow.updateWorkflow(workflowId, {
"status": "stopped",
"lastActivity": workflow.lastActivity
})
- self.services.createLog({
+ self.services.workflow.createLog({
"workflowId": workflowId,
"message": "Workflow stopped",
"type": "warning",
@@ -192,7 +192,7 @@ class WorkflowManager:
}
# Create message first to get messageId
- message = self.services.createMessage(messageData)
+ message = self.services.workflow.createMessage(messageData)
if message:
workflow.messages.append(message)
@@ -205,7 +205,7 @@ class WorkflowManager:
documents = await self._processFileIds(userInput.listFileId, message.id)
message.documents = documents
# Update the message with documents in database
- self.services.updateMessage(message.id, {"documents": [doc.to_dict() for doc in documents]})
+ self.services.workflow.updateMessage(message.id, {"documents": [doc.to_dict() for doc in documents]})
return message
else:
@@ -307,14 +307,14 @@ class WorkflowManager:
"taskProgress": "stopped",
"actionProgress": "stopped"
}
- message = self.services.createMessage(stopped_message)
+ message = self.services.workflow.createMessage(stopped_message)
if message:
workflow.messages.append(message)
# Update workflow status to stopped
workflow.status = "stopped"
workflow.lastActivity = get_utc_timestamp()
- self.services.updateWorkflow(workflow.id, {
+ self.services.workflow.updateWorkflow(workflow.id, {
"status": "stopped",
"lastActivity": workflow.lastActivity
})
@@ -339,14 +339,14 @@ class WorkflowManager:
"taskProgress": "stopped",
"actionProgress": "stopped"
}
- message = self.services.createMessage(stopped_message)
+ message = self.services.workflow.createMessage(stopped_message)
if message:
workflow.messages.append(message)
# Update workflow status to stopped
workflow.status = "stopped"
workflow.lastActivity = get_utc_timestamp()
- self.services.updateWorkflow(workflow.id, {
+ self.services.workflow.updateWorkflow(workflow.id, {
"status": "stopped",
"lastActivity": workflow.lastActivity,
"totalTasks": workflow.totalTasks,
@@ -354,7 +354,7 @@ class WorkflowManager:
})
# Add stopped log entry
- self.services.createLog({
+ self.services.workflow.createLog({
"workflowId": workflow.id,
"message": "Workflow stopped by user",
"type": "warning",
@@ -381,14 +381,14 @@ class WorkflowManager:
"taskProgress": "fail",
"actionProgress": "fail"
}
- message = self.services.createMessage(error_message)
+ message = self.services.workflow.createMessage(error_message)
if message:
workflow.messages.append(message)
# Update workflow status to failed
workflow.status = "failed"
workflow.lastActivity = get_utc_timestamp()
- self.services.updateWorkflow(workflow.id, {
+ self.services.workflow.updateWorkflow(workflow.id, {
"status": "failed",
"lastActivity": workflow.lastActivity,
"totalTasks": workflow.totalTasks,
@@ -396,7 +396,7 @@ class WorkflowManager:
})
# Add failed log entry
- self.services.createLog({
+ self.services.workflow.createLog({
"workflowId": workflow.id,
"message": f"Workflow failed: {workflow_result.error or 'Unknown error'}",
"type": "error",
@@ -428,14 +428,14 @@ class WorkflowManager:
"taskProgress": "fail",
"actionProgress": "fail"
}
- message = self.services.createMessage(error_message)
+ message = self.services.workflow.createMessage(error_message)
if message:
workflow.messages.append(message)
# Update workflow status to failed
workflow.status = "failed"
workflow.lastActivity = get_utc_timestamp()
- self.services.updateWorkflow(workflow.id, {
+ self.services.workflow.updateWorkflow(workflow.id, {
"status": "failed",
"lastActivity": workflow.lastActivity,
"totalTasks": workflow.totalTasks,
@@ -473,7 +473,7 @@ class WorkflowManager:
}
# Create message using interface
- message = self.services.createMessage(messageData)
+ message = self.services.workflow.createMessage(messageData)
if message:
workflow.messages.append(message)
@@ -482,13 +482,13 @@ class WorkflowManager:
workflow.lastActivity = get_utc_timestamp()
# Update workflow in database
- self.services.updateWorkflow(workflow.id, {
+ self.services.workflow.updateWorkflow(workflow.id, {
"status": "completed",
"lastActivity": workflow.lastActivity
})
# Add completion log entry
- self.services.createLog({
+ self.services.workflow.createLog({
"workflowId": workflow.id,
"message": "Workflow completed",
"type": "success",
@@ -534,7 +534,7 @@ class WorkflowManager:
# Update workflow status to stopped
workflow.status = "stopped"
workflow.lastActivity = get_utc_timestamp()
- self.services.updateWorkflow(workflow.id, {
+ self.services.workflow.updateWorkflow(workflow.id, {
"status": "stopped",
"lastActivity": workflow.lastActivity,
"totalTasks": workflow.totalTasks,
@@ -559,12 +559,12 @@ class WorkflowManager:
"taskProgress": "pending",
"actionProgress": "pending"
}
- message = self.services.createMessage(stopped_message)
+ message = self.services.workflow.createMessage(stopped_message)
if message:
workflow.messages.append(message)
# Add log entry
- self.services.createLog({
+ self.services.workflow.createLog({
"workflowId": workflow.id,
"message": "Workflow stopped by user",
"type": "warning",
@@ -579,7 +579,7 @@ class WorkflowManager:
# Update workflow status to failed
workflow.status = "failed"
workflow.lastActivity = get_utc_timestamp()
- self.services.updateWorkflow(workflow.id, {
+ self.services.workflow.updateWorkflow(workflow.id, {
"status": "failed",
"lastActivity": workflow.lastActivity,
"totalTasks": workflow.totalTasks,
@@ -604,12 +604,12 @@ class WorkflowManager:
"taskProgress": "fail",
"actionProgress": "fail"
}
- message = self.services.createMessage(error_message)
+ message = self.services.workflow.createMessage(error_message)
if message:
workflow.messages.append(message)
# Add error log entry
- self.services.createLog({
+ self.services.workflow.createLog({
"workflowId": workflow.id,
"message": f"Workflow failed: {str(error)}",
"type": "error",
diff --git a/tools_security_generate_master_keys.py b/tool_security_generate_master_keys.py
similarity index 100%
rename from tools_security_generate_master_keys.py
rename to tool_security_generate_master_keys.py