From 168d66d16741a95895a5ccf8bfc27fb10d3927c8 Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Mon, 22 Sep 2025 00:39:15 +0200
Subject: [PATCH 001/169] cleaned key handling and security

---
 app.py                                        |   96 +-
 config.ini                                    |   42 -
 .../audio_google_interpreter_recording.webm   |  Bin 118148 -> 0 bytes
 env_dev.env                                   |   69 +-
 env_int.env                                   |   47 +-
 env_prod.env                                  |   46 +-
 modules/chat/documents/documentExtraction.py  |    2 +-
 modules/chat/documents/documentGeneration.py  |    2 +-
 modules/chat/handling/handlingTasks.py        |  114 +-
 modules/chat/handling/promptFactory.py        |   67 +-
 modules/chat/managerChat.py                   |   13 +-
 modules/chat/serviceCenter.py                 |  184 +--
 modules/connectors/connectorGoogleSpeech.py   |    4 +-
 modules/connectors/connectorWebTavily.py      |    4 +-
 .../featureChatPlayground.py}                 |    7 +-
 .../featureNeutralizePlayground.py}           |    0
 .../featureSyncDelta.py}                      |    0
 modules/interfaces/interfaceChatObjects.py    |    2 +-
 modules/methods/methodAi.py                   |    2 +-
 modules/{chat => methods}/methodBase.py       |    0
 modules/methods/methodDocument.py             |    2 +-
 modules/methods/methodOutlook.py              |    2 +-
 modules/methods/methodSharepoint.py           |    2 +-
 modules/methods/methodWeb.py                  |    2 +-
 modules/neutralizer/neutralizer.py            |  506 +------
 modules/neutralizer/readme.md                 |   91 ++
 modules/neutralizer/subParseString.py         |  162 +++
 .../{patterns.py => subPatterns.py}           |    0
 modules/neutralizer/subProcessBinary.py       |  101 ++
 modules/neutralizer/subProcessCommon.py       |  143 ++
 modules/neutralizer/subProcessList.py         |  279 ++++
 modules/neutralizer/subProcessText.py         |  101 ++
 modules/routes/routeDataFiles.py              |    2 +-
 modules/routes/routeSecurityGoogle.py         |   14 +
 modules/routes/routeSecurityLocal.py          |   40 +
 modules/routes/routeSecurityMsft.py           |   14 +
 modules/routes/routeVoiceGoogle.py            |   12 +-
 modules/security/auth.py                      |    2 +-
 modules/shared/auditLogger.py                 |  202 +++
 modules/shared/configuration.py               |  373 ++++-
 notes/changelog.txt                           | 1206 -----------------
 notes/produce_diagrams.md                     |   48 -
 notes/readme.md                               |   39 -
 query                                         |    1 -
 requirements.txt                              |    1 +
 test_excel_fix.py                             |   77 --
 tool_security_encrypt_config_value.py         |  375 +++++
 ...log.py => tool_stats_durations_from_log.py |    0
 ...getStats.py => tool_stats_get_codelines.py |    0
 ...ns.py => tool_stats_showUnusedFunctions.py |    0
 tools_security_generate_master_keys.py        |   89 ++
 51 files changed, 2468 insertions(+), 2119 deletions(-)
 delete mode 100644 debug_audio/audio_google_interpreter_recording.webm
 rename modules/{services/serviceValueonChat.py => features/featureChatPlayground.py} (98%)
 rename modules/{services/serviceNeutralization.py => features/featureNeutralizePlayground.py} (100%)
 rename modules/{services/serviceDeltaSync.py => features/featureSyncDelta.py} (100%)
 rename modules/{chat => methods}/methodBase.py (100%)
 create mode 100644 modules/neutralizer/readme.md
 create mode 100644 modules/neutralizer/subParseString.py
 rename modules/neutralizer/{patterns.py => subPatterns.py} (100%)
 create mode 100644 modules/neutralizer/subProcessBinary.py
 create mode 100644 modules/neutralizer/subProcessCommon.py
 create mode 100644 modules/neutralizer/subProcessList.py
 create mode 100644 modules/neutralizer/subProcessText.py
 create mode 100644 modules/shared/auditLogger.py
 delete mode 100644 notes/changelog.txt
 delete mode 100644 notes/produce_diagrams.md
 delete mode 100644 notes/readme.md
 delete mode 100644 query
 delete mode 100644 test_excel_fix.py
 create mode 100644 tool_security_encrypt_config_value.py
 rename tool_durations_from_log.py => tool_stats_durations_from_log.py (100%)
 rename tool_getStats.py => tool_stats_get_codelines.py (100%)
 rename tool_showUnusedFunctions.py => tool_stats_showUnusedFunctions.py (100%)
 create mode 100644 tools_security_generate_master_keys.py
diff --git a/app.py b/app.py
index ad932e9a..cade2e7c 100644
--- a/app.py
+++ b/app.py
@@ -8,19 +8,79 @@
 from zoneinfo import ZoneInfo
 import logging
 from logging.handlers import RotatingFileHandler
-from datetime import timedelta
+from datetime import timedelta, datetime
 import pathlib
 from modules.shared.configuration import APP_CONFIG
 from apscheduler.schedulers.asyncio import AsyncIOScheduler
 from apscheduler.triggers.cron import CronTrigger
+
+class DailyRotatingFileHandler(RotatingFileHandler):
+    """
+    A rotating file handler that automatically switches to a new file when the date changes.
+    The log file name includes the current date and switches at midnight.
+    """
+
+    def __init__(self, log_dir, filename_prefix, max_bytes=10485760, backup_count=5, **kwargs):
+        self.log_dir = log_dir
+        self.filename_prefix = filename_prefix
+        self.current_date = None
+        self.current_file = None
+
+        # Initialize with today's file
+        self._update_file_if_needed()
+
+        # Call parent constructor with current file
+        super().__init__(self.current_file, maxBytes=max_bytes, backupCount=backup_count, **kwargs)
+
+    def _update_file_if_needed(self):
+        """Update the log file if the date has changed"""
+        today = datetime.now().strftime("%Y%m%d")
+
+        if self.current_date != today:
+            self.current_date = today
+            new_file = os.path.join(self.log_dir, f"{self.filename_prefix}_{today}.log")
+
+            if self.current_file != new_file:
+                self.current_file = new_file
+                return True
+        return False
+
+    def emit(self, record):
+        """Emit a log record, switching files if date has changed"""
+        # Check if we need to switch to a new file
+        if self._update_file_if_needed():
+            # Close current file and open new one
+            if self.stream:
+                self.stream.close()
+                self.stream = None
+
+            # Update the baseFilename for the parent class
+            self.baseFilename = self.current_file
+            # Reopen the stream
+            if not self.delay:
+                self.stream = self._open()
+
+        # Call parent emit method
+        super().emit(record)
+
 def initLogging():
     """Initialize logging with configuration from APP_CONFIG"""
     # Get log level from config (default to INFO if not found)
     logLevelName = APP_CONFIG.get("APP_LOGGING_LOG_LEVEL", "WARNING")
     logLevel = getattr(logging, logLevelName)
 
+    # Get log directory from config
+    logDir = APP_CONFIG.get("APP_LOGGING_LOG_DIR", "./")
+    if not os.path.isabs(logDir):
+        # If relative path, make it relative to the gateway directory
+        gatewayDir = os.path.dirname(os.path.abspath(__file__))
+        logDir = os.path.join(gatewayDir, logDir)
+
+    # Ensure log directory exists
+    os.makedirs(logDir, exist_ok=True)
+
     # Create formatters - using single line format
     consoleFormatter = logging.Formatter(
         fmt="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
@@ -89,25 +149,15 @@ def initLogging():
 
     # Add file handler if enabled
     if APP_CONFIG.get("APP_LOGGING_FILE_ENABLED", True):
-        # Get log file path and ensure it's absolute
-        logFile = APP_CONFIG.get("APP_LOGGING_LOG_FILE", "app.log")
-        if not os.path.isabs(logFile):
-            # If relative path, make it relative to the gateway directory
-            gatewayDir = os.path.dirname(os.path.abspath(__file__))
-            logFile = os.path.join(gatewayDir, logFile)
-
-        # Ensure log directory exists
-        logDir = os.path.dirname(logFile)
-        if logDir:
-            os.makedirs(logDir, exist_ok=True)
-
+        # Create daily application log file with automatic date switching
         rotationSize = int(APP_CONFIG.get("APP_LOGGING_ROTATION_SIZE", 10485760))  # Default: 10MB
         backupCount = int(APP_CONFIG.get("APP_LOGGING_BACKUP_COUNT", 5))
-        fileHandler = RotatingFileHandler(
-            logFile,
-            maxBytes=rotationSize,
-            backupCount=backupCount
+        fileHandler = DailyRotatingFileHandler(
+            log_dir=logDir,
+            filename_prefix="log_app",
+            max_bytes=rotationSize,
+            backup_count=backupCount
         )
         fileHandler.setFormatter(fileFormatter)
         fileHandler.addFilter(ChromeDevToolsFilter())
@@ -133,7 +183,15 @@ def initLogging():
     # Log the current logging configuration
     logger = logging.getLogger(__name__)
     logger.info(f"Logging initialized with level {logLevelName}")
-    logger.info(f"Log file: {logFile if APP_CONFIG.get('APP_LOGGING_FILE_ENABLED', True) else 'disabled'}")
+    logger.info(f"Log directory: {logDir}")
+
+    if APP_CONFIG.get('APP_LOGGING_FILE_ENABLED', True):
+        today = datetime.now().strftime("%Y%m%d")
+        appLogFile = os.path.join(logDir, f"log_app_{today}.log")
+        logger.info(f"Application log file: {appLogFile} (auto-switches daily)")
+    else:
+        logger.info("Application log file: disabled")
+
     logger.info(f"Console logging: {'enabled' if APP_CONFIG.get('APP_LOGGING_CONSOLE_ENABLED', True) else 'disabled'}")
 
 # Initialize logging
@@ -154,7 +212,7 @@ async def lifespan(app: FastAPI):
     # Setup APScheduler for JIRA sync
     scheduler = AsyncIOScheduler(timezone=ZoneInfo("Europe/Zurich"))
     try:
-        from modules.services.serviceDeltaSync import perform_sync_jira_delta_group
+        from modules.features.featureSyncDelta import perform_sync_jira_delta_group
         # Schedule sync every 20 minutes (at minutes 00, 20, 40)
         scheduler.add_job(
             perform_sync_jira_delta_group,
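
The app.py hunks above do two things: they add DailyRotatingFileHandler, which keeps RotatingFileHandler's size-based rotation but reopens the stream on a date-stamped file whenever the calendar day changes, and they rewire initLogging() to resolve a log directory instead of a single log file. A minimal usage sketch, assuming only the class as defined in the diff; the directory and handler settings below are illustrative values, not taken from the patch:

    # Usage sketch for the DailyRotatingFileHandler defined in app.py above.
    # "./logs" and the numeric settings are illustrative, not from the patch.
    import logging
    import os

    os.makedirs("./logs", exist_ok=True)  # initLogging() does the same for its logDir
    handler = DailyRotatingFileHandler(
        log_dir="./logs",
        filename_prefix="log_app",       # the prefix initLogging() passes
        max_bytes=10 * 1024 * 1024,      # size-based rollover still applies within a day
        backup_count=5,
    )
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logging.getLogger().addHandler(handler)

Records written today land in ./logs/log_app_YYYYMMDD.log. The date switch happens lazily in emit(), so the first record logged after midnight closes the old stream and reopens it on the new date's file; an idle process creates no empty files.
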
diff --git a/config.ini b/config.ini
index bc8aeb7f..780a9e08 100644
--- a/config.ini
+++ b/config.ini
@@ -5,21 +5,6 @@
 Auth_ALGORITHM = HS256
 Auth_TOKEN_TYPE = bearer
 
-# OpenAI configuration
-Connector_AiOpenai_API_URL = https://api.openai.com/v1/chat/completions
-Connector_AiOpenai_API_SECRET = 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
-Connector_AiOpenai_MODEL_NAME = gpt-4o
-Connector_AiOpenai_TEMPERATURE = 0.2
-Connector_AiOpenai_MAX_TOKENS = 2000
-
-# Anthropic configuration
-Connector_AiAnthropic_API_URL = https://api.anthropic.com/v1/messages
-Connector_AiAnthropic_API_SECRET_OLD = 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
-Connector_AiAnthropic_API_SECRET = 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
-Connector_AiAnthropic_MODEL_NAME = claude-3-5-sonnet-20241022
-Connector_AiAnthropic_TEMPERATURE = 0.2
-Connector_AiAnthropic_MAX_TOKENS = 2000
-
 # File management configuration
 File_Management_MAX_UPLOAD_SIZE_MB = 50
 File_Management_CLEANUP_INTERVAL = 240
@@ -36,33 +21,6 @@
 Security_LOCK_DURATION_MINUTES = 30
 
 # Content Neutralization configuration
 Content_Neutralization_ENABLED = False
 
-# Agent Mail configuration
-Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
-Service_MSFT_CLIENT_SECRET = Kxf8Q~2lJIteZ~JaI32kMf1lfaWKATqxXiNiFbzV
-Service_MSFT_TENANT_ID = common
-
-# Google Service configuration
-Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
-Service_GOOGLE_CLIENT_SECRET = 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
-
-# Tavily Web Search configuration
-Connector_WebTavily_API_KEY = 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
-
-# Google Cloud Speech Services configuration
-Connector_GoogleSpeech_API_KEY = {
-    "type": "service_account",
-    "project_id": "poweronid",
-    "private_key_id": "88db66e4248326e9baeac4231bc196fd46a9a441",
-    "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDTnJuxA+xBL3LA\nPgFILYCsGuppkkdO6d153Q36f2jTj6zpH3OhKMVsaaTBknG2o2+D0Whlk6Yh5rOw\nkWzpMC3y81leRLm5kucERMkBUgd2GL4v16k6m+QGuC3BFlt/XeyuckJNW0V6v/Dy\n3+bSYM7/5o1ftPNWJeAIEWoE/V4wKCYde8RE4Vp1LO5YwhgcM4rRuPmF2OhekpA+\npteYwkY/8/gTTRpZIc8OTsBYRbaMwsjoDj5riuL3boVtkwZwKRb+ZLvupXeU7Ds7\n1305odTcZUwnImHiHfuq83ZJViQiLRNhUAFnQIXPrYLwEpCmzRBGzYHaRlb69ga/\nzqUbKnclAgMBAAECggEAH6W9qHehubioPMAJM7Y6bC2KU/JLNS4csBZd+idb52gG\nwBwIEFjR+H4ZjymhAA4+pe7c4h7MKyh0RI/l7eoFX98Cb+rEq/r1udm1BhGH3s2h\n2UiI8qRQh1YRjF2/nrN5VjhDBOFa6W9opaopZy/l8AzsT8f21zIgPen8z8o6GpFg\n64fJFcbqCGk2ykN2+x2pIOT04tmCszrfbXZP8LEs4xrUB/XwlHL1vT/M3EWIKbnj\njDaIMjw7q/KRgNUvmKS6SU9b3fnOLcQCz9f5cKdiWACKIU/UvuiWhWJ9ou6BWLWU\nva1A6Fi4XJjhW7s3po58/ioQfl0A9p/L92lGg4ST8QKBgQDx8LIM1g0dh9Ql6LmH\nBUGCOewNNXTs+y3ZznUfvVMoyyZK5w/pzeUvkmOwzbRGnZJ9WyCghq8aezyEpo2D\nPL7Odf988IeHmvhyZIM4PLJYgDvSwGXyf/gh6gJkf/4wpx+tx/yQYNBm3Rht7sA0\npSaLehK0E0kW1uyBzHGKgyQOhwKBgQDf6LiZ7hSQqh54vIU1XMDRth0UOo/s/HGi\nDoij29KjmHjLkm8vOlCo83e79X0WhcnyB5kM7nWFegwcM1PJ0Dl8gidUuTlOVDtM\n5u2AaxDoyXAUL457U5dGFAIW+R653ZDkzMfCglacP8HixXEyIpL1cTLqiCAgzszS\nLcSWwoAr8wKBgQC4CGm3X97sFpTmHSd6sCHLaDnJNl9xoAKZifUHpqCqCBVhpm8x\nXp+11vmj1GULzfJPDlE8Khbp4tH+6R39tOhC7fjgVaoSGWxgv1odHfZfYXOf9R/X\nHUZmrbUSM1XsNkPfkZ7pR+teQ1HA1Xo40WMHd1zgw0a2a9fNR/EZ9nUn4wKBgGaK\nUEgGNRrPHadTRnnaoV8o1IZYD2OLdIqvtzm7SOqsv90SkaKCRUAqR5InaYKwAHy7\nqAa5Cc73xqX/h4arujff7x0ouiq5/nJIa0ndPmAtKAvGf6zQ6j0ompBkxAKAioON\nmInmYL2roSI2I5G/LagDkDrB3lzH+Brk5NvZ9RKrAoGAGox462GGGb/NbGdDkahN\ndifzYYvq4FPiWFFo0ynKAulxCBWLXO/N45XNuAyen433d8eREcAYz1Dzax44+MdQ\nHo9dU7YcZvFyt6iZsYeQF8dluHui3vzMpUe0KbqpZC5KMOSw53ZdNIwzo8NTAK59\n+uv3dHGj7sS8fhDo3yCifzc=\n-----END PRIVATE KEY-----\n",
-    "client_email": "poweron-voice-services@poweronid.iam.gserviceaccount.com",
-    "client_id": "116641749406798186404",
-    "auth_uri": "https://accounts.google.com/o/oauth2/auth",
-    "token_uri": "https://oauth2.googleapis.com/token",
-    "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
-    "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/poweron-voice-services%40poweronid.iam.gserviceaccount.com",
-    "universe_domain": "googleapis.com"
-}
-
 # Web Search configuration
 Web_Search_MAX_QUERY_LENGTH = 400
 Web_Search_MAX_RESULTS = 20
diff --git a/debug_audio/audio_google_interpreter_recording.webm b/debug_audio/audio_google_interpreter_recording.webm
deleted file mode 100644
index 862174f4080e3d2df23dc43a6f6d2bcd4a9f9539..0000000000000000000000000000000000000000
Binary files a/debug_audio/audio_google_interpreter_recording.webm and /dev/null differ
zy4AF|6O1Sk30HFi>hV~9D3uY>J*Y$9ww*m^Hy42aq(hQA*^f0;tI6B{8tBRzTIW(b zbR%-pZz2LJ+fn_KTxN2{c?*Lj+lIJcF9>$MWYKsC_6Jo~-UJldr+aBIG+U822pHG7 z$!q)AsO$P93RRc3$A@aNn-4;cE%AD_l4de_d^eH8BADs;HtZX5mfE4-{1)hACZ%rA z&NdSa8)pkbN}+&e)o4{XL+-Q)D$u5333vK>dLe=GGy@7CFpf!V*tT`9 z1*V>vRgLKS42UXdNpkBZ^FCaTKstvLXR|$m12xuc@{oIdE|xQLkn=3&SPVKQ$buyL zE@qDk;=mbbJkA#!(#aRW7)p=q2X(n&JP);6JTap~!+{WIfcpdAkEC#HNevfyP}G8o zexe~ASsLwY4VaKGl5FAB4bzd(T)PvWx+gUG)J|aB>Xp08YULY7md0&Dfu)FqX4EIf zSTs=;S}E#E6g?Silv1*U2Wq=|CAPQCgr+*(oLL9~;ysf!c#ufWK2(oG=ti1Jz5|m# zJY3mmZSEShXBhi?njd*mSl6|wb`Wxc$$3)0`UK~ZWoE9s7D(0e_VcD0X=6usp$erM zmeSCOZ%Hj8D3*CoGB?TJcQmmg;j6Ugj21RAJ((X5qK^7{n+FWvWQ+~3k76k1JxpUg zZv*j)a!0VKBfnd=MbA!{_Ke{IO=2Okw}$=Tekl1eX7lG)n@Ft-sn_C~uR&f|513}@ z6$3nRxEPvilUu@{ke*j{zsRd!Hc{2cETw_IYqhZCIQ_*=vmWrbiCdJZBst%O7JaPn zLH$xY28s;VY(zX(|Y5;LTG5+Febz)@;BKeUJokyCVA)wd&`oWS%0YZaPj@;-?`Yaa zD`;`f=bj2gWX1M_zxM01CR~kB5Fmc30D&zPsGeyVv~QAy9QUbn%IM5z(J)*^#GI5W z{7$u%j4FSaBv+uAR358`KR5~Zk?l-t@wuYvC!x;DCGewocjE zqY%{AE~*T|BqYf^=vzmuafAkwbtNq>dc+yH2Jo<>ZzTA3*gXP4BU7f;|8Jkv<%tTA z*N5XQncOWkD4=<~)ULRVPms&jq3mX{^;?H-^{@j_s}JoJy-|H&T&XK2mGxy=&>RH5 zks~BNvNXye_+>{Az+CMYcGLDN`X=Q#!{t?a->HjA&z@_J%VxBPz4 zE$tJi3Q_2=oT{@^x=Opffi7!lfvpdAy|Y5BcB52F_~uvrEAg$9_?u{p6?^dA6CFg* zanm&WP8Y{3(zHUd_Nny|TB+&hE=>{>d=LCfJ)`cEBvoCav_>cjaDiY@`iDvGsSbGY zbu6r1s*AcwqeH`i5Tk(m19lInx~9CPnHQ!%l0sfklbEvViV8I*zPYp#4PS3JDXg=~ zAomGWouc)b>=+rikgHhQ_1>TJ-pCjtZD!XE4=l3o?QJ;bxlv|5o~_OJgqq9$K=^T* zQo34sBu0`_ghnzuPtGR z^H16WJ6b;9o*2tlQ^ifdWeN1`ZDlRBdJ1xRwK&@gDQr)21fYsn!}Z2>z(V-Q*&|_K z2d*sFRKr%+AhCU|ugBemgxbklxy5Sf?QUgpLxI@ou&pH5#YZHdgg$S(A7y(3`~;l* zWZTQ;^7W@7FwfJbe9#+D>?iU7j>u)d<)rETFfW@SfmsE#pksh8&1AA(bQpJ$7SyP^ zxgMAzg?8X8fkz>V9P+XBPo{R>87(em*rGorAUZJ~dcCy%0T6i;-A!W?vFR+T7whuL zsMDg`$7H)zj4<7Beb$&8H2QfpLAVsFTNO1YB)+Rx@4-box^ z$xc@^>R1=JmFK4)(AIYK3dbHBZ6tXp*ST1+3fp>2i_n;Yq+x5uX^K@sAppNxQ{m8& zms`~t5#!X7rCuCmFTjTn!g|%+aH8oILZ_7|bap6CBP$s7Q{v%wtHlxE9u3li`cTo$ zdYZW2JQzZQ^%C{*`W{{Aq>r0rV#+}gs*bFq3v3*jsRqk!9H-RW32OVo|N8Wk3Zj&c zkaLMUjDK$hcfn1+AfaKqm4vxo)wdEJVJ&ktH#?I1#a*}24IN@Zq?_ODS@447z+ft@ z){_XcqEsMhLq(wzQ$uQ38n#m4OIi5CGmHEZ4C;VPs~3MZtcJq}=S@5~b{_${WrEh( zHz?G6PsMr&dzZ0QfVhzys}muADFshVi1}H0tdZb-s~Atcfm@?P!+{Xrfcpb3PtS|3 z?$1(78V3rFRI^v-%vC2D?P%~C^MVD}H7G8i_DDw2E!u6)NEJpVO<*OG z<4!Z>r?*){D+`j7Kd@(h05+q6o_`o?oj=eAGyfrBwJPT{ctVA0c%Bd$`=&UqIWb5( z3;p$9e6@!Xf*iBO9%(0e>b;^rzq1!#JaPc6C zkr4D;0#+i+HLXeGxhxl)&7i7O7MS5rbrDOu7Yx`P(XbAm1v_W=S(Y4~Fthe4D~N*d zBaA)oK9p+kdSrQSg%6iVW)q~-?>z6;q#o{nyBD+_B@U1N0W0Y*(9x1%zBcs58`dc5 zO@m#hAUHJ)QVXueUn(*03G-~zty?>OlFCB-U>mbH51Md zT(5~D^4HrITR|ArUK=OBFE|qKGhA3=^Mwu4%DJU}W|U?_Q3Z9qW|Vg^g00yI@#evs z(1Zjc*44dz{7oQmAqwEUSh2;)R@yOU5WvetN^7q-)iSLJ&GO*eF<{xSk^1b<&+d~& zvW};pdq+NV=rDPl2-2jokeO5H?QzJ@_2l9b61nnlaUbNa!8Nnc3S zeA#Sn&}!@+jqT5zrQ%`&Oj0zs!?47312v9Q{IZ0gcD_Qp4BFcf%@!04o^%~Snil-K zbHO=a|)&%A*}*73&*e?*9Sz*C#D9Rp}F4AvXA4Hu!r)xX^J$=wksiUaf2|rP2otHlPSIz2rt}D-m-$kc%;^!RcbbvU_ z6<#1_i~E2Op*Jm^^#CD3L zi#K^zC4A@Tn%xlP>oq2#_gb~_gdXZ#)>RnD&!A%cvf zZqGBgV)5nox?vN$&+bL9M9IF=D|(7lsBNtJ z!j_`=L{W}Tx^#3Gw9sDh05e?<@GI?UxlK0!Z|B-2E0U9oj2!V=uu3ChoVZ%vj5tDo zytQ+x&*vOoB;H*lq>Jk1ulLTG0?!NdcA<44UBlwC4`=v|ibQOB`;kYU#o$aT+A;7M z0$VBA(o9_5i!b)tGW7~TBx@|zG z^rcLAm}H&eYE?uk6_;rDXE;D*w3%i%TyZ|cU8{|yh>cd_@OsTcqb{L;$CLg}UP-$Z zVyIHdzE8TEFhz`HcR>1wBGT1`qUCuUZ&%9~k3)Ch)dRT$lh6*R8Zd*CxXfo69e7KP zA2{ruPVmI%X5)E55$P zMuDi$aP3V+M*?iHR;y;laPiM$Q*t|&smzknG;4+DUXA8aqgy->`YT2fkGu_?xf0c( z;Zp0?Q`0n-v(M&WtX>4!%CXTIyC&VDMv;v6{;nxNjyMRfsQXChb5TL2z0XHC8^pd? 
z4m>ppo3A>~*6Js9ppKq*sMb&-cgpbOy+z(KeC=`x7)sZgAVrIwqR=n#3B~E;X@;Ym zQR*UO7`u)rep_lAn9K5rr2bIy{aF^7lfZVLzHZs(trXbpUp9?ndw|EeO^IRXTSz0Vf*LICRC$C!+{Z3 zfcpbDOmHgA$Ra}$Mlk7wO=}_NWm>0lUZeF6`%ZV|<6l1A6)3i%u%N8RMKvEuldkA& zm<;06@VT5mcD~=bouWg)ER3A7vM%zUSq!rUTH7L#U8k6|=Rl1U&T?M-(ZG<=eX_X1 zS$YU5w|`o#2>AQ!mOe76zqAtGpZX~Ypiea0%@Fq(QgXcOek$>QY>7*r+l`Pv=g7Me zP^NjltR7tx-!bm=?M(ACh=-CnU1=g-0>w>TKa2Ycs%{+T==?L`LsOsX{(KJ!{&_-M z(`?&?A@X!?2hG8f1Emg}%`l>mO^VAa+^X{^T`> zs`c&fVW(kNeqPT+is_h?a2E=gK#(z@2pMijZGmf>yFP`vbmgb~u};I~AkdD(q~vwgOdr+*JQ9;A!QIhC zBsQty>IQ_9pRr&jUfmRDA#V`EVzg&kM%xgr1rpz31Mi4$FM&uH*YTnE^&zlbH&V!o zuPntKS9~&>x~p!GikSy_aJIe7jZgg-$&K7IGxYU`hKZSXWv%%G3m>q3yBCsaKbfK% zP{GW8lbeuBD#F0xs_8i|W(Uy?C8ZvLgD$l%rfY60$(j@=^HQUqfX3=S0|-&DYZ|(8R zlXToGaxOEU)}5S@fptNbr6BvW3caZ1}=r1-Pk) zR;2*Z`Z#8I;)0poilwNQ(X*1M?UYEmGP_<4Nkb+!vpLN7F_6#xr*-EF3=<3@ke4j8 zBIO=V#N~TH1$ELbgrlgcQ#z^O-^<+IGn<6!SVQV}7G8j;&6E%MjG+8n^Hf?Wh~+z0 z8gn|ZYfvZ_;E%Q0!ezxC$O#P5JzpYIzReFE*V8ihL;^Kxl(Gy<6I7Uky>qC^7rTD9~pOx*US6&22odlXW>nagP+4Fr zgSr(5JVgWV!k*eWUI z2oVAekKojY*bQZU0!XTG8ucfL98sCtB6wW>c|ZD{p5qT0tRlk^8)~6aWSo%i2h)9?0WxyWYutTiRl^$NYcDI9sF0E=Zl4CJ+3^H#BsP1RlQ{VY~4~}##`PD ze+5&sGcyeuZq*9(^!ctdULftoYU)A5o}Q0hi9FoDT5ZHt^n_k@kgQN?)FMO;@2mDS zEy0LZH!~R=f9S2qn#qdNo3@cZ|F?Y&zfOjv3vW2n7U`fnYUTp!7tIjdUNKe|tY6ib z$`1v~uc7hF1NFj!; zd!@Vro&jitBE$W~M8YF9p81TM9SbN8r6Fj0YEO`1Oghi=5 z^;tTZX8n$S`R&0Yzz6R?kbig;K@KdTs`9ctH%9D@e(H$Ux@Shn_s z5$*R7ft_){e3qDF%LA+`#>i~_|NBR#pw7%Jawiw{bEPB9_B&%w zjc}tw!+{acfcpao1uoxAsgWw>pHykoS^!FWjV-x*lKE5KHsicUXeeL6`P!#7|?l7*yn{Uf2-{`+agZc^zs)nrG-a^NRlIYq!Fga~U6Q$Mew&G#u z3I|b^#8W}%d@$}HI+a40z#~TT5*ip0f2n|hMsLv&iAbZ?=Rd0-3F(~ohwM3~z^31$ zgdEUFl6%^i8-kXbHzxike~e=MXVOpr9q5T$CJsN366HXC+DPdYhNU0b|x7`5)-M%W#;3|gcBWzT@4I`t2TJ81Zv)oLvXA3}e&V%NI&s3@Z> z!_`jVGQqHH$gheGM;{Hfy)6iFf@LA-6Q&Qb{uR}aA-KAf(S?v2;4^ori7mf0&l2ZEenoAmo3u+}Us=>puPfE+`4tUX5-zBxdM`<$Q$A!226 zN*!YX6Tjtx5rY+)=#~83?!H|EcUej#-!moHgtS~b_ZDW*t|P-fDRNb8*i;gG!Jh2b%twTT)LJ{uD#w{q8)}2Slrp|(9xRFhB%!VHhowWUInznJDa?b@ z?b{S8)idAYws9cA|Dt=APtjKTfQ;?W>3N#Ysqz z@3~6t{_*$ z%rPaz@U_Zhoz^<{D9HD*9j}SGim>EhZqeH`i5(|L)12CMF4IrTCnZSwV9C;L}1oW8D?qe_P4|LHkcyrJzd$X&pF|9tanL zyL`!nj1&1<84qBccT}slHNXgJww2UMQ)Y|UN8N>S7{4aO7~G<*E77J7<`z51P;)_! 
zQ23}ueIlPhr2o%gM(IjnpFB`jqa%G!AFvN7{y)C^BqVEYNY2S>($ zT|@LSS_3`E1lK-qkUZNKN!TA^`p&gO^HL`Kfw4w$kC@zl|CXMC829xgFdF|a3)%C~ z)-oll3#0T1K)p~aT%20WrS(MoU`2kfsTR-J6x}bJ4zwC>v&s2j{d@S%F~d1Mh6=rk z{u3K@Cj@{Ea;fXOSr*FH_=Z-LBw%$a52^wUvs1(4%rG`(=8VzSpi=JkC#6J(7vm-P zFETp^@;PXglZp-qS9RYul?&A4#426Of)}6G)A_He9i904O{Gj-ev4;2EsG8=h zlmX_|@U4DV_1W_^7{NdPzSAM^Qk`-BQV1}kn-f%71>)bq;_1`$PEMi z10}020=O$HYku(uLHn&-G|g6(#eC3C=lB4|;&(Lany9|K>kE1A=^aq32MzMV4<@mQ z_NoCdq%ww!_NcM31U3RJ#PKPd^vaBfYR3=`D(49dd-%vXK_$*I`f}#>@}iE4xY5Tx zg&6l^J@f54UHMtBOv$~|i3}Az&wPq5pz-K=>Ssh>^g1$t4iuw1_BVOAu^a$V-$Vma z$u6Za5oTSsC~Gbt5;(%?a0?yMkeLoF#f*)p{<#hNNt-viKKy+~qu|V9s0lF^Z<6*m zQjJv}MFU6>%@u9d0iG?#m^n1K%?-pYh;P8fR10(Zi~wiRB&l79)TT_2IJ^slmC$YP zA*$ny_dSk{8i}Jr!+{bs6njdL-O-uJ__QWF^)s?va1Om#KP1O=$=_?L5IAV?k-m6#s*$RV;8 zlt@WTMw|`$9<7kd_2Whu-#-`~6Yd=FK>pOF(pnhCbJPP0*vU{O>KUj~fY6U36fgeM zprm6`K)l)D!*yh7v5$`ci?Ixi4K)+Gg>Vf&Gdpu$kXBFh0cSX<-y_`o+_&1o3fiFh z`n)N5rni|K2n2rJLq9M9<3nh=m>D$@*hmS74kxLjb^N5>cag|+(C|*1!XGd&ps)P~O)WWA{9{r2uDIR% zpRQ)3XQfo6UT?$9Vf;$p2p8WH#iD=FYQ~9h)kxx?j_%k?c$3B@a-yp8KUn$LE9`7P z;~-5e1l#a-fIRzeum`o@&v7W$u80ZQ)eH85T z;DTQE@~-Li()V2&kcH>_Y#NcSV`jSD`l>~(^;^Ns)ycg&i=+rf3P432-}sHq6x<2O zMK?quHfRbgJU(r4lauV^{3WlqsWaXY3qCVIo#?{8njUN0^|`u#&Kv2qbCKAb%4M*T zeg~;R$r!hQX}Wtv^|_5`tQvwo*lGvx!}DQ&%t^~LQMyQ{l)ym36}2X*Rh8N>qvA*M z?>rXMNxGMCZez~VRI4r+*q@!VZl@2G=U@S5ObO#s2c#=BYfv91937xdTkPum$8n}L z8je|Hf2J9xfWyvUfnrB2Y605vw(ub64Rs8J)cX0jxIa|T4JyEK^c8ko?bk?QWEMu7 zoqgLz!ub?I$b}w}4pZG7{!;}mrmpYEnS$z*#(*iThIYw|w7W4Z;yD=->P8weQ`pc# z#c?~=&s@iMf4Vnpz9v~qeH`i5`%#I0|e7x zVFffL09N5Q#>R77M=71_UwYuIeefG&0B=ABUOoDp|wr!RhvlcBmXy4k{ zP#ACGfkF7TV+cQ%9D z?<#P~jCy&SLw-0PK3XhZNoPo?g&->wHn~AcW;1-Z@D~dD8Txc1HuRwZ$4s$K{ zt8E8W3wRG)N#(t-Em%lyT(+;w_1YzpL{4e_#bv(4=8&D@EG~}#_ov4f$1C|R z%Wlnj+<_xmmzYWUFUwYbXZUWk@$n4mK^QioFBB^PRM$EH{2FDNEB#3L=8h)ItjS(_ zn5-Zn9RCg8=yg{boR%ud)FEoDG zBOAjbIB%i}9^S2Z1(Zhh^Kj3iK*{}_3kIDam!aZJ^88p9wj15Wx_D?%g*~kAN3&Aa z`Xz@OHX3rb&^SeL-^e#fBzHHM4)9Msh)LU~YFL(g;@j3?q@~(W&qqmtq(XJNOKMS` z--fljICK=DaUtY6JvBQM#Y)eNAW4G>`=qv_!1F6_loi?oi!PM+9@vw=@o7M7RaemM zN0l{DrnqjoPMUVVCrvOk2Y32ZR0|MA4Oz07^||I*Ys-6#HWcT<>;n&;8{jhMp)*lwWZog6npkmJx#R4a7)0j(X_b0j`i?%H7 z>e}mvGBajobL_hp$N`zI>@o?Kb|LK0mPOp{)e%SWJeNoT3MDDjpyj8p$2zl%EKKIM zX&T)_z(HTF3Xwl;TJ$8ssG_=z~=|MF$m*yyKB| z==35yJ9b1RN#N5HptUW@ZxpA^5xTUgiDdly7!_ zY!zCieVCJSy&j#x_DuC@y>7rhH?hQte?Qu25h}dj|GW4iXk`hGt(Gnagg-Gugo8fzt?WpEFn}s zIlhEv#bY|#;@-s~5;FaTNRfpFIb0->8rhQsWdnG`-qw@6_5f48dv4cJDipTWRpqe; zKyIw~pFuN|I-(tQ2*BY!cJ0V?zss7V05tHdEET=oR3-pX;j?L6O3qrLRqqXyagQs_ z{e>3ZUJp>(wLgGp=j^W>jkT+VC79wQ?-ki4Qxwa7+u&-!~1I7Bi7?>RR>sG z+`%>JIlfWTi9#!AXG6@Ee3X53#v0nC`@KlGJ~~zmzRu^L2|(oKn!^G&VXJEf5u98Z zIZCAI(ia23Z{kK|3-z~Xsoak<&v#HcFUzeTTA@1`spqTUfEPVfwX5sQTI;F=Rs`>9 z0d0~X#jyJ3wVfG!ASlCSL;zGBZ-or(gQr^{Z>CxOcKLO~GPwa5H~w$jI2{*6mhFIx zVMYyHqZ|DF7Y1()m|>YwlhuE5zQRZ5tUYc-6BTWW^d&7Alf_zNjPhhR1w+n$6RWj& z_;qH@vTUI8IQx3__eM_Dz#ZU#b!$?r_R0^5OS@I3L=FK#Q4|3)@zt7Ybk$xRKKj&W z@nFrbIZmuuq$_q-DdzK^$faM63WXi^1u2Ro=AdUvuTjM75Hv%X_oFqAM6HU0pnKcZ z0$YYZDo<6~%A2(FVY$ZzGpN&L-pYA9UmGP&XYJC<#)UhWJI}bvd5TT&bW)|!DJOi> zL|HX|kx&^hu*j&TJpH4i6xt`_xKSi6D|v|Lhsg&d?-5(b!C(<&csAi{x?SA@1kI{Q znOPNWH5a$yt>Z@DfhW+$j=w)?4pJ8!er7^w&DCeu&PNWZczS)ONzuOb3M`zvqeH`i z68nJr1K0mS)XUG=?ZzLFcn^>W`NNpdwOox*xVvAgNXtevm+%HI9EgC&G$JEDif&?w zPM{!QvsE#}*|{YXk!~>+`;DTF!HNUy?r(E}!CM*br0z6c<_T-+PRD(LL>vp3ej^_~ z_G>f?#(QTSM9TYTmFg8eDT_qa(d=4#$2|Al3hU&{SYJ$5(ks}zIfPrn&DK_L9&i25Zvp=6uWm0~EUJg|%RNV+ zQ8^1S5BnT(p;dkqHkeac#3A_MO!lPh*!zy4jcJ%J|9Bi_Br3MO$;wQB_7cBZaT<2sseg*^C_sBbuoL|KL4 zRhea=Wsagr6sRPz-+!3Mt$IVCl>Fb<&|j$7tefiwa8c5t*&hi4`zeHn@NO#eqSeyY 
zk$6gGiS#dIjZANAqiE_~`Ib=1yO>5i4|9r*l&qJ+9X5qu0o;%EeC#41REjB$8z8d- z{5G}jja9l#FV+6lSxMj0vuX;p9PsecIIB&OxPUo2Lb+G+=VNCQ4TVhwG=#Twx29(JVJMzNfUaxyrmVRmWee$0GXSZlqXyvbDkXFU zQQHMDJm-3uatBpW3X$9o76`9;Gw;Eu%8N`p zA}o7V$oN6^dMWqc=yot!OWOUmlD7Zo{D9tZR|HEKguBs%nN*=qnE7QkRG-~iB)75C zLU3!NrDp_8q&#+)JeshN3*N1jxh&h8yYrtso!~Uha6-F zXd1`a&OJm1da~ihA_!Oz9UkX&+vIuRd|#tO!+{ewfcpbLzDoF*K0_w6+<++p6L)hR zhQJFXNgk_JD(qcG^Uq%wPwCxEree#5#-B*|bd~;5Jt7PKujdG?QIOZuF76P=Nj4g5 zhm3=Opb_DDG6nvkz(L3R90l02V|MS>i>*a2I;?$>D&Um(kWd2T{K`yb5utnhCMF15 zYnadRdVkAm0(WgBJD$OMk#H1aN(0x@)8$R4->-gWE8C1Q_ITg!&t^rt?T`hHaqnVH zBCGF6+N8KR=>hwrrF^obNnA=1iF*a zKGIcwhyy|@KVE$)mr2JFWuoFBGrE(U5=-Z`W&Io)0sD$qBW?HwM^6T_>mn(6qJ=B*TQWLzz*0f2HMZ`xDu-Ck@L1`9;}?=KklW zQW9DM?wh`k(jmpL)k8m!*5S4VC1!jDaE9zQ=Xb1r=))Fe4Eu%sGA6hA zNY5Ai3<(x>oAPM|a9dw471p@U>+>zIr@UXbx<-|YaRboX5{3Bvfo6WI{r`Owgb|lr z<`>Oz-Sv6cuPSiQL8)kf*ibQId@vUeJkmHI&=xhdcOWq3g)#WCI8JQ%oKu|nLu|Z|1+#mUYY*5^XB^*Xao9j?NT_pqk;AiBW zukdcBGvksSk4=1DVp-oVcb?t!2GCZFTabbx{wS)KXx==x^1_k{m$V&1NgQR)P~l!f z2GsH8aixd*| z;GMco`Xl(eqeH`i6LWz312)&+r$Jb>FB#!aI@yqH>vw*u%G988_Ix|Va^Q$2Q~K;2^>1_v zzjM8VwW}Xvw)-({pB{lN=y{q6!!V;Evig7vb1dw_8CzvQX_{5f*;`AqycWi@CcbF`_3m8dik7P8u^H+o*$J1AM8 zNzK2nubCUgnAyJp@m5*KDk116PmH)SD;P{7z-gw~VpECQlthqlBGA)OV+`fdW;pxQ z9IChYpk}X^NLkR_?+j8y=Q@>-*i$cvqk;YsKC`;C#D9nDlY{za$0T(1se|%RJXAa` zav0C|0^RiNLae|kdHMtxA&gUbvjP>vzTUnx;sKFWhp|@n7yZ8plHsF~sw0xi%fPZl z6495mPp$w)DDZVm>f5A`G?}g`wGq!G?^-~EsD|jq45e7@(HARVSM@@zCDvyK)Y&v+ zh}U}b2n*ddOx+sAo0R$0!Qj_QlY=;LWJ0rNiRE9=0=SDtCpmBA=d|(MuEE{Q_Wk%A z&Ot?RY#`;=hl?p98KR}(SEsh&JKestjrN?PIVXG_BGMtTG^Td|gH-E>Dfj?kEA3Xh zp|Nv~KsFDm`uJ7t)vslXRd8XMkZsu>nLXuof!=ynl;$e^Ky54G}% zb~b?=fYh{6i}|3x9wF7V=yCt(VXBmW8zY6-*g77a?)fMLM`;F2z4X@0V;Pa0Tj*P! zcKNI)W}ockM;Cp1m~1!>t^9bM03fKP!hm3o&{{Ju`BG(S(Ln zQz3RWV~2k(?evA4@HCYliNJAoskvw}6#-S%pmyC(f#|w22@3ojPEUmfOfO$;Vq>55CP(QJi5?SqS85HbUrJSx1U=XzG#`=RHr@7=M&QDQp+ zezaACACT`3XitmxL4I}-_Ppjt1Izxra?VxSktMUC1cjqR!+{g8fcpd3Q#Gk6y$#{( zK#XF!%6u6eK(*SCb880EuY(MVey6gRKlACwY3*8~zjnysWMiZoA!1|(eOjca_4sUF zC1h8tEu0q%`Zj!zNs{Zpz5lt7aV63t;{E#wZ7q-D#ftq}(;_Ruce>IScH@=1GuQ#a z%?XeVXbJV-v=Bt8$&imuve|mjtT9^W&-e<(6{SN{T&I}Lk)?Ea&&W)MvLd%5*`k1a zqT9H}W1p#u`U;@NzAOd$lu8PWERGi10LeAW)6t6^ja0uU$EBGF?HNbneQ|V9VKr5N z`{IigLek5KD`v?;RN)iDC7vnGSVjr{;fdpMax6H7+-8ei4DsYoXDWkidUa)uEZd6X z=!+z{+QnNP?lh)B<-rb@m2sW3@6RPloAC{{OZyQOx})DmqKZM@na)pvNyhwfHG|er zZdB@$|2ZR^2Lc5q&8A^p<@nPU@ z%6EB=`*;fqd*OXCMaUko{yZGwCnmMwg;xYb=Y|(@$G(~=XH+sIjV{z7--;xO(W6O3 z^pgA#8j)fL62dh!9(I=j(hS~~u7Z!iUe%egN1x?|n-_N0?3;zeLS6KlBw7`{UZ_Uy zd zD8@6<;N-;+SeA#Xa7?kg26L~`(R$-ou1x`z(nV&=*Ce#wI7i*NK7_|hnSn6+b@r;5 zGV_7H4FRWWmHKq{sh$elz&eqX)B!Q*T9p~zHMYZ+`$~l;G5HyK#{iav92)25d|>lo z1dgIxe%I_(|1S*XgUtG+1{*M0T;E~al3(0wOArg-Q2C%zThen%ZBmp}e_2fr=+4!H zqGyvXPd;VEt*j|2$E*L+Zbj$qnLI*>b#^coaa4%12brsy{u8E=MR>S9@_FI*t=nDt`D3XbU};KF%p`KxfNxHZm72E!F; zI^c5T=&NnEp*;Q=Z%1$i#ULA0)k}NfK{dym_mg>`tc0cC8YVVddN6)5*yPJ*T*)z( z_Q_AGbFxou^b6J9dsna|2q?G>rd@i3hw3LH^Km1R2WHKgbXe#UR~sPqXA9TKMxS`F zqFVjyWLUEOmG>jy9@N26psY}ClBNOPlf~Zhj{s{FPEG(YTAci-aMnI^E!vvfaxZ{g z{Xjj<^>M~rY@17eqeH`i6YGHc1J-PN<=LUE19)?Gf#YEoV-|Dg#-4BAc{#$d!vpmu z+Kg1Qh-U6{mp?(_pav{(u&m6&+O~b2=^BWPZbdGdEu*MtY%1Tyv>ZbK!AMk@i-lEm zHATBN(7v(G7Z!kOU-LTOz>KhlIvQs|FY}Q}f3+q6RwB%8 zsHbjN*-*HI`7A0Q6;4)erJBNPN_MDov;HlGa^TrHiaRtc#Da14Cr^-@)KV;^=!L{p zDCZ%}@YYCwlwYUkRqxfNf8G;qK(?6hWX~2kQdc!v+*X5=crv&%Hi<@I$u??{VT1c%RJnw zbMN!65hE9s$$POqvfQglis6yxDiu#d<(&_L@c^wrQom#HcDG&-Qv98m92o=fLbYc` z(PV;8IA#OkTK}bKMj$XCC`}bM$AT>Y&rolDL8824TI@nBy#Io`XA{}I!rt2beyX?V zBVm$c(XaRZeE(caio~y+i~=;K-Wy1_+>Dd|rE)k)2uYS0Iv>zs&Hw|xfTQ|E%IQFY 
zHptR(ET9?6J^DMtHtmV}b(Q~D2Dd(Y%q#csm#zbnbD6yv3@+!r0A)h>xOn=2He6omY%+fAZ8!ofY&eh#jOp^?-)#W zi@S?yYke%J0<5>@mf-6$9lsTiX}Pa6!e|)s|6KplEENhA?gus1%^3e-OY=V zs1Ozvs|6I}(=Sx**pQ9ZJQ4J-avKXv-rg9T%`a8P5?Oxq8P8euJ1r+VN1FPLF^8+mMw9zv6+$ru(p?~wr{~ux zFH!*%sphJJ4SW8{y?H zPzQTLc%|1Tgwez~KUN>)GuO#`GpDMAp$w-zO*67KZzMI4tSJI`P|zC1)uTbNiy6dO zWU2?;lm3emC(D`-8?m$!82VAp1fbxnOu(q4C2Ad)igGgYS{V|z3A?R=YKj3M-$Q?bke&L6ZJ)&y*Es9 zaDDsXLk~)7tr~2KNW2Ox)b#4idVnQ=%>R2puqnsGs}Nq1gEm#MSR5v|-_L|TwjyC_ zFvi7xWvo3r5}-J0m3*ufDQvdlYR#5aXAk!YX{5q#NfD<($f}DWOnxeIu4G04TbL{^Pbt8!XiNhbGczpF!`K)X}#fjuOI@rj@3c)*T% zh10oGWTz}h-yw>Q@^bX5PWPc246c2CC*sVrk^65kv>X9xLY3SWmH&hnD8c~OyC!FA zXO;D`xHX{5VIcZNDLtzSqE6pNZt@7#<#flt^BxTigiPEcWMv$E?^0RR+w`;)visZA}-+tF6f-ww=dXbonFBDJFoJosouF!DZd19ofsm|eqwa*aBB zzpCO39+IEQplf)7Y(&N*KI6ALK$+NjW1~aEffQqa`vW~;bos)>7eC z(2!|YOW(^4mdsfQ8Cb{bl}~-aeNUxk0F$e8A3Yzex{S@m&z`bJzOQ|Oukj%)k+|rv zR@Q+Y10x%~?cf!X&VSNpvzLj|sO75db1Ljp5`oUfJJs6$vt@$t9Y&ZHc`%g1{X(*8 zl8t-tj;6-7QSFjt&gk&{!~;uDgUI3fsU3JQP^L;r^-0mE))CGfM}nEB5$+}vNOGO_ zOHU62KH;V~%gNMf|8SR&4CBA)#%3$x12+uX?h4lu$f=3~!d&iUujz1xXj=G7pEo*uFoUC{*eOXk(7NWgEEEUrIazy~x^j@I# zStQR)IJv*3@yY>3Ntj!tW;Ol=`U`0&3iJos0x573kwRMeBih5(CD%2s=X_z9h1lC1 zLue=vZG!Y2-7NAJe)JMZ?Fe-XPDie9PvUaRWso*!gbMGwP~M9x;mJgq?UV4=HgAbM z5NUaLV3TF=XswsDbCNcv@2%L_03@{Q7cz9|*03h?IU4@$pAB){_v_~ikl~_f8EPlQ z4SZ2IgVr;Y&);@xc{eVu>XM6eoP0-KF_7sDR*a^+M?=JQN)xd4M|oadhB@CC$jqH^E`l-jH! zs%NWvINbrh;1>Y1bgsz{8BuSkK21sSFYl%N^YgzTY8I9r%OA|Ka6q>2+lt z_^K6&`y#_rwBe}L3;p{4a24P}{wSZ4E6ZcCnGHfk&5CIia1c6{#f>-A$#=Q$N{zZo z?0I3ML&Jd-pMd)V8YdHU?m>rxNE7)MGWx0lgqX`P=t%C=!|;-8G?KoEL^`OkEi)JC z4kw?0h`3F@ksUV+fXDw2F}K;;{ApOor@^+IJRq(qZbVVgrzH zA^;3cF))N_`6wYG?^*$u9#MVFkn_Nynw%%UgOtcYRpYd9#q(MdYX+L)j$5U^+S*|Q zBPPv2&e8@$${?onEmqKK*pJp`P@^5T4?4(f6xjJ z->_-(^ol1Hy8RE5*I3?j?K4GPbtr0S!dnUF_;hMgm`6Qc;}dyx0m_R7N%u71wM9KL zJ6i$(m20u9V_~-mT%E!AMhd5GZNFwTZx*8WGNb=n`!iP(fcD|zxTt!6pBmtW7Ho9|y|=Nz1*Q#SC1v2;L?O2iC4gLtlx*R-= z+2qAXba@%eN58ehDfFpOd*lNr?Yb;{yny75QLi#hCybxB%%FUlEN>RFj{pF-XJsW3auj zL-R5Vi#RAkFJ+r6W17nm;Y9Q`)EzmpCs%ZUQled{kxf&wa8QhHE~v{~fxiKdXhy59l%kKv$g5cs&5)&Zttcf<}~PcoCpO&;pB#nL&}VVfQ&NL?m$BGnmV#p}}>~ZxvPA z-7XER2(Er|v!ou!@hSGa$llE|3&3=!d0soMKXA(g?k9c5u7#Q2oZ1h{VL_p`(M( z0u-biFBMOiZHkdMt^!jzfB;`$6Pt#zom3DyY{LyufSF+3s87ZLDK-Z?Q?@hRME91z zUAn3U(@phIq?hZbdQ=AMTE!H5w^?v)l(7=@I~7s=F}?746;)<{Bn28G-eEd^@PPen z+})j6>H-G1k;j1iVN<+K=L%Jc2Iw~J+y>7S7h&FhZH@Gds!ZDF`|6y9!&U?WCsu zL!7n&ksEwdWJowXF(mryz+fW%@%nomTG+p~k;U0e*jufGgPB>qh~4XRfNsoFz0x$3 z(am~-6!Mv!*QS^jBE{E{DgU6bOaxk0yb)*3#g-r>6u0bl zgH(d$1T++U#wT`Dp~vIZtOM9EGcwSDe3DqIWqfp3I5NTt=s|0G6rb0-#8$IGw=rEp z;5+fjWfDl({-0Ty)3dS*&6GJXt5X`f4*G4zIRI+>fMM$5ctwg=$IVaN0*+I!0#V7? zkMsgjP{moc)-{l^g(?y4wfegTMNwznDqMu7P4UZ(@5Ixi)L7Y}6ixdnF)y38w+o6- zSWjyKpAPx($6y2L@_}}|x#WRH`PiH5RYya03Kekju1An>0M5Z9yF&q z^n9UlEV(5>%ZHz%L&Jd;7l8W%KP)t`LO@M?=7b|#%9MJON$D_xsW=m_<2f*;l4#58 z(QPz}Ft5%|)2%x7%19lbQy6=2;;Y-erq2VB9(;LDg@Q(!nyO%h8Z?uOb%pVIhs_L^ z;V-k5KKB}}JyGC{bI$&Oko&hwZHCVnv+@6Cw-6|i>zrO^(m{Gv-kOqPeRDE&qJBD` zBYr$Pkaq>cxITQbI+t2{RcUC$A)ln>0**9LGN9dFt~)IIvnHmHWeMQrv0iNWFv zjH6n#k$bGTh_6eWCPoa-ERJxOOCy(P1}PiyJ`ToI)?5-7=R@!7#9ikT$srSX=@6{o z!C2DZ_*2sr$$*npk6L)XAs4y5?L4g{Y)~DJsh~11m0<;m$9W#GQ8$ zoBW3ayW%nwqK(#%ALj=-Op%FTWK%mu_SyFLKZ-KCKBlbyhq*M;!^J1ylcs%08>;jXJfe^%M~|(vc(^n zd!Tl_*>dYn!t=6EdY#}>hiXH%4g`A3nXO==xaAzpcUJJZnh*{-2tZ6!K4bRa7RxO! 
zePcSSXG$n%SDO7nD3k_Q@9#w!9kqXeppZfuCk8;tpj@MTc@+pWcLIE|dHrJ={h~7Q z8wH^Cq-3Yb0|<3s~R*k#mpkuHc%hAH*R0n!gBd1n8=!Y)F2!Xeea%*WO`hYiRq0 zq!KqVQ9|#g_6j&-8u!}Uu6g~Tkcm#}sbDz6Kk_q4T69rJ!swtM1USuZXuEx|C!lVr zFJuW};`}Lmwf}rMkcEtMW$XLw+ae2?d-g{LC~}G4>Cv>oNhtsu!2+sAf%%>K8`a*= zzen2UN*qJ^Nzt^Y)>=Tvi%f-Q@%~2HtGYQb|DQJujjWJiW{q6?;j?8zmy&M6z47_^lLiRGcaHK;0G3%_-=jmrffZAL`vXKm zY~i5`O!(l~)>ftY5lkeahwCtyqNoeQLTwovKU(QR8}~=ZpJn`>muKl1bnlaw6+ zmWsWC8miO)s@yLbCDS+#9<98gR3^Ys&p0?lp+3B+99kbDIv>xQ_bc5!7CuZZT$EK$ zBX5`i%=o3Xoi*i0bG!d&pfeIBH7;ZdD@1CrA<0_iu>B%MhwC1qW#F2|WF8x$7)I7RytCZ5$UhN4=Jt0V*cGHfqQ!pSKsyAhwV9X% zcnOO&G5t+-^yVI=?4e=zDmaFfhW7DD`v9(h$54o|t+ky=YMdx*@Za8+Hwra2D=V8#mRmlI%eL+Kv;Iuq)b508SLXR&P#nKNN;QCd>qL?Yej#zMI z0I)ODWI6F%byq8An-wS&>?rTk!1$zVdXnQ*jo~Yr%~iuXZ*VIhnqC`W zg**>*kwlU3fj(CxPx(HcRf+U7sHigpd8f1g(PSOK*nHK>G zQcpng|H_K3J8K(Ole*V{#QECR*;yZnqA3ScQSgJ47caY7j`7}gy=^+;;c%}AcH}** zKo8xiEeoti_9fIqURAD?5&!~D-21RFM@@vY=MSzp|KPJ*Z6hRZwP0%AryDvEcY##v zct{3kA9H-MVp*x{q8^A1=4PujZY<$ttrf+A2AZ5EPQWBYFU4pML4Y}@xin*FRO&|A zHt__2ps^XsK;#jB8SWfqR#u!vdGYjEs0vd1Dh2Si?AjJTWt~b__~vcT{-fbO)b>+g zO03ZXPC`jSVXt+>N+E-=1!>6x78RC9e!@Vj<2gGM8_?3?ogqZ4q|$fGE8v2r2fwIe z8zViy^Z><|`V2=*ZrAS0^F%G9L&Jd;kAV9FQ|Qf>O$d1+L1*!7(Y~KxS7`T(`kKi_ zdm)_Z$0x6+)F5uD@-6@+Y_~lh$>H+aRR!3fi_G%ww;7c=UPZOC$Rey^Qi~!_*ZelQ@{s0?NML7$~rHjyn$)B~*VMQLy%Z z2L<@H#alyA@CE_9R^rYy?kWxSRF(`KR$LJ}RAscd5J#%Of2j)%bWYW|eCy3?Zi@sx zWzq`xW2Arz+%@BgZ#^qY>2IwBL6RYxTZGRbbj$Mw3+n6`(=V`}4n#$OJ3*O1w+3aI~4m5|p4P~5qF$r8AB7%EmT zRbpnQ_LHRyXkD>vR|NwKi_qpyy{A%lL;S%{My~RIy{PhNzJvL~tMIMF$Bz?lBJVcp z9MB+>8$Ws2r@fG@Rb(KUS zOO`&14bNrHSuFAZ%$!!nv{pG#L;i&bxA9t8^P&=RvigZ8|Rf&F>H$na7 zU<({Qe0jNd$FV)5mSB@~WbQ$E_XRtF3CDRm*g2IMJS@Bvf3*{k8coMZ3kpP(TdBa< zf43TpOdh{jLUPRiBU&}^Cu%E~570>Tze5mIi&N-lZ8jNaE8f8E4~K{7SC*BB|INfP zq={nHfRAMUHtgc`2Z+vtnl+Br9&Lv{if%{``uaC^PXvM?At}~`3f=d~cQX+#H4$uX zK1!w(pLVk~NQyZdALGHOF1T^il{VO-r$3%#N}mxeReHVKvTwj0b8zLEvPH7k$h~+c z#4-b05fe5o5gsJfEt;sa0=aSSm6w>E^1eYhhu4;q)sq;Q`@DFBa8lWyADvf*L{tjC zWAi=hb8_O5O#vcrj#h`UdfV9pz4vdKDpnRjiRWvT*L*vK#atNNQ;>#+spY~{$fHBU zffdVu`vdS&3~1PDmvwCKEG8aQ z`}45Z9;Rycx40w-+^J8;60DdMwYV=GVr9ClI8HR=-q(IU3*xbJ6i({}h3X|u;6E8mH8!bt z-h<$=8fbm_8Zb)0?ff*Sfe8Iy$9Bw+02VL9<0qF6CPvS5S&EqLnAyQ-W~`-Q*&XwQ z)Ny=5!x(Sw-$JYxs%6{oI?16I=#CXbjfng+MtgIADxVbx#ryID=RhD;t^$aMBj1e_ zNdTvTGRUzKe+aUn*C88!f}RvdggEif`S7qaR<%?DuRrXo@3#rOI90q|*aQNI=*;OM zjddI$*P!Yu>CeUzvrxV=-3xk(EM^?VEq_pBSkH)%^igb#|-Y z);7=j2p4V_;m^(BZh*VY$13Ey(u5#HI6Xw>yvZ;Pw3Xx2l}m25_Cd8_{KVtf3N&bO$b~_hnPoT8#~Mw^8D*OGl(TnF;BSzLezkj_0CHC~@iz zZq1y!*#ZJ#rzcF58ffnCBQIn|tv07V+mumA@BwV7ubhYUP0p|F7K8ju4SEH3m(tGu z1DcthGBr!~MsYBx-E?MN6R}0&;SwdJo`BDX_@~{_N2#L7Sz!06TG~yUF@K2|W!?Z2 zOa84O)GokWpe-EcZl|1};3F}u$&BByw+h!Dd z!xMonxJ=dgFnR1Hw+AZ+nmca}Z^L~?GL}8C;6U|(@ZDY=W3TcJYedqYf~ZJ>4wdvu z@@WYNA%XNt<$IGuMKCI3(?a65pdgQRl?=`_1u0`VL<}u|{+mtZQC~%$>xH~#Eeopxo8IcNWPhG?utQ!T?w;W5^Stg0*J%Kd(C(@?8S~O#te{=6x-WDWFn_0XHaR% z77{@!C|TvPtts6q^tTlw8U!?PS4gGpe&OMxL&Jd<2Y~wnQFq|mg>e@Qdi{V>M2rJ5 z+TWg=iBYTP%92t&AX414*yq49Np%(fbe=r;%(6rT%&J$xW2pZTU3hP-uS@y@K{5~W zuqMH>1u5(b3eQh$%2>;;;{#NdiS}7k>DEcDCm)Vf?fL|l9B|&o#{FRhYX+A!ARn1W zF?Jhb8)krYu4J+Q6L&PAy5i^{t_!uNDZ90ttsl<}hs7h+x)2|UNLiSb5=IEx5FaDs z-EzAd)Ks_>C}MPhMC(5T6T8&oP?HLdO~;qBQUl;G`J4GW zmCx#a%i{DzXE%ibW^LZ$|KLh~5Q*@ITqJxQM&S_8r_xAYbpItT!)Iafg~q={)@u|C zpTpu*mZ7KbEZKpr2q)dA^&vICg60mi&bqVg$pqzUE|9T+lr(*mH=hQ)2; zOCNzKmAoP_w4O0Ij`v~#OebA;E3zjR?b||)sVaX1dXw&J3+e|teYSq8#&x$B;<@}! 
zPHg;n#c_lQvmjoS&Ij~DacC2dKKUU)V}5ewk2ixZ%NN+Q3VXaFNc)=1#&c_2>0QCw zmFMJNbFyt*4v2n}#!6S$V-&T6IGbVB_r+5zAfk-YLk!=OV6V*&Q2);nfM=qHVx&sIK^jq& zUe^FxT)~urqOH+d`&2+?kf3SkoX@wvDaWh2ShprKDbpYYltDy-Wk2IYw6le3V=-=T zP!+o(e|G4im6TVRXsT8mGRnrm<=$)k}O$-cn`QAU^LfwDq9j5E@T48Yo~1GDt0Vb#uV% zgwboD4%5f=3pi6+YhIcB{4{DWQQPtAui)S${W{bfd4nD-aFoa6EoXvL9!;5@7)d|w zE9sk!@oR9zc+Z7(D$_>rgItxu1OrBOm?SrkgrCJD+3xF>66-u1%xZwjmrVPI9F3gT z@4+h(NUhljmgy|@Z#GZ?wR-v~BLakF1$FNip>>&8A7@${O{;kqvka?P7?h28vRjr+ zFK9%d_M=0?ffhr6`vXKRX}fmIL`PH{)OZ% zg(c-W2jl z#{}Ub8dpxD``Rl>am$Kxi<%=!lQ{A?EAZ z#GiJNpRD&=j(nx{@9b*oN&qqDUd0)Hxdn1(djp{qzsI|5#VrADXo&ZWHJ49PKo8;P zQQeLT*JyArK!aT({~QeHj=IEeSDtGdk5vy-s5nH&UX%3J00g-2qj@LR&T{60Ts<)z zSTkUK3?v*fM%x#jAv?VK`eOi(+vv)vCon2xgpU6DfB=xrqhoWWD{!3NM4Zk0L-72_ z9X^*m3OL|@I`g>v_pqC-Tl|%FbSNEj$cY!v5MZwNPypVWTkt6CG$ous!#TGT>FD~Zw zoT(g0qspmFIG#`NJdV(B9>QU`aI#*fcCXMmH_&KbCE^aL({d(0Lc>%$JspM59$9%l zH2Qy+4~W1{B3@bj76mSPqVs6hO;Yu7L||qEp{r&&FhGlyU~1vu6wbk3Tb?HYZ@$m| zW?&jGs%_Pf;WcSm2$1ig;;@f_1-YNpma=QG{RO_#M0c_STLR`ck<_3B=>3*TgingO zvFP^1F;xc9ZARl=2Y4|yAvhSp8$SGvxSL^KQF;{0$u?L=)tm&JBK@7P3Rl~%#~h+& zavlZXVapXqf>UP?hdXldHuZ){h@+XFPV=MH>Q7Pxo8vZG^t-smSd)=HPzs{OFm&VK zZQi&cAP?FjV+(xzG%*+1A^JScoKv@kxiUfFPZ3*}Y zKJxZ`mf);-BbDb><>iz4|B?OL;#w-q6WWac3+QL?rTPU^uN?At=xqdo|DGGjPN7Q_ zMHJ%S_$ix>=&b>3kl3St!eg@KW^=TH=fwT*quZgj(Xkm6qf6Qmb>)X?6L_dtevgXP z7m}vJvU06*IsEls?Wl`_(7vL@;40cmjmpzykct^eC(3D($|Q)_gI|&>lM2AGyw_0Y z0x#fr&yx71zi7!%bdjHHSIcT)20bd({8?U68%V+lO{6EFP73tWXXE z7;t~?8Lts|b%#@0(joUqD7ZjvlcRGJ$F9T&?mpUOx)=jB6zw{&u<@-I2ao2izCX`A~&|qT{=bW&mjh!lt(9`rmi2o79>uGl7cKh z9AT05%Js+ex1? z>qlKI9y(reLTYNu`nVw3-@houqS_olR^5lr1h4p}Q3@#?DrJm+I9T3%b)#uZ602V# zx{Qmc%s39|FQg!YYE-oeqKm@r-#=EQL7lKqMw2iH{8NO2_URE$nOq#-ZecnZr-N!< zt0P(J4?NXcj7Ob0r)rr71YoTfYm0c*3HRGHQhiy}5<@i(Z*N_?5v3F zLJ|kv_VKMER0!h78G03j0!Q1$alPU*0;p&Cu6il+Jq{7jo*=m07A;Yj2=jf`xpHu* z#QK!-{&gxp&F|k{&&*Iy+X|D@djC8KR_B4m7xYx$_xfy-!?>oWr4rK_=<$!*ZlZTA z)cOS7a05L@ju}$AVeFffEw_*JcPOat+W|NA*l5T(izz<;11QLM*<}DW6}6A(TKf~F z{mw~P5*TA&ubFIIcG`M%3J!CMp6{7yjOhdr<9l~;7ZIzQ6JI#Q zL~QrBjiy2nK2sAsNI=V@q2Kd7u^kE>CX=PyTd@<)h%Ly@ty`<{Ab3k>A?W-JNzpZZ}iYF0erKiRb@CPLJL{K1u zSJ*Eugw<%-3DllyPiD6GmhkJohy=*!U)`n%C|#~|!di5NaflD?PxT|p3#H}|K^B_j zN5k*mf;ljut$533XQM;Iffl=f`vcb1j5fbb`BR5LbLRMZ#)L!s=A1yv6(;qqjGp3< zyTO9*-d+%q4H`{#iid;-*K>nZvnFr08dZ3>a%`+CPbg&WBb2Y%;vWvw*<=m-{9_#1 zGrLLlNYiT1dVO|Rawaf+^#PYqo`1&Bw*kIL?k9ZHJu|7}pV@9(n=;~7(77EgzOE8C zr$DHAv!MEl@#K{CzQePoKrKW-48j(z5DZl?oFH{3_SLI@2)tR^yg`;*uJVGr5(sP-nrTU1}V}oqW90ZJuv)WVUq_3}d_==I}zUw1?!8rxHa~u+* zMy`I4&bi8>cgcQ)=c2hwP?T45z7J*PmA)L*ih$Nfo7*c@BIq&RF#&!qJG0dgWJ_Ql zhzq*QIhu|%4-dGj5QH6BfxKJV21cffX0Q1xmowQZV?UI{sTEnji3|@+4dRFkOu^rO zEBfn} z&IWa@=lM~5dkmFJ8)Do8 zdh)*PXEn=Da^l$qzG?Sc6e$9NxBaDIHZaQPaV!(`t!(QNkW7nFU`aKpYBpE=P6&n4 zF}2wmvUT3*0(dVNq5q`$#b~3`En@4-C6PkjrcSBHqsrr?JRh*>9gE*^Nj&0%=_H?u)Oe1)^d& zpxA~2LCtCMB3`CohQIy2gF z%H1NPhR2|P3CL;!19ODDlAjy>l>lVadb4n!G@`a7YC;@;a`w2!$<|k+L&Jd<_kjBY z+Gz%@9aRu8Zqaj709jrq(S*oRG4-MjmN7C4!xfBIOVa}lbaw?`IDn6~wqF9jHXmd% zCk9k!yg4ixSW_rAwxLsSG(wB|L1TI%km<;-QC7(EfL5Aee?f0WAd4wtj>9Ai;pIN? 
za@BuTfUY0<5H0h(pBk>ZzKS@I01B}8*(8I>2%yYDC&djUE`n93^|AGzz=vgX&YeO4 z!s!Ex2ke|h8dS&JDoWsJc3}*<*AZ*(mgc0xj6L+cX~GZZD!Tc{Y?!Z^l!;;4DJXvi z?7JcAX%4iLd!YurygsbSvqN!ckA)%tnDkfGfQ&XP+|cUU!(&5~gCzL>tuu^iV+~+y zehM7(@FO&ccsZJPStiBD$SJ2uonnQvgcIASPD>WeU$;D+@x_j;bpNudT zeD$fBjpK_Ev#*YF)$+1Xxo8|*&7snHT3`M$$j0*$IUCkHn~*jIQX5B^BnqAf!g2TS zW#GgDNrQ}bfIy+BnqEYV#PZt|uxkK^1(3uorb7c_p9(O0V(0az9@X)tctON0TA*G~ zIl)0mI&u*xEE}wziuC)Zji?Ljb4kGX-G*u zF>u;WR9n85HUVwv;_y+OFs0`7Pufa@_M}eK52q@MhF$?72T4LX^V`HgpF49c@*((n zDE#K#Fh}W(D4B>l+pDjOW1x)#RD(0tzkTalSfXSCBhK-mZJn>z6N9gaT9sX<^L(*t z3=!*8fx@s9l*BTO7Q;J5c;$I3sTQt@3nR#NO`_8(d(O>Ny#rlD&8M#dB)sjqVNN`BC|05(DlpSKJc+}-N{;p(zU;V2Stp^AA;=Mjt zk{w`{xw+9#7~Azu}bZRB*7O&whXtE z!l$Lp8aiNfQ#uK$q7GLxQXXXZss5bpbDD-0ExpV3+`wGQ>r%1Y4sZsh0B{%2PAjLj zE8J{}w;PmKBVhAv7^hpe?qm4RN5}H>qr`*-V%^JCZrX5P6FW~01OFP3et=IR-C8V9 zWis|2QKj$M+q2|w&YrmW)`T*$+g1hwVNFloLfFp>-56$mfl-3-EoY7()dAEI+q^*c zu%6oBw}+`rGF9!SdyCZfVee`=#S#ALp98!J1<#d-BjxoOha<(z?(4DCVE|aW+g-N} z9`BImxtJ0hr2x9Wh_s`=GbJi#A2RSOTURMc1h2`jMz1B!DdBGrE;AAlrXt6U+1>u( zg7`A#P~70Gx449Ayn~#hTW@{%?MRgG4SdG74|?oeR?~zs@Q#nOSZ&(Ia?Ka zR8_Hc`ypNyAl?B&ZwWtiPmaI-3VZ|+`})6R(66Yr+{rjaU#oa6pD9`fKoGyMgd`bk zDSBLxt#&D*5gKa$m;BDy^R2G_t{hS$V^BXiwf5Tj1w9&+ZiIhTrcBul(+IlV>PJjw zrX`)bPDzZ}G2MQ^(#(50o{KSRXut zbim=`P~s1jpDTtI2>hUNddYw%(w8&-MYdP8BIw_=2L^cb1^3rL{+7)9Fk;LmuP33S zL&Jd=Z-Dy)P`3$+jxmqru^wxz@db^iN@1HypBTywYi-gt}}uD3eO z0gKg}%y9t4?ndq1Y2>4y?dEhF{IMXZwfb%0aTDSYQjB5`%-Tjh<`Ofxd=@Lxl$kM( z3n|uA^wjlCGM=~vm?gv|_9(vQ=Q-(pmGM%yg?Z9x%(OsC@Uf;fYq8&(#T4tCtD)l; zeBmIlEjSgSRBg-MT2MY|#$oMbVZSi4P?-5Np>kL#LD}Y0CIw@Ve1#Td??jLbLb!fd zr^MJ+ujTzqs7ow0Y;D!IN+a;T6D|tJb?9blHU?kiLX|Pc67=U{&8&a5v`Ix@Z%teb1JV zj1{-l?2*MD+B2TwegjWYf!8Gq1E^15M4k|uk|M6GqUToudCSA<5VOmW*?v!P#%E^_ z{=_pU^1{I3Ng3rrPGk#(1}1VuoQECoNjU0n+N4UECI;KO4OJOPu?MhLOfk~ zDMOc&FI5VYk&tgg0QIQs-R2U^9D+3F%+{_}G z_F~STNLqhj7`tMUGBSi24zd<*)nFp47wKDPz$)EBkX>U8|PFQeCEP#Uc9vWUvYN zab7}P^3yxMX%qu801P)5fTfT(`o{BRT$)Cj!^0Kn^{Yr1{YV--Oy@UoI0sn|fNel` z;vUWj$5N7)nuRcNYn3RhWl4_!%~TZkavh=_Fq5lboRVDJd-oliFA7a|FLx#!GtdZu z>UcRPz^4%*C}m|OWVBrP1OT6m&d#7)oL4M6y^I6 z>MuPd`WFZR$Wq{0KqE}C9&HmCqFMHT^|DV%XUvvfy~gcKCXQ!X5XQYGRP*g=Ra{*D zUj(INMU1(0PejDb^kJH~WjetD_%c35 zE&Qwl`xkbAN3pMpw2IVFrxZzVaNxZs^b0-~cyns$@UxOfsfWnAt5<2r;mwl1azsN4 zKqWFf++Y9fM+sC|kqc>daLrslpNJgRY8JP-I(m)#9cv`UlJ_VRcy*>M2aYs0;{;2LyC$zaRer}Pm zVz1O=LWZ&qWhdTcqyHz7$q130gl zTjKd1MD%Qbyu@eAiO(#Pwo^tBgVCNO6l7TgA#*=7+)m;MLdlS^$`-m^R!!KGKPZNO z_-g%$zQmNHL&Jd==YabIPp?VMD)~lnOR$iXXu9nhU;|IuM`_amXbK|I#aJ|*U3~we zjCR>OQ)<7q4o-r4osmpvo;=4t`}9{VA8Q>(g?z%yBk58q`9E9@nWIGJH+KrY;(|lv z8@-rHTNJITo}nb@W2AAO5z}@ncO?F6z<7B++f4OH_^6v@;vLWGDk9gt1=@s}6qqsA zGO+&MSUKE%JZLy2Ccs?*v?j`w+IY)3|?!I}jj^;nGO#<+NlMUW4^poffrQ-#gTDg7m#= zzrS;jI0n}{i4HU(c+9iDM%QE#Jo98HKiGp zfWkj>lair`1ctX=3Cu6Bb%D)hBB6Sk3JOK}9ayu3$s+X0En*6}2;F5zo?ZQ!1CjmO zhfou6Yd){Og`UBjX&VSeSbYX}dv2j!&yu?Pq+7<2H~m|O*9LE4K10ZesqZmqe$JD) zIk%3bL`D^yMjnez^cFfn0|CP_Uf$K;ersRO7jRMr1%HUW+9JlA(N$U?6aMyljMg^p z_2AMZ-Gi1O+O7rAvTfV8ZQHiHY}>ZkW!tuG+qPfd^FCsZY$REJv4hff0%O1d7b$M* zid&4p7?A$Vs{%`DZ7xcWN`H{s^A_q{3vAC}fW>H1L|;bgr%3)Sv6?$E;y6%gi?zae z0UE?z_fSPg5ll|E^@9=KCM1gG)}4;(!G&rdI7#xXvD2TiKs%|=<8qWR_#|mjXdgIs zruU7r2CxN5juU!k=lV$8wmRIcZxA=hxY^}=lS>uVh}vpK>l&K)HaZEV*BpS8AyqJs zdN&p&MHWkWUDTUf%W6PJ7}5Wp_IBrCyd;y z>-bvtQHM0ZJp}SC5SbX1Lt|oH!VpxW`7~|D)~Vp9MAOn!?)X?@ad)>^DJWS8J!IW^ z{M11v7c8V`fW=tzXX3c9LBFaGAo4)@zKUIu7szOwrIE113wOE}mM|s7Z-OVWVl!lc zF8Q8=z=wY=wAE+@6P62avx4kMC;MM5#vuORiMq~cR55lhC1m+A@xrK; zJZXM$&rdQAsEl?(k)R!Dvz)H6TN?t!QR)J1WnR&g(*Jf5$`Zx~;r`j6yQwa>}u7?vpz_{pa(_8tusIVRYNFhX2*-;L# z-oe#qkLWZ(K`_TX 
zT`mrCGr{7}MaYTn#3yttw(jP5V0Xb5ejJ2A0}-|w90)@TX#yhbJhz7nC5SeOKyVE@ zV%VfurISiSy~_rEH=IBW}hET_}Vv8oLMy8Wb)guHJkhgXBS8)wx^ z*-gK^hq22JcAWA(yWOR{5BMla4v$%U=hod{q1!XTW2zZ~DH_Wod|*1S0}w}Wl)a^~ zyT4KZ5p?r`x=?%U4LM5<+rqe1+q@Os%1EtPG3V>wp9wBAlNv5hF%BSVD$fBE`Z@`; zjPkZ*hAC7WiTdt+&Iz9n*$BW|0YtWLNB&Miz1f_3*_(hmidj+3SAnXvRc0CtacfWS zNVVTh+vk6Rm8SL_rxQbu+lHskvq@c+Vh+ZRpld+2dPh6EwR1$ zVpY^q)^^IKP8=V}tZHPvIXJlCrBx~`3%bZn)P{0nrpHQbTJ#(+VYQ?f9hASX2w8B&fVmXN(q@_`c7a>+AaPX z1rfusuZ|FDD5jAzPRA-9r=G#o-GK}eLg39c^DcV|)_G_diQc2H>NfQ;^8O{nJFc@7 zwc4}E&l5~P+mL!oD≤=Jet16zCV)qUC9H+yfMHVU`X{2eTqxE|kzMuVGl%3{~7K=*iB@mgWPD6=hIIAju%U86ixd_eo z&o3N(F}PQcTb=$~bnNlMgUp8lVhkcawxJ<2j~5wp{hS}%b8O!oD^lyjGmT1<6P=k{qjfnJ-VEst11Buh3b+#fTN~GDogjKJ*~d+*iJ>%&echQ4NMz{| z0q6Q-u&vY~uzMfxi+QcB2DMjoFx z5=W*K8bEn+1nx0oiz12i(8x*j=t=e`HF1|Rz1Hh6A(%lm15V2ddj zGUcc#R>xGgP}&@4nYaQ$x9bwja&k#8Fe|A|ApEVR0nEaA4<>gPUI|>^I5ThHOVeH8 zz?k-4$sEz?{*(ToKam3d2!#@_V54z9($JD?0Ale9W6OX94ng;}4+QFMe@Umr;1?7p zIQJP6`pK7B$xioU9bQ2^1Zm533c#!nRjY*$70F`j&-TBEG-MHu=*!h||Mi2)I0=O<|D%6DiY4OKj zI&`=FDR3=ARjBa^RdAM&+s>ReN4ZM-VqNAx#aaIpD=F4;n!;DKsZXQ$j=GP1MMU)e z(w_RoKDWF$M?Cd}47I7L1uI|N<^)FVj(a6f6%OjLW_D*+Dk=QxmI*2(~pOS>zH- za}Lyw0!3$8^Of!lUy~I{ENCU}BX(WJovtO74Guz;!RsGojn;g{inD%iI7d5E#v}>G zFSF{HfhmsErNKeN5a=>WVePMd{L^h*k(jOcC9cM!P!gPR8-4&5Uu&y5BFetQGDH%s4AX>hD3r1EfagKEiRUDgObc^5lp1%#Lf(_1MbS1Mc?A(-o zU>?j>qg<4qo&^3%J=k(vwOR3We(Wlg z*Of=wDi$x_&Ut&-S4;~3!Td~NiIw0^F0$m@=UrnA*)Tm z?@V5HLq@SPEX2Jc?KT$db=dU5igN_;6+fWQTf=|92xf+h{_4={gn6zk+jfNs@LIoW zFrQ4fARQdpDrDvjx=+GWqsj=|HVYX>3rT%B4boQscBM*~+} zNLOS3Tw3X>_}oU;3jMGIpaXg3=agD}!GdF}Q?J*2YjH*;+ZEpKF4#TdY6Su*VG%7^&+NzD;V%&=` zH8(ulKSh{R&1DYdaHse^_&0#zZ2H+QvmC%gvV#W&jJ+sw4PQCjtF3>9Q^U-}kq;JK z%vC~g>Y83s33qO+5zWRHTa*BiLz>{~Qr9{Ab&+S6_N4#LO9&;TF@Atvdc@#Hecc5i zAYUUn%N5E_>=}GaOTRx?CK_ItMa8pV4AI~J_!SNQ)cC;{JyXX+R8q(yI;rzb5R4T1 zwBi4!c=dm^*nc_dcm|m_>ZI?~7v4-Br=Y zgs3PtA`GZ50#>KK(6Je;z0JxlJu$4k(0SUbKY=AVJWnn~!$C67_9vXk8}ziU>P+|a zmZmrK@mrbC<#>2@`45w#|D4)6rj5(HJy_Pr>xrys5*Xy#pt3KNsfqYyusdH~XmTmz z8pL$r5DuBG5Sw3Z$OB7bTt|Ac{<>YTy3UY%jAPXok^m6vhhif{mNq((&@VH>Zon;o zIJ7xT2A{|NZa%oI!nDpc(9!v~B5I**T6Ua_E$~e7!bpgir<3*Sl?tTAGY<_;s7c$N z=gR)NiplY@4S;YjhV9p3%!+c`=gwf>Q)j1$VQ-B$K(x_zzqlTDj^Ps(F)I8%ZxF!Z zYP0x-gSZKP!Cl9&hT`KN*X>T{hs}BJq$rm7fbU2Y?eTbu5D&+Ol(Q}4dOeF~*@h&= zaXZ)Mn)Pj>KC6mWD|yWpy#n0w*kCl)xjJM$Cj9v>IP2C1S8D~PoNm72i9W$fwE)V1 zqD{21$}bE>3Dy!?2x3jOszC5y%OAzTuKvn;>>WP{MvaN9Xmw>|1#_3OY<)ASzw;cG zSPp!l76gI>AU%>4`EUOs>;j@C5-c!jZS={ZJDRZ>}?L2?KLJNfdIIiQxKF;6BmPz)(jcj zV`UXB*Ky&u7*Wcs)E^%9_27@*`z^SihO3W$nbRraQLV2XroZdU_8ZlKlKI@|DMho! zW#nwUs5oyn#!0NHe>7Melq}3AotiTx$IdY#a?t+6svE?`nYxhw434+rf)Hrb+3EI+ z3ALPjm?)x|Xo0~O!RFOLxac@^rJZi`ngI-3@#lYvF_Hf%7Qhhp)=R9W3zXp5-bqOe zgNR_uU0_Vr%Otl$VfB9C{)=1)&C%MInW!v-S2zqQj^|cT10@o45CF^0cv@C!e?Iq) zGg5}wK|^Z2@q6yCwCTjp858wb@z&usds;I0-ovkYOiWSMZ98bL$g6WJ!eg@nORb>@ z^GmaM&&acexK>qpT%Zy;&w16FO!nMN1pSWi1dQeZ&_i{P>QvFZYK7tQy}q*8cZ`4~ zPR8J|Cy21;PO=O!KZ^uCCqm zc&?}+aMG}}$<>4}sNgW~#(pNXi~z^$5qcBP1=}bc9J*`h2{WsrYp`-b3Y4rMknD-! 
z&3|eFk7B(((Q606NNhe+dzHa*xn=J>E$mCnq8KWbDnSem|p;o zk`|e1L@LcCO&&Wdg8r1!UUk7Z7WdT*iyM!aUkBZZ4Sbg zJ8pyL==TNJzp!D!#gIcbW%hm?xiF1$dtVe~U7JEN`>$|O)}qlP1%d0Opsn+=OU2wwmU$%C;4!E zmUjF=%^TOy!I|&K0ar}JRf^>E7ZbxDW6eXz*8I2nXh4JFk(58fv^B2J#=j`h5SxYP ztPC61(~qU0xj(6yBb+)S@?%n|7emlI0i`xV>ZgoxU65_ug)76+^@J&EUd+2vY1+~c zuhjEV!k8IL-N;r}%*WrE30yN#=pePp?vn>dO)JJowN@g^#4zYC(4yYSEy9lUdE!06 zoG^TYFz4L~)&H*DH*(%9GhnIGb5;+s*LnV1h$BR*n6O>dygG|nMO@O4uwa4y3J|6& zK>?H@GH?GWmj9<%A#Z95<*5ymud$2}Sk#AUb+o>IJ$FR8F}w^l|JU?RLP%0;*Vk!T z-oQX4CE0(=vetbG{P?VtN@6}?Mk{k z2{E%t{8*aP!Jm)ZruX>IH%cafUy=+qmUa&4Oa8EvH^Hzx@en2U>3B|Gf1^ZR-W>Ub z1cY*u4kc2`~Eo0&~--y({vp6a$=W zw42C_TQ2+7GKbNObpnDgx|fTz+4{y%!P&0nGtu^HRB>z)69}Z;?`ba}{cIr0eYw1-{lQZAHc51;-x+~Zza7-&E^akL+S2p_80uN7@DtY=B^z_3UsG4?0 zcJx4hHlN(w#l zS*^X`#}44#&d#nUh7}MLwt-VXbTpXB&6we!yeJMkV?#qvOrj4c%f0*HSiqWPxe<21Gls3=1ogv*m5@a&dI0n1?4M~>FsPJxLHSM^$= z4M9HVsj2pJUf0yh(@|9Q3oPRNFtwVGhv#iC!{4@TV5^3b--DOWhhLO~Jcb&ySTfir zepSFfLaor}lxy%fNGWCRpqkWw-eo!YfWRKvsEcE!GtW@9u-O9~lz)^sUwEU{X+?ie zUE(F((&y=6b)8Zc!Y%UPK=2W`PE_kT-91(~cXj(M(huL^Z{Ao6+Z573fQu)TxKOYg ztX=xVG%#uB zla^}z;iR$^ccxqlJK25*FgeGmSw(|!_4CtCz9rZ0@-6>U9Q9AJvmK)aImEr(MIkNW z`^_s!6$lwdsa^i*CZpt6F4MO?{uzu9IbkeG2Uf>$0sc#bkpB(8C60P#(rg5sfl(|? zu&sdqq!HUBBSxOE+GA=z6rI=I>u%L;ov^S(q)VU&8(g_FEgTIfg| zrw-yYKJ|bSM!CRU+Yw*VQ?s%b32yv=z`9%c;=`a@G`zjlasfiMZ-7oQ}NN{0XKLwbwsch+N1W_4JWq%{h^<(ZkK?4l#xRD*kmz}$`fE8-tH^_^J z9EW)}aLjD%)b69`Gr1rOZ}L`X0C+I5*mBJixAW<;xF}}D5P$+i?*TI`Phgc+GbQ`f zN)k*F=^Fyx1Y%H{Q2_3rc1rA9t&v!;Z?aJw38Cs~8d?)wvtK~X~eWXhCw~BjT>c)CxtKTS@u+n4?#L%@NA! z@HC)@=9Ghw{A?4IJ?F|{?viDQp+a+4ek5nl!9tOHs(41V;Cs6F&89*D(~R6D6UZHm z%6;Rw90Q<12QjG3Y+Dr0x6Q!;FEx)?dfF0!lP?&F@FHrm$o~ z4CEH%`khMq+#pk11b0btwZC2`O(a5*>V*}gT(*#pUMZo<93{e;YL}n0Y1o=+z-zy1 z9fkh*2;c(ooGpXAISUT!r18K-7oq$*b&;t{C}rRNPGC$YeKK zWj71*o_rY&E!mOIyi9s!3a}PF9fjq(kzXj*Nu)2j_)Ea(mAUi4A>r@g=i}@Yx!*OR zYtg;rgnr)7#%*hxm!UrS`6i;;@ve`sCHij6?*qygiFd@@f=|sN*R*7xZp5ennW1wW zSYV`|z5(QUqHZ!rK+&Aq)l17Zri>R|Q+arLX%mT{#7iM<*6zeT4Ejoo5o?zf3EcOX zOh;Voo;NfBgkzd(6OEJ=HlcNoMtjf=ZjlZ2VAD=qV_~LNKtq#}wL4@7yx>sKlgM%# z^5x7|Ab9{9$Q&h)K+#G(xGB;`I=2U0@xXyx+_F6~f5#e;?nGeWDrW7#C77lb9A?NVTQoUJFX zV!}->zALW5btGXMnq=mD`P6i7#>kD6tbK8qub48603Ulfd-Phwa#AcB_Ode><2?0n;uXOLz2FHLO3!|I%@DlYD=w46uJ=0nIZq zzJ~s2UQDm6S6S*TE7rYj`TkqhmiLa#ob*UvjOc$&Y_YmKO;2pCSAsX(ppb4FN z6~2(=dPmvA>B>p@D8nMnjlHt#PFHOOD#m%!?tdrs(i#H!VA}L5<5a}_jy5c>wYeNx zIzR1hP`*v{gbDgCVNqwI%4y zY2UXeQ5t^sUYY`l#WvwzhOa*U-tU0-;Y!*2OmM)RY{ItCy*4Q5Q;*$z z6Xr9Q9ygub@JRz z_pO*Smi3;gU9GLgTZ0~=L?$W)x_cm9CfZN`a1A`v1N{N1($$N?@w5`i6@OT{QhNQq zwM@`3@p>nq05qj zt^SG*PMlcshn@A{+9K5A?q~B&rpZoQ$E6Vk`LBV9zGKSvwsdjNEwZI$e2=fK`NEy` z^;s6DP|9g*{MPA1!!&HjL@)txHKRgTTx|8IwHr?uv%3J%1{$%83r}t!+`MX;PHo)J zXYm$ZuELgS^kT?-J|>aM=Q`@>2`c!xrkM)^&HY*&Lp2Ze#s3sz0sm7BNV&ZtZSf90 zC&tC+#HF+`CyvsLXYxo+N^8Hj-qhy>R(iNeFm-F^WX5|%;&BBp7{E5VSi1V4)l{UN zxI;ixnH*n=WmAUlY%;syMi`P}7JjT3P!={)NeY&?(fASHg{5{>4MQtdhX~>LS}%%1 zp&E6r>z=w}hr8yAk2&8c2{RkpSyx3406x|6ww55t_Ci`x_UpV)6Z$jJoNw`RofzE3 zz0ZDmx>tB!bze;$TiLse7!9^G zwZFCqmos2Tj=qpReeqaN1?g1XuREB%yMkX(Z&uEOGJ&i~BRNXIeo7}$Y z%I9_VPnFrfsO)<}uij^0I1QLgJoKqz_k|b)?j@OjvWv6X}wUo_S#F335X#pewu=E2jsAsg^A5Qjowx%=*V8<)u zHZ1==P6pmaT{`C@7c|uom#cQ)8JV_b?id4{oGk?&M8CpMGhrcnhxfNG$vI2B7qNfR zW+a?!MtaeSl&w>v3XJ&nai#l&4wD!qI<;T7El6V5X}`MS7?lB+0d$qAv#UNH#)JD& z^8N%$cVGP*9%kn|gyMwsLQ(a|)#N&whZ}iq--DJyNuCg0;mOf6-#qCd`8|H=m)o*YF~lB1oTDh{w9yk`uGd zYDp-h;tJcV@TdjSiUJ{z&<$`a6Vmg^t;Wzu9705TK28$xycM{SzxKyDQ#TnYJP-0# z5!>RDoIA2_oltv`-G7St|0!O67;qafg*wmNU(CzpBw|qq>IH0eJr)r@Gdn|H>gZF@ z6Ft@0T?ZPykhE5UD<~a>T~NH;3Aa8jqe+IA9qs89%@iD=j`kiIks0(c!wF6WVz(6i z+*eJ7;PZCw8M(Aj`@)g&W}x5Jlkw 
zZp6;3LGfF9f;M(sQ$!Oi*;3@9!{n~(BoL5M#PLV81L$ZS!uA(WA3d`YGLper8H0S0jTD^&nT(VBMm@x-Ezm`O-kkBA2B*ltIs zbgW9QCrr}tk&UX2`_i6bJg;wEo(}+1qh%LHvHSm=FmV+2KG`E&aC<(=`~Uu!IB>;qPZ!s8_jq*uDUb1`(|;Y901RmPKSiH25g zjTNF5Wi_o5c6qII`8mAA9r zm6@=GB8Ys&W*TEBv0k5pK`;+*)wTE%wQs>BOopsq{gi*Mf9Vb%i`~%=K*Is^q=?r2 z!tkV%-E}U2f5v$1w9e2F=LfKk2%nZewhms14cMv^E~xjTEq!t;!5^rvwcr?6BDg z1vIz)$KrLy5kN6EdOH8MX6f~MIZdvK zhf`vI|5&9X%Ayo!$KKLO<|GFdU~$tmrimo@e4r~2{ynJL60 zYahWT{?ICAF^gG;Y51}UrqTGfcxRo&42rc2R6*7QZ21RZ<-BueDrx8)ict! z(sE2x4r!To^k|R|F$8~hPjP%-Rp`3xRbl&?cb}!G?}gB#-kPc2e~P{SDPF_C*c9|z z(x)BC?y|saPRPBQ{Tj|#uptmmx6anm&mH>)2Fb5udnPoArbrJW@GGPCipOV}7)bCh zHE_5s9^U91j!{k@Ny5h{jwF%1OCO8QU*d?q-^|ST=dwtfZuui9e&O!(icO$gl)URV zKqiChiqpdD>E`E9;FiSepgyt}1fV;rE?=Y!Is(8ye6Uk$q2-sQ{Qz0umfn;r=>@-o z-HB!Ov>dD-za-$UyFmEggvzfg>|6{wpcR9Mu_YDL@qn2-hv8|CvXD!Ii=^4M7IGl$ zJQm*iD)Esz&Xs2(9$L)(X$oHvt^EEG6^27sC`y@+baC7@H`rCV*+p7v4i#UPA$c)2 zJp-SB8grPne`~(~5L}ckj_AG+oJRFCp)5`W95Sgfkf07t3Hj*4S)iLiMvov@%hXPjIXr${EhGv2-MhgTvQ0-HL-u@7|s1v;1rPQm; z@W4HJo^EUfm`CuIm0fTaC+z{}G{7uV`2vN9s|e`=e-VXWNXvDmk7pd@5b*&HuF3g{ zSIM!rl;jbezP&wKflw}gjPq0CCuldWvpazzxX&GyQ#G4n6vw*U0-%aR7mglzkR)s_ z7YWd_;BGoB8zyLnX5k+z89f@DxzLtJLjNGn!#NOXj*Uyu?beC7+Kyin=@b3?-Q|+n zV%v}U;SxsYl;&t692ZY{)RT=TT+}q%B!~N|08(q$`C8sya<3$evurK967VQpH)*?_ zP!^T~pf=Ay+PwjD%M%BGrUj182m=m9}xVb4b7yNtnW>$ zBEfub={4my5-FmO#jmfGI4$x*^cbe4%6}WL3DxT9lEya%E&9az)_SH{U zI?+J@=g;9^QEvqgsH# z^!j~tyQ$>@bi&8e#!k^O*W^oJMOQn(9jgpu3VBCbE(;)&PtbMIVzjA7TqWmqwn#%z zOHue13C}ksWKRTqy;Xuc-6o7nDej`-iy;_FO8&4)Ni04kcqA1Qr7^x9;Sv<-oHle= zLe-{#ZcbEL;3lHP|7I&oJA$&Y?o;`gSc0*QG^FimuU{t%UUztY zE{}lxm45XD((4ju*)C^KG>gZo`l=0vja&3vPSk8(#?B?dH~Xv%KYp-Q7l8y$7hl`a zo-KGYV2(jvG|pk?_}b^~MP4m*SY%=J`(i+ECvdN*gz+?57!p0Ew=jqXc4N1*eg^ZDWF5ci5Y8p7253Wr?Bow``>HO3Dq7v<( zIh(!hVA6?L3E{Hy~Gbf-}puL~mN5>g0^JC->TWlG|&A3w~*@X8)>!Ci7CVs^7%B zu6M5v?=wXwz_9GSOpK(~P{5v9!lOE9W$P1I%OF7tURpV1pc+UdVY!gv^{>$Bh9#y? 
z@G`t(97Hc40j>ml)7b={;X;+$b>v7LwNKYWlh|}NFTl+31Q7S`6ylxUjl+J2mas)@ z1;fTRk`JmJh+@ur@@3z5D-C;E+2^7KuvA_Ap)ag2KXQ_IuDo=WCDmA!2tunz(u?h2 zgfi_pUBirSRKdAoHsD^Ef0b~6r=W^hP?ewljt4!+>vCNgWOY$jh!8);7Mdv|-cI6$ z4%8?3pD1kxE?s@x%l636XbjQA`qX<;%JSZcNm{jC?vvNQZM4A7sFOs9LhQYq zrI=Usx*WQUlllzq()s2wgG+(hbhs~S?{kq>QE+(?WP&;y3_KAXLH{M6Vm5q2V&}%nzaY{HNLU(LL{kpa_gBM_SYs)?-MEAQF8<&cx01B#y#TYrj z6WV~YTQhT^*uV8_o~g6fxem~Qy63AITBYAC`A(Sym}B2w-Sq9z()WcLU?&dGP`j-Q z8Zu(mTy#XAF41v|TB2;6)A#^f+P?b-QWkhqy0+bbEO5>rzOkO(*caj8W%o@S{eY!h zmlok91;}%jP!}w$3vk^;9rmsw)5`G$vI@Y7Y*-a7v;r-L;hGeo=amqVi#WvjdJcdo z44<*f++ECUM-P(JjP)whgf%qnWQoKN1ah`V& zd9xv~8 zH!s+kgz&_txT{o(kDELQ%8=ev5R6Z7d7`P4z0bLQXsW%^NX_K&t?;J~6Z|19ej$ zg@wg_$j7&er^-}L`f@18wT<}rAviAVY$~R)D`BeR7JWiCHWgB(@U#7x3U`>u6})WZ z#-My#FCeV#oD?&y2Z40Vq&ObPq;an>Q2;Wy(q!X&Z7c^XQ-;A0bG?-)*B9zn>-wd-oM(k0N=Vw{k+9q_>KTYDkS%9c#e3HAeS<4Z!y9IOA#Y+4 zmh~rC!qK!k*DQ(ET#Q9J|S^7 zSIg*%2kONi2Xf6t$(ipz#!upKJ!0RZ1Uc!Xcqa-vNSq63Fi5*?CM!YA;%Gu}TNwV_ zNp{-re-en*2~v8VqgDOK@|oeV8Ub95^N0oyc1?MO#GB8}`7Xk(L~by6Jpj}mLC_%G zyd<>quAULnjGP?XEAzX?LvDE54eyeA)8iS&tV!ujfa`0rxDgl#Y!wWX%mF}Nc{M)4tAD_wujziIX zQ&E^t#TPyM_OAeV9xtgVdCVLlhrd@!m_@8;p6Vxs{wqXT0UpZtii7>RbE9sxaMJ* z_BCi&`o^JoJ5{%w%hbsHF@+m$e@ibwW$a_D!Vbs%>4o!4)(2fPuO;$c%$j7d*vR&W zB6j>VSflv<^qe}!POHe%#I}Y!TqfHTBs*oqOHLJI1<^;Nu&ZZgtjR(sTvqZ58{1TB ze7w!W|Alxxli5Nsj{jS`z2d5C)D_}T3Q)UzHJjdNKM6iV-MN<1&;cW^mn81)R&eL5 za>DwWP0g_T^nBeImY{00kBH=0(2+{B$DB!^{+p2Xux6VXnMfhnIiYP|~L_F*KzW3FN9d)^_@ks|Zi8{}fyQ zQw*w%8KP4LB?Raj#hrvfi`InsNOOy&8O6(`!bEf{8JwevsX;%9!)#u(0ODq=BTHCsN7Eu{$(NQ5BrveEB=Bi4R9(EWJLoNCf9>HhuBInVr2s{e zn9)I;bL;_)JSFMVIDhl==?J^=bjk3-F&~@WX}_C_oq*>RmPzDqRtFgTRU}o#spGT* zYF;V8d>ciV9Jc+_;7F}v;qXpP^Sb#_=V5{_5VBJL)sWth?;S(tg_eFZAcOXnqcU z7D8XU?N`nSf76mG)X6UNi22;Q89}KQ5`PqW$O7;MV+!q4mOJFH=&hhW(BxzTAg8Mo?~A(LVxFmn{muJtai zDq9Tj<8qVqoD_M}8R1RP2rpExKvmmr9yBmWg!`&*idZdUB8S`*HHWj1?xQqCjvN+B zbZ=4JZ95xAc~PkVdN%AMcsh}lixPdd_kFd_5YGk~cY&bZ*A)t!(zQUTwuwwhJ)kt< zur)&n6%PF>L-?jB$8293q@aFT0|I8=%m_BD#S!m!1b!_2W* z#ymSa?2x8yMha+)n5Qx3Ek9DIQc*PJp$482Lb8hbutvWHts0Z{KT3 zNKb@Te;Da7(SIC1d>*LeDlu{-vxkIA=ak}hU1_^P*o=8Ls#VXLtZZh*0pfC%tLjlL z+UhOxJ+()7T)O7$G#cRdom%6q-D0%Wv|buGQxPX<6vz~PMur6{4+)r$f*fl!iDRx5 zP2AC+{ypwP`I|OdA&l(EQoqDS4uliTz2lfz1brvrWv4-8fc?YGLM`wof9+p-BiG*2 zqo=bmy)dt3={GV_!RYTobW^UquGO%==9{lYKtgfrn7-%(Wyg9EPCZ$ z#4)zpVI#Nod{}~5Y1q-S2hyvy=>1XOxndfZS;Q%Y*gn!qLVE*5jRrb&cUmfAlHo`h zXzpG)+Yp6^1a^fR`9(?#yXhp)=f4kL>OG zf%{uXR`X^sOrIj+VAs-(9qqAR5iBoh8e55ZjA!N$VV++_7kPCZW-Io}N-M@fBWCbB z4kFoQ^8`{rq zTqQ!d(<%cvFuAM74~Vue{hPPHYcE@6*D8%SnE`1bZEg2Zu&vpqmQ1x-I`pAnqcVSO ztw7U8Mi=S}I@_7NQ1uGjOQcgI2n^K@rhU%bnp)Slpmeft@N2yEyRGy%I!#;Hwm*+( zlQ!fVBZ!mR?kc%igh1vI0RrUGQghuIe%1Wfy<+NDj1>ra9yY+?uPoaw7jkDjk~ zNgZ^)g>JTnDhdfUUfoK~cDd^Z%wx3v?mM{`l#TDXoH7S~o8x#6rM0$BvOSrKvLpR} zEk69G*qM2FxbfvYG8~|tee^j=b0mXE{dV86#kQ$IBIkuAqvV_%NNj*UdF5y&&|pFO z>Y1@C(XtXF()c^Tt^LQ>|4aBoJn9!(>>F4>6OwZj-kX5(swokYG89;!fRWiUQ@rR2 zAq^n4xszo#)43f-fewWoj7hms_1GrkITVy%4|t#@tSC8|X0`*u8N)C|_uc1e@It{L6p>Kcl;VW zjo%nCwH2hR;+I13IU7iwVfk5$ZsMm`y%uTL9Uh!?U?J8(ltx*?#5rlEl#&W=;FEBcGVAb4OVC&MXS@v1|9*14 z%k9r}|0H3T-+q}5Z3Q4&)dCj`9eDB)b4pD~tvr)JQi9mDlq#zCRrRQhX)f)t#eiM- z)_{U+k~v(8I<4+Z?h6&88O|)|QMyB?hxg&N)?$z4#WeE8&>*p7`;HxN<1@`+Rt;{& zs2Xch->kXe8EtEg{t3ITl1d0s@R2ktHgj16e!ORim8EuTcHkTMAX7wF=oBhI(QW{c z?daY%oYfMs_aRPtBfbL*n8ZZ)zcEe^heV5umLA#s-vc3vCM?&vCr7@JUf@l&u&fNGgD)P5=d{OQX=-d-d{V5 zG4Ht2xM+xT3HRK19&Wa#vQ8sE$9*IWL`VcWov-g3hyIC^^5^U;?km6cFf!;ZYQ!i6 z1kw>pu2KZb;NP_NUq^X~r;JTb zbTU=NC5im-Y#sd_&v%c>RHUuz$_N&io>du8I;f7zN0=ed zR`b&%1aRRI;{{*MG&hU8B&EuAdcCP_ybDdRe+o0CfrS`^1!9*}tNQnQY*o8dfEDwg 
z)2EZ@#uLi?GNRM5n{D=8(YyfQLy2Lcs`d~fap~Y=Js59Z+t{c?m8yJ=@*d4H0o$po zWE1d9*wIAUQL$j1nrzAM2yh|D;6c$P zS0)Fu)uB`!ijp6mcB7y~0COis7nDUMd{tXMi?6^>7?wNWbxHj_C=0o=itlQOf8bpz zx7uX?Odw`40H>+FtcKm{qKMBbr6^@ay8=os|5J3zX{ zQrb1gtnN}Aogr)qo$8JvoGZQHtx+hUkPJvmcDQJJE>8i^vNPU&{24UqfnQ-Z99Mgp zG%*HIEIf`STiYhFGAvat$w3M=iKrDk-|vaPph7uKjDJDJ4MKB=-hkF7eEx-~jfe`U zj>4vMTc#E>us4yv($eq4qkmP%B^7;vbYcGzu$&QCE+Oar@ZK~r;8lYfDwFY&ffmXS z^8CQ*pCvw*Zhw2PP#F(p`WKOR2u1nHkIZzXk9u78@&6{T^<^TP;p@VlSp4^j_$C(^ zWt_ocHK8)eDJ=66Ej`0?o8i`{v)Clqc9_zhY8bw8?is?5s6~DpWh5ccFz%S7d`YV1 zR%XWT%cc|5pv;$&*#4Jgf(bJxVZ$*nELr)NF^dYRZDMJMK;heKRYJ25XjxrQxaJ$U zq;!vd(A1m)K>^FG>E@r=smxG0z-w6$_FIL1YrW}c2tBigo8XcdcydSuKPtvRAah9e zkw89|AI5Z$g4mBf2N8As#U!bE?WU&Tt%ItitsM$--Av}`51ps!avW}Ho;^92gA-Ww z52-Ssk8@TTC7k3B?9{ZB0dhbBt1Fl5@h*U0=0P*@sh{c=0M`3r)d9mqC)WN=p>SxlBq81qvy~BMF84wt7aF0 zH*p35+SQ>-?>mmPV+?Htu#>nEheUCvDwQ4cI@8s}Qz$rrGzI1I(G`pJsOEhV<%6<;%&Gx2-0ZeT05Tzqlq;$qttI&lwA`A!T_z zm5g+B|5Er_h!e^{7Ju47Sm)@$UYEOrZ7{>4 zCrVYjP)QRrz`08w3Y~2yv737=*54&G|NYv6|Gx)BLR7|DR7*U2Q5%0R#;SlKyQJm< zNB2!lZ`5SR2YSBYl@Bofd@m=713oxyk*4E>kg|f}_Fy}aY7T%2*VP=Ljgt)X=Xa+= zgviFSpOIhsw%&}WRDp@elemnao;Gtm!`(Ez>uyo@X~B;Y$ocSB(nu?x0~gS=mdxrc zmQ}J47oIR8l|X#&ddSAFkF!R%QF|OO5p{`qw;HUW{4zcpy_{WJL6^wX6Ycmx5Y0F{ zi6tKrSZ_L>=3aR6v`FOhTs7s!nLrRRJ1bW|HUTvv>79{;R+F`P-ojwz056geZBeON z@&jud*_2Fv*fB|c#`2_7E(q8mHRLd%lq9UTqu&0Dft>_3Lh@m{G9_#fTzrfasH5eh zJ*C(OImIO!o39k7f3LDRxJ0;s$1Os|YOqi6YM`_0x7Kdtua4@sOpt%=t07g}qcQn! zvI2R&2wfz6)pAMK@QGMt936u#6b$f1n3RbAc~DT7ojqCS5t_-VxD7SrCB)aKHfGy3 z+dV^XSD4(BK#+RH*iP>7OtGXz_Jo}-9(3NhNgr673)cPt0rF z$xCb7PP14bp)^n+S5ACxAwTd#PR7Gu{1^Hq+%vjuN0Z6!9wF;XE)y zCib&hzt 0: logger.info("=== RETRY CONTEXT FOR ACTION GENERATION ===") logger.info(f"Retry Count: {enhanced_context.retry_count}") - logger.info(f"Previous Improvements: {enhanced_context.improvements}") - logger.info(f"Previous Review Result: {enhanced_context.previous_review_result}") - logger.info(f"Failure Patterns: {enhanced_context.failure_patterns}") - logger.info(f"Failed Actions: {enhanced_context.failed_actions}") - logger.info(f"Successful Actions: {enhanced_context.successful_actions}") + logger.debug(f"Previous Improvements: {enhanced_context.improvements}") + logger.debug(f"Previous Review Result: {enhanced_context.previous_review_result}") + logger.debug(f"Failure Patterns: {enhanced_context.failure_patterns}") + logger.debug(f"Failed Actions: {enhanced_context.failed_actions}") + logger.debug(f"Successful Actions: {enhanced_context.successful_actions}") logger.info("=== END RETRY CONTEXT ===") - available_docs = self.service.getAvailableDocuments(workflow) - available_connections = self.service.getConnectionReferenceList() - - # Log available resources for debugging - logger.info("=== AVAILABLE RESOURCES FOR ACTION GENERATION ===") - logger.info(f"Available Documents: {available_docs}") - # Note: available_docs is now a string description, not a list - logger.info(f"Available Connections: {len(available_connections) if available_connections else 0}") - if available_connections: - for i, conn in enumerate(available_connections[:5]): # Show first 5 - logger.info(f" Conn {i+1}: {conn}") - if len(available_connections) > 5: - logger.info(f" ... 
and {len(available_connections) - 5} more connections") - logger.info("=== END AVAILABLE RESOURCES ===") + # Log that we're starting action generation + logger.info("=== STARTING ACTION GENERATION ===") # Create proper context object for action definition if enhanced_context and isinstance(enhanced_context, TaskContext): @@ -326,8 +315,8 @@ class HandlingTasks: task_step=enhanced_context.task_step, workflow=enhanced_context.workflow, workflow_id=enhanced_context.workflow_id, - available_documents=enhanced_context.available_documents or available_docs, - available_connections=enhanced_context.available_connections or available_connections, + available_documents=enhanced_context.available_documents, + available_connections=enhanced_context.available_connections, previous_results=enhanced_context.previous_results or previous_results or [], previous_handover=enhanced_context.previous_handover, improvements=enhanced_context.improvements or [], @@ -346,8 +335,8 @@ class HandlingTasks: task_step=task_step, workflow=workflow, workflow_id=workflow.id, - available_documents=available_docs, - available_connections=available_connections, + available_documents=None, + available_connections=None, previous_results=previous_results or [], previous_handover=None, improvements=[], @@ -364,30 +353,22 @@ class HandlingTasks: # Check workflow status before calling AI service self._checkWorkflowStopped() - # Log the final action context being sent to AI - logger.info("=== FINAL ACTION CONTEXT FOR AI ===") - logger.info(f"Task Step ID: {action_context.task_step.id if action_context.task_step else 'None'}") - logger.info(f"Task Step Objective: {action_context.task_step.objective if action_context.task_step else 'None'}") - logger.info(f"Workflow ID: {action_context.workflow_id}") - logger.info(f"Available Documents: {action_context.available_documents or 'No documents available'}") - logger.info(f"Available Connections Count: {len(action_context.available_connections) if action_context.available_connections else 0}") - logger.info(f"Previous Results Count: {len(action_context.previous_results) if action_context.previous_results else 0}") - logger.info(f"Retry Count: {action_context.retry_count}") - logger.info(f"Is Regeneration: {action_context.is_regeneration}") - logger.info("=== END ACTION CONTEXT ===") - # Generate the action definition prompt action_prompt = await createActionDefinitionPrompt(action_context, self.service) + # Trace action planning prompt + self.service.writeTraceLog("Action Plan Prompt", action_prompt) + prompt = await self.service.callAiTextAdvanced(action_prompt) # Check if AI response is valid if not prompt: raise ValueError("AI service returned no response") - # Log the full AI response for debugging - logger.debug("=== FULL AI RESPONSE ===") - logger.debug(prompt) - logger.debug("=== END AI RESPONSE ===") + # Log action response received + logger.info("=== ACTION PLAN AI RESPONSE RECEIVED ===") + logger.info(f"Response length: {len(prompt) if prompt else 0}") + # Trace action planning response + self.service.writeTraceLog("Action Plan Response", prompt) # Inline parseActionResponse logic here json_start = prompt.find('{') @@ -875,23 +856,21 @@ class HandlingTasks: # Use promptFactory for review prompt prompt = createResultReviewPrompt(review_context, self.service) - # Log the full result review prompt being sent to AI for debugging + # Log result review prompt sent to AI logger.info("=== RESULT REVIEW PROMPT SENT TO AI ===") logger.info(f"Task: {task_step.objective}") 
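+        # Only summary counts are logged at info level here; the full review
+        # prompt and AI response are written to the trace log below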
logger.info(f"Action Results Count: {len(review_context.action_results) if review_context.action_results else 0}") logger.info(f"Task Actions Count: {len(review_context.task_actions) if review_context.task_actions else 0}") - logger.info("=== FULL RESULT REVIEW PROMPT ===") - logger.info(prompt) - logger.info("=== END RESULT REVIEW PROMPT ===") + # Trace result review prompt + self.service.writeTraceLog("Result Review Prompt", prompt) response = await self.service.callAiTextAdvanced(prompt) - # Log the full AI response for result review + # Log result review response received logger.info("=== RESULT REVIEW AI RESPONSE RECEIVED ===") logger.info(f"Response length: {len(response) if response else 0}") - logger.debug("=== FULL RESULT REVIEW AI RESPONSE ===") - logger.debug(response) - logger.debug("=== END RESULT REVIEW AI RESPONSE ===") + # Trace result review response + self.service.writeTraceLog("Result Review Response", response) # Inline parseReviewResponse logic here json_start = response.find('{') @@ -1095,6 +1074,17 @@ class HandlingTasks: ) result_label = action.execResultLabel + # Trace action result (without document data) + action_result_trace = { + "method": action.execMethod, + "action": action.execAction, + "success": result.success, + "error": result.error, + "resultLabel": result_label, + "documentsCount": len(result.documents) if result.documents else 0 + } + self.service.writeTraceLog("Action Result", action_result_trace) + # Process documents from the action result created_documents = [] if result.success: diff --git a/modules/chat/handling/promptFactory.py b/modules/chat/handling/promptFactory.py index ada386ba..2890e7bf 100644 --- a/modules/chat/handling/promptFactory.py +++ b/modules/chat/handling/promptFactory.py @@ -3,14 +3,68 @@ import json import logging -from typing import Any, Dict +from typing import Any, Dict, List from modules.interfaces.interfaceChatModel import TaskContext, ReviewContext +from modules.chat.documents.documentUtility import getFileExtension # Set up logger logger = logging.getLogger(__name__) # Prompt creation helpers extracted from managerChat.py +def _getAvailableDocuments(workflow) -> str: + """ + Get simple description of available documents for task planning. 
+ + Args: + workflow: ChatWorkflow object + + Returns: + str: Simple description of document availability + """ + total_documents = 0 + document_types = set() + + for message in workflow.messages: + if message.documents: + total_documents += len(message.documents) + for doc in message.documents: + try: + file_extension = getFileExtension(doc.fileName) + if file_extension: + document_types.add(file_extension.upper()) + except: + pass + + if total_documents == 0: + return "No documents available" + elif len(document_types) == 0: + return f"{total_documents} document(s) available" + else: + types_str = ", ".join(sorted(document_types)) + return f"{total_documents} document(s) available ({types_str} files)" + +def _getConnectionReferenceList(service) -> List[str]: + """Get list of all UserConnection objects as references with enhanced state information""" + connections = [] + # Get user connections through AppObjects interface + user_connections = service.interfaceApp.getUserConnections(service.user.id) + + refreshed_count = 0 + for conn in user_connections: + # Get enhanced connection reference with state information + enhanced_ref = service.getConnectionReferenceFromUserConnection(conn) + connections.append(enhanced_ref) + + # Count refreshed tokens + if "refreshed" in enhanced_ref: + refreshed_count += 1 + + # Sort by connection reference + if refreshed_count > 0: + logger.info(f"Refreshed {refreshed_count} connection tokens while building action planning prompt") + return sorted(connections) + def _getPreviousRoundContext(service, workflow) -> str: """Get context from previous workflow rounds to help understand follow-up prompts""" try: @@ -98,8 +152,8 @@ def createTaskPlanningPrompt(context: TaskContext, service) -> str: # Extract user request from context - use Pydantic model directly user_request = context.task_step.objective if context.task_step else 'No request specified' - # Extract available documents from context - use Pydantic model directly - available_documents = context.available_documents or "No documents available" + # Get available documents using generic function + available_documents = _getAvailableDocuments(context.workflow) if context.workflow else "No documents available" # Get previous workflow round context for better understanding of follow-up prompts previous_round_context = _getPreviousRoundContext(service, context.workflow) @@ -226,7 +280,9 @@ async def createActionDefinitionPrompt(context: TaskContext, service) -> str: # Get enhanced document context using the new method available_documents_str = service.getEnhancedDocumentContext() - connRefs = service.getConnectionReferenceList() + # Get available documents and connections using generic functions + available_docs_summary = _getAvailableDocuments(context.workflow) + connRefs = _getConnectionReferenceList(service) # Create a structured JSON format for better AI parsing # This replaces the old hard-to-read format with a clean JSON structure @@ -603,7 +659,8 @@ IMPORTANT NOTES: - Always include a user-friendly userMessage for each action in the user's language ({user_language}). 
- The examples above show German user messages as reference - adapt the language to match the USER LANGUAGE specified above.""" - logging.debug(f"[ACTION PLAN PROMPT] Enhanced Document Context:\n{available_documents_str}\nUser Connections Section:\n{available_connections_str}\nAvailable Methods (detailed):\n{available_methods_str}") + # Removed sensitive data from debug logging + logging.debug(f"[ACTION PLAN PROMPT] Document context and methods prepared") return prompt diff --git a/modules/chat/managerChat.py b/modules/chat/managerChat.py index 6be21739..882d46e3 100644 --- a/modules/chat/managerChat.py +++ b/modules/chat/managerChat.py @@ -2,9 +2,8 @@ import logging from typing import Dict, Any, List from modules.interfaces.interfaceAppModel import User from modules.interfaces.interfaceChatModel import ChatWorkflow, UserInputRequest, TaskStep, TaskAction, ActionResult, ReviewResult, TaskPlan, WorkflowResult, TaskContext -from modules.chat.serviceCenter import ServiceCenter from modules.interfaces.interfaceChatObjects import ChatObjects -from .handling.handlingTasks import HandlingTasks, WorkflowStoppedException +from modules.chat.handling.handlingTasks import HandlingTasks, WorkflowStoppedException logger = logging.getLogger(__name__) @@ -16,21 +15,19 @@ class ChatManager: def __init__(self, currentUser: User, chatInterface: ChatObjects): self.currentUser = currentUser self.chatInterface = chatInterface - self.service: ServiceCenter = None self.workflow: ChatWorkflow = None self.handlingTasks: HandlingTasks = None async def initialize(self, workflow: ChatWorkflow) -> None: """Initialize chat manager with workflow""" self.workflow = workflow - self.service = ServiceCenter(self.currentUser, self.workflow) - self.handlingTasks = HandlingTasks(self.chatInterface, self.service, self.workflow) + self.handlingTasks = HandlingTasks(self.chatInterface, self.currentUser, self.workflow) + async def executeUnifiedWorkflow(self, userInput: UserInputRequest, workflow: ChatWorkflow) -> WorkflowResult: """Unified Workflow Execution""" try: logger.info(f"Starting unified workflow execution for workflow {workflow.id}") - logger.debug(f"User request: {userInput.prompt}") # Phase 1: High-Level Task Planning logger.info("Phase 1: Generating task plan") @@ -54,8 +51,8 @@ class ChatManager: task_step=task_step, workflow=workflow, workflow_id=workflow.id, - available_documents=self.service.getAvailableDocuments(workflow), - available_connections=self.service.getConnectionReferenceList(), + available_documents=None, + available_connections=None, previous_results=previous_results, previous_handover=None, improvements=[], diff --git a/modules/chat/serviceCenter.py b/modules/chat/serviceCenter.py index 9160a3ae..55648ead 100644 --- a/modules/chat/serviceCenter.py +++ b/modules/chat/serviceCenter.py @@ -15,7 +15,7 @@ from modules.interfaces.interfaceComponentObjects import getInterface as getComp from modules.interfaces.interfaceAppObjects import getInterface as getAppObjects from modules.chat.documents.documentExtraction import DocumentExtraction from modules.chat.documents.documentUtility import getFileExtension, getMimeTypeFromExtension, detectContentTypeFromData -from modules.chat.methodBase import MethodBase +from modules.methods.methodBase import MethodBase from modules.shared.timezoneUtils import get_utc_timestamp import uuid @@ -584,38 +584,7 @@ Please provide a comprehensive summary of this conversation.""" logger.error(f"Error getting documents from document list: {str(e)}") return [] - # ===== Functions 
for Prompts + Actions: Connection References generation and resolution ===== - - def getConnectionReferenceList(self) -> List[str]: - """Get list of all UserConnection objects as references with enhanced state information""" - connections = [] - # Get user connections through AppObjects interface - logger.debug(f"getConnectionReferenceList: Service center user ID: {self.user.id}") - logger.debug(f"getConnectionReferenceList: Service center user type: {type(self.user)}") - logger.debug(f"getConnectionReferenceList: Service center user object: {self.user}") - - user_connections = self.interfaceApp.getUserConnections(self.user.id) - logger.debug(f"getConnectionReferenceList: User ID: {self.user.id}") - logger.debug(f"getConnectionReferenceList: Raw user connections: {user_connections}") - logger.debug(f"getConnectionReferenceList: User connections type: {type(user_connections)}") - logger.debug(f"getConnectionReferenceList: User connections length: {len(user_connections) if user_connections else 0}") - - refreshed_count = 0 - for conn in user_connections: - # Get enhanced connection reference with state information - enhanced_ref = self.getConnectionReferenceFromUserConnection(conn) - logger.debug(f"getConnectionReferenceList: Enhanced ref for connection {conn.id}: {enhanced_ref}") - connections.append(enhanced_ref) - - # Count refreshed tokens - if "refreshed" in enhanced_ref: - refreshed_count += 1 - - # Sort by connection reference - logger.debug(f"getConnectionReferenceList: Final connections list: {connections}") - if refreshed_count > 0: - logger.info(f"Refreshed {refreshed_count} connection tokens while building action planning prompt") - return sorted(connections) + # ===== Functions for Prompts + Actions: Connection References generation and resolution ===== def getConnectionReferenceFromUserConnection(self, connection: UserConnection) -> str: """Get connection reference from UserConnection with enhanced state information""" @@ -692,12 +661,12 @@ Please provide a comprehensive summary of this conversation.""" # Try advanced AI first, with retries for attempt in range(max_retries): try: - prompt_size = self.calculateObjectSize(prompt) + prompt_size = self._calculateObjectSize(prompt) if context: - prompt_size += self.calculateObjectSize(context) + prompt_size += self._calculateObjectSize(context) response = await self.interfaceAiCalls.callAiTextAdvanced(prompt, context) - response_size = self.calculateObjectSize(response) - self.updateWorkflowStats(eventLabel="aicall.anthropic.text", bytesSent=prompt_size, bytesReceived=response_size) + response_size = self._calculateObjectSize(response) + self._updateWorkflowStats(eventLabel="aicall.anthropic.text", bytesSent=prompt_size, bytesReceived=response_size) return response except Exception as e: last_error = e @@ -726,12 +695,12 @@ Please provide a comprehensive summary of this conversation.""" last_error = None for attempt in range(max_retries): try: - prompt_size = self.calculateObjectSize(prompt) + prompt_size = self._calculateObjectSize(prompt) if context: - prompt_size += self.calculateObjectSize(context) + prompt_size += self._calculateObjectSize(context) response = await self.interfaceAiCalls.callAiTextBasic(prompt, context) - response_size = self.calculateObjectSize(response) - self.updateWorkflowStats(eventLabel="aicall.openai.text", bytesSent=prompt_size, bytesReceived=response_size) + response_size = self._calculateObjectSize(response) + self._updateWorkflowStats(eventLabel="aicall.openai.text", bytesSent=prompt_size, 
bytesReceived=response_size) return response except Exception as e: last_error = e @@ -745,34 +714,34 @@ Please provide a comprehensive summary of this conversation.""" async def callAiImageBasic(self, prompt: str, imageData: str, mimeType: str) -> str: """Basic image processing using OpenAI""" # Calculate prompt size for stats - prompt_size = self.calculateObjectSize(prompt) - prompt_size += self.calculateObjectSize(imageData) + prompt_size = self._calculateObjectSize(prompt) + prompt_size += self._calculateObjectSize(imageData) # Call AI response = await self.interfaceAiCalls.callAiImageBasic(prompt, imageData, mimeType) # Calculate response size for stats - response_size = self.calculateObjectSize(response) + response_size = self._calculateObjectSize(response) # Update stats - self.updateWorkflowStats(eventLabel="aicall.openai.image", bytesSent=prompt_size, bytesReceived=response_size) + self._updateWorkflowStats(eventLabel="aicall.openai.image", bytesSent=prompt_size, bytesReceived=response_size) return response async def callAiImageAdvanced(self, prompt: str, imageData: str, mimeType: str) -> str: """Advanced image processing using Anthropic""" # Calculate prompt size for stats - prompt_size = self.calculateObjectSize(prompt) - prompt_size += self.calculateObjectSize(imageData) + prompt_size = self._calculateObjectSize(prompt) + prompt_size += self._calculateObjectSize(imageData) # Call AI response = await self.interfaceAiCalls.callAiImageAdvanced(prompt, imageData, mimeType) # Calculate response size for stats - response_size = self.calculateObjectSize(response) + response_size = self._calculateObjectSize(response) # Update stats - self.updateWorkflowStats(eventLabel="aicall.anthropic.image", bytesSent=prompt_size, bytesReceived=response_size) + self._updateWorkflowStats(eventLabel="aicall.anthropic.image", bytesSent=prompt_size, bytesReceived=response_size) return response @@ -957,9 +926,9 @@ Please provide a comprehensive summary of this conversation.""" return document - # ===== Internal public helper functions ===== + # ===== Internal helper functions ===== - def updateWorkflowStats(self, eventLabel: str = None, bytesSent: int = 0, bytesReceived: int = 0, tokenCount: int = 0) -> None: + def _updateWorkflowStats(self, eventLabel: str = None, bytesSent: int = 0, bytesReceived: int = 0, tokenCount: int = 0) -> None: """ Centralized function to update workflow statistics in database and running workflow. @@ -983,7 +952,7 @@ Please provide a comprehensive summary of this conversation.""" except Exception as e: logger.error(f"Error updating workflow stats: {str(e)}") - def calculateObjectSize(self, obj: Any) -> int: + def _calculateObjectSize(self, obj: Any) -> int: """ Calculate the size of an object in bytes. @@ -1008,38 +977,6 @@ Please provide a comprehensive summary of this conversation.""" logger.error(f"Error calculating object size: {str(e)}") return 0 - def getAvailableDocuments(self, workflow) -> str: - """ - Get simple description of available documents for task planning. 
- - Args: - workflow: ChatWorkflow object - - Returns: - str: Simple description of document availability - """ - total_documents = 0 - document_types = set() - - for message in workflow.messages: - if message.documents: - total_documents += len(message.documents) - for doc in message.documents: - try: - file_extension = getFileExtension(doc.fileName) - if file_extension: - document_types.add(file_extension.upper()) - except: - pass - - if total_documents == 0: - return "No documents available" - elif len(document_types) == 0: - return f"{total_documents} document(s) available" - else: - types_str = ", ".join(sorted(document_types)) - return f"{total_documents} document(s) available ({types_str} files)" - # ===== Functions for Manager: Execution Tools ===== async def executeAction(self, methodName: str, actionName: str, parameters: Dict[str, Any]) -> ActionResult: @@ -1090,6 +1027,85 @@ Please provide a comprehensive summary of this conversation.""" """Set user language for the service center""" self.user.language = language + def writeTraceLog(self, contextText: str, data: Any) -> None: + """Write trace data to configured trace file if in debug mode""" + try: + import logging + import os + from datetime import datetime, UTC + from modules.shared.configuration import APP_CONFIG + + # Only write if logger is in debug mode + if logger.level > logging.DEBUG: + return + + # Get log directory from configuration + logDir = APP_CONFIG.get("APP_LOGGING_LOG_DIR", "./") + if not os.path.isabs(logDir): + # If relative path, make it relative to the gateway directory + gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + logDir = os.path.join(gatewayDir, logDir) + + # Ensure log directory exists + os.makedirs(logDir, exist_ok=True) + + # Create trace file path + trace_file = os.path.join(logDir, "log_trace.log") + + # Format the trace entry + timestamp = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + trace_entry = f"[{timestamp}] {contextText}\n" + + # Add data if provided + if data is not None: + if isinstance(data, (dict, list)): + import json + trace_entry += f"Data: {json.dumps(data, indent=2, default=str)}\n" + else: + trace_entry += f"Data: {str(data)}\n" + + trace_entry += "-" * 80 + "\n\n" + + # Write to trace file + with open(trace_file, "a", encoding="utf-8") as f: + f.write(trace_entry) + + except Exception as e: + # Don't log trace errors to avoid recursion + pass + + def clearTraceLog(self) -> None: + """Clear the trace log file""" + try: + import logging + import os + from modules.shared.configuration import APP_CONFIG + + # Get log directory from configuration + logDir = APP_CONFIG.get("APP_LOGGING_LOG_DIR", "./") + if not os.path.isabs(logDir): + # If relative path, make it relative to the gateway directory + gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + logDir = os.path.join(gatewayDir, logDir) + + # Create trace file path + trace_file = os.path.join(logDir, "log_trace.log") + + # Only clear if logger is in debug mode + if logger.level > logging.DEBUG: + # Delete file if not in debug mode + if os.path.exists(trace_file): + os.remove(trace_file) + return + + # Create empty file if in debug mode + with open(trace_file, "w", encoding="utf-8") as f: + f.write("") + + except Exception as e: + # Don't log trace errors to avoid recursion + pass + # ===== Functions for Manager: Workflow Tools ===== def setWorkflowContext(self, round_number: int = None, task_number: int = None, action_number: int = 
None): diff --git a/modules/connectors/connectorGoogleSpeech.py b/modules/connectors/connectorGoogleSpeech.py index 1243b70e..ed547b2b 100644 --- a/modules/connectors/connectorGoogleSpeech.py +++ b/modules/connectors/connectorGoogleSpeech.py @@ -29,10 +29,10 @@ class ConnectorGoogleSpeech: """ try: # Get JSON key from config.ini - api_key = APP_CONFIG.get("Connector_GoogleSpeech_API_KEY") + api_key = APP_CONFIG.get("Connector_GoogleSpeech_API_KEY_SECRET") if not api_key or api_key == "YOUR_GOOGLE_SERVICE_ACCOUNT_JSON_KEY_HERE": - raise ValueError("Google Speech API key not configured. Please set Connector_GoogleSpeech_API_KEY in config.ini with the full service account JSON key") + raise ValueError("Google Speech API key not configured. Please set Connector_GoogleSpeech_API_KEY_SECRET in config.ini with the full service account JSON key") # Parse the JSON key and set up authentication try: diff --git a/modules/connectors/connectorWebTavily.py b/modules/connectors/connectorWebTavily.py index 7a9ec038..97410493 100644 --- a/modules/connectors/connectorWebTavily.py +++ b/modules/connectors/connectorWebTavily.py @@ -69,9 +69,9 @@ class ConnectorTavily(WebSearchBase, WebCrawlBase, WebScrapeBase): @classmethod async def create(cls): - api_key = APP_CONFIG.get("Connector_WebTavily_API_KEY") + api_key = APP_CONFIG.get("Connector_WebTavily_API_KEY_SECRET") if not api_key: - raise ValueError("Tavily API key not configured. Please set Connector_WebTavily_API_KEY in config.ini") + raise ValueError("Tavily API key not configured. Please set Connector_WebTavily_API_KEY_SECRET in config.ini") return cls(client=AsyncTavilyClient(api_key=api_key)) async def search_urls(self, request: WebSearchRequest) -> WebSearchActionResult: diff --git a/modules/services/serviceValueonChat.py b/modules/features/featureChatPlayground.py similarity index 98% rename from modules/services/serviceValueonChat.py rename to modules/features/featureChatPlayground.py index 1d50b134..85d8c2d4 100644 --- a/modules/services/serviceValueonChat.py +++ b/modules/features/featureChatPlayground.py @@ -30,7 +30,7 @@ class WorkflowManager: await self.chatManager.initialize(workflow) # Set user language - self.chatManager.service.setUserLanguage(userInput.userLanguage) + self.chatManager.handlingTasks.service.setUserLanguage(userInput.userLanguage) # Send first message message = await self._sendFirstMessage(userInput, workflow) @@ -170,10 +170,13 @@ class WorkflowManager: if message: workflow.messages.append(message) + # Clear trace log for new workflow session + self.chatManager.handlingTasks.service.clearTraceLog() + # Add documents if any, now with messageId if userInput.listFileId: # Process file IDs and add to message data - documents = await self.chatManager.service.processFileIds(userInput.listFileId, message.id) + documents = await self.chatManager.handlingTasks.service.processFileIds(userInput.listFileId, message.id) message.documents = documents # Update the message with documents in database self.chatInterface.updateMessage(message.id, {"documents": [doc.to_dict() for doc in documents]}) diff --git a/modules/services/serviceNeutralization.py b/modules/features/featureNeutralizePlayground.py similarity index 100% rename from modules/services/serviceNeutralization.py rename to modules/features/featureNeutralizePlayground.py diff --git a/modules/services/serviceDeltaSync.py b/modules/features/featureSyncDelta.py similarity index 100% rename from modules/services/serviceDeltaSync.py rename to modules/features/featureSyncDelta.py diff 
--git a/modules/interfaces/interfaceChatObjects.py b/modules/interfaces/interfaceChatObjects.py index 63cec9c7..7b6806da 100644 --- a/modules/interfaces/interfaceChatObjects.py +++ b/modules/interfaces/interfaceChatObjects.py @@ -1155,7 +1155,7 @@ class ChatObjects: # Remove the 'Workflow started' log entry # Start workflow processing - from modules.services.serviceValueonChat import WorkflowManager + from modules.features.featureChatPlayground import WorkflowManager workflowManager = WorkflowManager(self, currentUser) # Start the workflow processing asynchronously diff --git a/modules/methods/methodAi.py b/modules/methods/methodAi.py index e69decd9..eda36f69 100644 --- a/modules/methods/methodAi.py +++ b/modules/methods/methodAi.py @@ -7,7 +7,7 @@ import logging from typing import Dict, Any, List, Optional from datetime import datetime, UTC -from modules.chat.methodBase import MethodBase, action +from modules.methods.methodBase import MethodBase, action from modules.interfaces.interfaceChatModel import ActionResult from modules.shared.timezoneUtils import get_utc_timestamp diff --git a/modules/chat/methodBase.py b/modules/methods/methodBase.py similarity index 100% rename from modules/chat/methodBase.py rename to modules/methods/methodBase.py diff --git a/modules/methods/methodDocument.py b/modules/methods/methodDocument.py index 89e35a88..8cd3ac1c 100644 --- a/modules/methods/methodDocument.py +++ b/modules/methods/methodDocument.py @@ -9,7 +9,7 @@ import re from typing import Dict, Any, List, Optional from datetime import datetime, UTC -from modules.chat.methodBase import MethodBase, action +from modules.methods.methodBase import MethodBase, action from modules.interfaces.interfaceChatModel import ActionResult from modules.shared.timezoneUtils import get_utc_timestamp diff --git a/modules/methods/methodOutlook.py b/modules/methods/methodOutlook.py index 8a2b5b7d..23b17985 100644 --- a/modules/methods/methodOutlook.py +++ b/modules/methods/methodOutlook.py @@ -81,7 +81,7 @@ from datetime import datetime, UTC import json import uuid -from modules.chat.methodBase import MethodBase, action +from modules.methods.methodBase import MethodBase, action from modules.interfaces.interfaceChatModel import ActionResult from modules.interfaces.interfaceAppModel import ConnectionStatus from modules.shared.timezoneUtils import get_utc_timestamp diff --git a/modules/methods/methodSharepoint.py b/modules/methods/methodSharepoint.py index 21decac0..ca99c06a 100644 --- a/modules/methods/methodSharepoint.py +++ b/modules/methods/methodSharepoint.py @@ -13,7 +13,7 @@ from urllib.parse import urlparse import aiohttp import asyncio -from modules.chat.methodBase import MethodBase, action +from modules.methods.methodBase import MethodBase, action from modules.interfaces.interfaceChatModel import ActionResult from modules.shared.timezoneUtils import get_utc_timestamp diff --git a/modules/methods/methodWeb.py b/modules/methods/methodWeb.py index 96c597db..014498de 100644 --- a/modules/methods/methodWeb.py +++ b/modules/methods/methodWeb.py @@ -2,7 +2,7 @@ import logging import csv import io from typing import Any, Dict -from modules.chat.methodBase import MethodBase, action +from modules.methods.methodBase import MethodBase, action from modules.interfaces.interfaceChatModel import ActionResult, ActionDocument from modules.interfaces.interfaceWebObjects import WebInterface from modules.interfaces.interfaceWebModel import ( diff --git a/modules/neutralizer/neutralizer.py b/modules/neutralizer/neutralizer.py index 
87427611..f8677465 100644 --- a/modules/neutralizer/neutralizer.py +++ b/modules/neutralizer/neutralizer.py @@ -4,481 +4,109 @@ Unterstützt TXT, JSON, CSV, Excel und Word-Dateien Mehrsprachig: DE, EN, FR, IT """ -import re -import json -import pandas as pd -import docx -from pathlib import Path -from typing import Dict, List, Tuple, Any, Union, Optional -from dataclasses import dataclass -import uuid import logging -import traceback -import csv -from datetime import datetime -import xml.etree.ElementTree as ET -import os -import random -from io import StringIO -from modules.neutralizer.patterns import Pattern, HeaderPatterns, DataPatterns, get_pattern_for_header, find_patterns_in_text, TextTablePatterns -import base64 +from typing import Dict, List, Any + +# Import all necessary classes and functions +from modules.neutralizer.subProcessCommon import ProcessResult, CommonUtils +from modules.neutralizer.subProcessText import TextProcessor, PlainText +from modules.neutralizer.subProcessList import ListProcessor, TableData +from modules.neutralizer.subProcessBinary import BinaryProcessor, BinaryData +from modules.neutralizer.subParseString import StringParser +from modules.neutralizer.subPatterns import Pattern, HeaderPatterns, DataPatterns, TextTablePatterns # Configure logging logger = logging.getLogger(__name__) -@dataclass -class TableData: - """Repräsentiert Tabellendaten""" - headers: List[str] - rows: List[List[str]] - source_type: str # 'csv', 'json', 'xml', 'text_table' - -@dataclass -class PlainText: - """Repräsentiert normalen Text""" - content: str - source_type: str # 'txt', 'docx', 'text_plain' - -@dataclass -class ProcessResult: - """Result of content processing""" - data: Any - mapping: Dict[str, str] - replaced_fields: List[str] - processed_info: Dict[str, Any] # Additional processing information +# Export all classes and functions for external use +__all__ = [ + 'DataAnonymizer', + 'ProcessResult', + 'CommonUtils', + 'TextProcessor', + 'PlainText', + 'ListProcessor', + 'TableData', + 'BinaryProcessor', + 'BinaryData', + 'StringParser', + 'Pattern', + 'HeaderPatterns', + 'DataPatterns', + 'TextTablePatterns' +] class DataAnonymizer: """Hauptklasse für die Datenanonymisierung""" def __init__(self, names_to_parse: List[str] = None): - """Initialize the anonymizer with patterns and custom names + """Initialize the anonymizer with specialized processors Args: names_to_parse: List of names to parse and replace (case-insensitive) """ - self.header_patterns = HeaderPatterns.patterns - self.data_patterns = DataPatterns.patterns self.names_to_parse = names_to_parse or [] - self.replaced_fields = set() - self.mapping = {} - self.processing_info = [] + + # Initialize specialized processors + self.text_processor = TextProcessor(names_to_parse) + self.list_processor = ListProcessor(names_to_parse) + self.binary_processor = BinaryProcessor() + + # Common utilities + self.common_utils = CommonUtils() - def _normalize_whitespace(self, text: str) -> str: - """Normalize whitespace in text""" - text = re.sub(r'\s+', ' ', text) - text = text.replace('\r\n', '\n').replace('\r', '\n') - return text.strip() - - - def _is_table_line(self, line: str) -> bool: - """Check if a line represents a table row""" - return bool(re.match(r'^\s*[^:]+:\s*[^:]+$', line) or - re.match(r'^\s*[^\t]+\t[^\t]+$', line)) - - def _extract_tables_from_text(self, content: str) -> Tuple[List[TableData], List[PlainText]]: - """ - Extract tables and plain text from content - - Args: - content: Content to process - - Returns: - 
Tuple of (list of tables, list of plain text sections) - """ - tables = [] - plain_texts = [] - - # Process the entire content as plain text - plain_texts.append(PlainText(content=content, source_type='text_plain')) - - return tables, plain_texts - - def _anonymize_table(self, table: TableData) -> TableData: - """Anonymize table data""" - try: - anonymized_table = TableData( - headers=table.headers.copy(), - rows=[row.copy() for row in table.rows], - source_type=table.source_type - ) - - for i, header in enumerate(anonymized_table.headers): - pattern = get_pattern_for_header(header, self.header_patterns) - if pattern: - for row in anonymized_table.rows: - if row[i] is not None: - original = str(row[i]) - if original not in self.mapping: - self.mapping[original] = pattern.replacement_template.format(len(self.mapping) + 1) - row[i] = self.mapping[original] - - return anonymized_table - - except Exception as e: - logger.error(f"Error anonymizing table: {str(e)}") - raise - - def _anonymize_plain_text(self, text: PlainText) -> PlainText: - """Anonymize plain text content using simple search-and-replace approach""" - try: - current_text = text.content - - # Step 1: Replace custom names first (simple regex search-and-replace) - for name in self.names_to_parse: - if not name.strip(): - continue - - # Create case-insensitive regex pattern with word boundaries - pattern = re.compile(r'\b' + re.escape(name.strip()) + r'\b', re.IGNORECASE) - - # Find all matches for this name - matches = list(pattern.finditer(current_text)) - - # Replace each match with a placeholder - for match in reversed(matches): # Process from right to left to avoid position shifts - matched_text = match.group() - if matched_text not in self.mapping: - # Generate a UUID for the placeholder - import uuid - placeholder_id = str(uuid.uuid4()) - self.mapping[matched_text] = f"[name.{placeholder_id}]" - - replacement = self.mapping[matched_text] - start, end = match.span() - current_text = current_text[:start] + replacement + current_text[end:] - - # Step 2: Replace pattern-based matches (emails, phones, etc.) 
- # Use the same simple approach for patterns - pattern_matches = find_patterns_in_text(current_text, self.data_patterns) - - # Process pattern matches from right to left to avoid position shifts - for pattern_name, matched_text, start, end in reversed(pattern_matches): - # Skip if already a placeholder - if re.match(r'\[[a-z]+\.[a-f0-9-]+\]', matched_text): - continue - - # Skip if contains placeholder characters - if '[' in matched_text or ']' in matched_text: - continue - - if matched_text not in self.mapping: - # Generate a UUID for the placeholder - import uuid - placeholder_id = str(uuid.uuid4()) - # Create placeholder in format [type.uuid] - type_mapping = { - 'email': 'email', - 'phone': 'phone', - 'address': 'address', - 'id': 'id' - } - placeholder_type = type_mapping.get(pattern_name, 'data') - self.mapping[matched_text] = f"[{placeholder_type}.{placeholder_id}]" - - replacement = self.mapping[matched_text] - current_text = current_text[:start] + replacement + current_text[end:] - - return PlainText(content=current_text, source_type=text.source_type) - - except Exception as e: - logger.error(f"Error anonymizing plain text: {str(e)}") - raise - - def _anonymize_json_value(self, value: Any, key: str = None) -> Any: - """ - Recursively anonymize JSON values based on their keys and content - - Args: - value: Value to anonymize - key: Key name (if part of a key-value pair) - - Returns: - Anonymized value - """ - if isinstance(value, dict): - return {k: self._anonymize_json_value(v, k) for k, v in value.items()} - elif isinstance(value, list): - return [self._anonymize_json_value(item) for item in value] - elif isinstance(value, str): - # Check if this is a key we should process - if key: - pattern = get_pattern_for_header(key, self.header_patterns) - if pattern: - if value not in self.mapping: - # Generate a UUID for the placeholder - import uuid - placeholder_id = str(uuid.uuid4()) - # Create placeholder in format [type.uuid] - type_mapping = { - 'email': 'email', - 'phone': 'phone', - 'name': 'name', - 'address': 'address', - 'id': 'id' - } - placeholder_type = type_mapping.get(pattern.name, 'data') - self.mapping[value] = f"[{placeholder_type}.{placeholder_id}]" - return self.mapping[value] - - # Check if the value itself matches any patterns - pattern_matches = find_patterns_in_text(value, self.data_patterns) - custom_name_matches = self._find_custom_names(value) - - if pattern_matches or custom_name_matches: - # Use the first match's pattern or custom name - if pattern_matches: - pattern_name = pattern_matches[0][0] - if value not in self.mapping: - # Generate a UUID for the placeholder - import uuid - placeholder_id = str(uuid.uuid4()) - # Create placeholder in format [type.uuid] - type_mapping = { - 'email': 'email', - 'phone': 'phone', - 'name': 'name', - 'address': 'address', - 'id': 'id' - } - placeholder_type = type_mapping.get(pattern_name, 'data') - self.mapping[value] = f"[{placeholder_type}.{placeholder_id}]" - elif custom_name_matches: - if value not in self.mapping: - # Generate a UUID for the placeholder - import uuid - placeholder_id = str(uuid.uuid4()) - self.mapping[value] = f"[name.{placeholder_id}]" - return self.mapping[value] - - return value - else: - return value - - def _anonymize_xml_element(self, element: ET.Element, indent: str = '') -> str: - """ - Recursively process XML element and return formatted string - - Args: - element: XML element to process - indent: Current indentation level - - Returns: - Formatted XML string - """ - # Process attributes - 
processed_attrs = {} - for attr_name, attr_value in element.attrib.items(): - # Check if attribute name matches any header patterns - pattern = get_pattern_for_header(attr_name, self.header_patterns) - if pattern: - if attr_value not in self.mapping: - # Generate a UUID for the placeholder - import uuid - placeholder_id = str(uuid.uuid4()) - # Create placeholder in format [type.uuid] - type_mapping = { - 'email': 'email', - 'phone': 'phone', - 'name': 'name', - 'address': 'address', - 'id': 'id' - } - placeholder_type = type_mapping.get(pattern.name, 'data') - self.mapping[attr_value] = f"[{placeholder_type}.{placeholder_id}]" - processed_attrs[attr_name] = self.mapping[attr_value] - else: - # Check if attribute value matches any data patterns - matches = find_patterns_in_text(attr_value, self.data_patterns) - if matches: - pattern_name = matches[0][0] - pattern = next((p for p in self.data_patterns if p.name == pattern_name), None) - if pattern: - if attr_value not in self.mapping: - # Generate a UUID for the placeholder - import uuid - placeholder_id = str(uuid.uuid4()) - # Create placeholder in format [type.uuid] - type_mapping = { - 'email': 'email', - 'phone': 'phone', - 'name': 'name', - 'address': 'address', - 'id': 'id' - } - placeholder_type = type_mapping.get(pattern_name, 'data') - self.mapping[attr_value] = f"[{placeholder_type}.{placeholder_id}]" - processed_attrs[attr_name] = self.mapping[attr_value] - else: - processed_attrs[attr_name] = attr_value - else: - processed_attrs[attr_name] = attr_value - - attrs = ' '.join(f'{k}="{v}"' for k, v in processed_attrs.items()) - attrs = f' {attrs}' if attrs else '' - - # Process text content - text = element.text.strip() if element.text and element.text.strip() else '' - if text: - # Check if text matches any patterns or custom names - pattern_matches = find_patterns_in_text(text, self.data_patterns) - custom_name_matches = self._find_custom_names(text) - - if pattern_matches or custom_name_matches: - if pattern_matches: - pattern_name = pattern_matches[0][0] - pattern = next((p for p in self.data_patterns if p.name == pattern_name), None) - if pattern: - if text not in self.mapping: - # Generate a UUID for the placeholder - import uuid - placeholder_id = str(uuid.uuid4()) - # Create placeholder in format [type.uuid] - type_mapping = { - 'email': 'email', - 'phone': 'phone', - 'name': 'name', - 'address': 'address', - 'id': 'id' - } - placeholder_type = type_mapping.get(pattern_name, 'data') - self.mapping[text] = f"[{placeholder_type}.{placeholder_id}]" - text = self.mapping[text] - elif custom_name_matches: - if text not in self.mapping: - # Generate a UUID for the placeholder - import uuid - placeholder_id = str(uuid.uuid4()) - self.mapping[text] = f"[name.{placeholder_id}]" - text = self.mapping[text] - - # Process child elements - children = [] - for child in element: - child_str = self._anonymize_xml_element(child, indent + ' ') - children.append(child_str) - - # Build element string - if not children and not text: - return f"{indent}<{element.tag}{attrs}/>" - elif not children: - return f"{indent}<{element.tag}{attrs}>{text}" - else: - result = [f"{indent}<{element.tag}{attrs}>"] - if text: - result.append(f"{indent} {text}") - result.extend(children) - result.append(f"{indent}") - return '\n'.join(result) - - def process_content(self, content: str, content_type: str) -> ProcessResult: + def process_content(self, content: str, content_type: str = None) -> ProcessResult: """ Process content and return anonymized data Args: 
content: Content to process - content_type: Type of content ('csv', 'json', 'xml', 'text') + content_type: Type of content ('csv', 'json', 'xml', 'text', 'binary') + If None, will auto-detect Returns: ProcessResult: Contains anonymized data, mapping, replaced fields and processing info """ try: + # Auto-detect content type if not provided + if content_type is None: + content_type = self.common_utils.detect_content_type(content) # Check if content is binary data - is_binary = False - try: - # First, check if content looks like base64 (contains only base64 characters) - if re.match(r'^[A-Za-z0-9+/]*={0,2}$', content.strip()): - # Try to decode base64 if it looks like base64 - try: - decoded = base64.b64decode(content) - # If it's not valid text, consider it binary - decoded.decode('utf-8') - is_binary = True - except (base64.binascii.Error, UnicodeDecodeError): - is_binary = False - else: - is_binary = False - except Exception as e: - is_binary = False - - if is_binary: - # TODO: Implement binary data neutralization - # This would require: - # 1. Detecting binary data types (images, audio, video, etc.) - # 2. Implementing specific neutralization for each type - # 3. Handling metadata and embedded content - # 4. Preserving binary integrity while removing sensitive data - return ProcessResult(content, self.mapping, [], {'type': 'binary', 'status': 'not_implemented'}) - - replaced_fields = [] - processed_info = {} + if self.binary_processor.is_binary_content(content): + return self.binary_processor.process_binary_content(content) + # Route to appropriate processor based on content type if content_type in ['csv', 'json', 'xml']: - # Handle as table if content_type == 'csv': - df = pd.read_csv(StringIO(content), encoding='utf-8') - table = TableData( - headers=df.columns.tolist(), - rows=df.values.tolist(), - source_type='csv' - ) - processed_info['type'] = 'table' - processed_info['headers'] = table.headers - processed_info['row_count'] = len(table.rows) + result, mapping, replaced_fields, processed_info = self.list_processor.process_csv_content(content) elif content_type == 'json': - data = json.loads(content) - # Process JSON recursively - result = self._anonymize_json_value(data) - processed_info['type'] = 'json' - return ProcessResult(result, self.mapping, replaced_fields, processed_info) + result, mapping, replaced_fields, processed_info = self.list_processor.process_json_content(content) else: # xml - root = ET.fromstring(content) - # Process XML recursively with proper formatting - result = self._anonymize_xml_element(root) - processed_info['type'] = 'xml' - return ProcessResult(result, self.mapping, replaced_fields, processed_info) + result, mapping, replaced_fields, processed_info = self.list_processor.process_xml_content(content) - if not table.rows: - return ProcessResult(None, self.mapping, [], processed_info) - - anonymized_table = self._anonymize_table(table) - - # Track replaced fields - for i, header in enumerate(anonymized_table.headers): - for orig_row, anon_row in zip(table.rows, anonymized_table.rows): - if anon_row[i] != orig_row[i]: - replaced_fields.append(header) - - # Convert back to original format - if content_type == 'csv': - result = pd.DataFrame(anonymized_table.rows, columns=anonymized_table.headers) - elif content_type == 'json': - if len(anonymized_table.headers) == 1 and anonymized_table.headers[0] == 'value': - result = anonymized_table.rows[0][0] - else: - result = dict(zip(anonymized_table.headers, anonymized_table.rows[0])) - else: # xml - result = 
ET.tostring(root, encoding='unicode')
-
-            return ProcessResult(result, self.mapping, replaced_fields, processed_info)
+                return ProcessResult(result, mapping, replaced_fields, processed_info)
             else:
                 # Handle as text
-                # First, identify what needs to be replaced using table detection
-                tables, plain_texts = self._extract_tables_from_text(content)
-                processed_info['type'] = 'text'
-                processed_info['tables'] = [{'headers': t.headers, 'row_count': len(t.rows)} for t in tables]
-
-                # Process plain text sections
-                anonymized_texts = [self._anonymize_plain_text(text) for text in plain_texts]
-
-                # Combine all processed content
-                result = content
-                for i, (text, anonymized_text) in enumerate(zip(plain_texts, anonymized_texts)):
-                    if text.content != anonymized_text.content:
-                        result = result.replace(text.content, anonymized_text.content)
-
-                return ProcessResult(result, self.mapping, replaced_fields, processed_info)
+                result, mapping, replaced_fields, processed_info = self.text_processor.process_text_content(content)
+                return ProcessResult(result, mapping, replaced_fields, processed_info)

         except Exception as e:
             logger.error(f"Error processing content: {str(e)}")
-            return ProcessResult(None, self.mapping, [], {'type': 'error', 'error': str(e)})
\ No newline at end of file
+            return ProcessResult(None, {}, [], {'type': 'error', 'error': str(e)})
+
+    def get_mapping(self) -> Dict[str, str]:
+        """
+        Get the combined mapping from all processors
+
+        Returns:
+            Dict[str, str]: Combined mapping dictionary
+        """
+        text_mapping = self.text_processor.get_mapping()
+        list_mapping = self.list_processor.get_mapping()
+        return self.common_utils.merge_mappings(text_mapping, list_mapping)
+
+    def clear_mapping(self):
+        """Clear the mapping in all processors"""
+        self.text_processor.clear_mapping()
+        self.list_processor.clear_mapping()
\ No newline at end of file
diff --git a/modules/neutralizer/readme.md b/modules/neutralizer/readme.md
new file mode 100644
index 00000000..20d00816
--- /dev/null
+++ b/modules/neutralizer/readme.md
@@ -0,0 +1,91 @@
+# Neutralizer Module Structure
+
+This module provides DSGVO (GDPR)-compliant data anonymization for AI agent systems. The code has been refactored into specialized sub-modules for better maintainability and code reuse.
+
+## Module Overview
+
+### Core Module
+- **`neutralizer.py`** - Main DataAnonymizer class that orchestrates all processing
+
+### Specialized Processors
+- **`subProcessText.py`** - Handles plain text processing without header information
+- **`subProcessList.py`** - Handles structured data with headers (CSV, JSON, XML)
+- **`subProcessBinary.py`** - Handles binary data types (images, audio, video, etc.)
+
+### Utility Modules
+- **`subParseString.py`** - String parsing and replacement utilities for emails, phones, addresses, IDs and names
+- **`subProcessCommon.py`** - Common utilities and data structures shared across modules
+- **`subPatterns.py`** - Pattern definitions for data anonymization
+
+## Key Features
+
+### 1. Modular Architecture
+- **Separation of Concerns**: Each module handles a specific type of data processing
+- **Code Reuse**: Common functionality is centralized in utility modules
+- **Maintainability**: Easier to modify and extend individual components
+
+### 2. Processing Order
+1. **Pattern-based matches** (emails, phones, addresses, etc.) are processed FIRST
+2. **Custom names** from the user list are processed SECOND
+3. **Already anonymized content** (placeholders) is skipped
+
+### 3. Supported Data Types
+- **Text**: Plain text documents, emails, etc.
+- **Structured Data**: CSV, JSON, XML with headers
+- **Binary Data**: Images, audio, video (framework ready, implementation pending)
+
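As a quick orientation before the detailed sections below, here is a minimal sketch of how these three data types are routed through `DataAnonymizer.process_content`. The sample strings are invented, and the explicit `modules.neutralizer.neutralizer` import path simply mirrors the file added in this patch:

```python
from modules.neutralizer.neutralizer import DataAnonymizer

anonymizer = DataAnonymizer(names_to_parse=["John Doe"])

samples = {
    "plain text": "Contact John Doe at john.doe@example.com",
    "csv": "name,email\nJohn Doe,john.doe@example.com",
    "json": '{"name": "John Doe", "email": "john.doe@example.com"}',
}

for label, content in samples.items():
    # Omitting content_type triggers CommonUtils.detect_content_type()
    result = anonymizer.process_content(content)
    print(label, "->", result.processed_info.get("type"))
```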
+### 4. Placeholder Protection
+- Prevents re-anonymization of already processed content
+- Uses format `[tag.uuid]` for placeholders
+- Validates placeholder format before processing
+
+## Usage Example
+
+```python
+from modules.neutralizer import DataAnonymizer
+
+# Initialize with custom names
+anonymizer = DataAnonymizer(names_to_parse=['John Doe', 'Jane Smith'])
+
+# Process content (auto-detects type when content_type is omitted)
+result = anonymizer.process_content(content)
+
+# Or specify content type explicitly
+result = anonymizer.process_content(content, content_type='csv')
+
+# Get mapping of original values to placeholders
+mapping = anonymizer.get_mapping()
+```
+
+## Module Dependencies
+
+```
+neutralizer.py
+├── subProcessCommon.py (ProcessResult, CommonUtils)
+├── subProcessText.py (TextProcessor)
+├── subProcessList.py (ListProcessor)
+├── subProcessBinary.py (BinaryProcessor)
+└── subPatterns.py (Pattern definitions)
+
+subProcessText.py
+└── subParseString.py (StringParser)
+
+subProcessList.py
+├── subParseString.py (StringParser)
+└── subPatterns.py (HeaderPatterns)
+
+subProcessBinary.py
+└── (standalone)
+
+subParseString.py
+└── subPatterns.py (DataPatterns)
+```
+
+## Benefits of New Structure
+
+1. **Single Responsibility**: Each module has one clear purpose
+2. **DRY Principle**: No code duplication across modules
+3. **Testability**: Individual modules can be tested in isolation
+4. **Extensibility**: Easy to add new data types or processing methods
+5. **Maintainability**: Changes to one module don't affect others
+6. **Performance**: Specialized processors are optimized for their data types
diff --git a/modules/neutralizer/subParseString.py b/modules/neutralizer/subParseString.py
new file mode 100644
index 00000000..a2b39333
--- /dev/null
+++ b/modules/neutralizer/subParseString.py
@@ -0,0 +1,162 @@
+"""
+String parsing and replacement utilities for data anonymization
+Handles pattern matching and replacement for emails, phones, addresses, IDs and names
+"""
+
+import re
+import uuid
+from typing import Dict, List, Tuple, Any
+from modules.neutralizer.subPatterns import DataPatterns, find_patterns_in_text
+
+class StringParser:
+    """Handles string parsing and replacement operations"""
+
+    def __init__(self, names_to_parse: List[str] = None):
+        """
+        Initialize the string parser
+
+        Args:
+            names_to_parse: List of names to parse and replace (case-insensitive)
+        """
+        self.data_patterns = DataPatterns.patterns
+        self.names_to_parse = names_to_parse or []
+        self.mapping = {}
+
+    def is_placeholder(self, text: str) -> bool:
+        """
+        Check if text is already a placeholder in format [tag.uuid]
+
+        Args:
+            text: Text to check
+
+        Returns:
+            bool: True if text is a placeholder
+        """
+        return bool(re.match(r'^\[[a-z]+\.[a-f0-9-]+\]$', text))
+
+    def replace_pattern_matches(self, text: str) -> str:
+        """
+        Replace pattern-based matches (emails, phones, etc.)
in text + + Args: + text: Text to process + + Returns: + str: Text with pattern matches replaced + """ + pattern_matches = find_patterns_in_text(text, self.data_patterns) + + # Process pattern matches from right to left to avoid position shifts + for pattern_name, matched_text, start, end in reversed(pattern_matches): + # Skip if already a placeholder + if self.is_placeholder(matched_text): + continue + + # Skip if contains placeholder characters + if '[' in matched_text or ']' in matched_text: + continue + + if matched_text not in self.mapping: + # Generate a UUID for the placeholder + placeholder_id = str(uuid.uuid4()) + # Create placeholder in format [type.uuid] + type_mapping = { + 'email': 'email', + 'phone': 'phone', + 'address': 'address', + 'id': 'id' + } + placeholder_type = type_mapping.get(pattern_name, 'data') + self.mapping[matched_text] = f"[{placeholder_type}.{placeholder_id}]" + + replacement = self.mapping[matched_text] + text = text[:start] + replacement + text[end:] + + return text + + def replace_custom_names(self, text: str) -> str: + """ + Replace custom names from the user list in text + + Args: + text: Text to process + + Returns: + str: Text with custom names replaced + """ + for name in self.names_to_parse: + if not name.strip(): + continue + + # Create case-insensitive regex pattern with word boundaries + pattern = re.compile(r'\b' + re.escape(name.strip()) + r'\b', re.IGNORECASE) + + # Find all matches for this name + matches = list(pattern.finditer(text)) + + # Replace each match with a placeholder + for match in reversed(matches): # Process from right to left to avoid position shifts + matched_text = match.group() + if matched_text not in self.mapping: + # Generate a UUID for the placeholder + placeholder_id = str(uuid.uuid4()) + self.mapping[matched_text] = f"[name.{placeholder_id}]" + + replacement = self.mapping[matched_text] + start, end = match.span() + text = text[:start] + replacement + text[end:] + + return text + + def process_string(self, text: str) -> str: + """ + Process a string by replacing patterns first, then custom names + + Args: + text: Text to process + + Returns: + str: Processed text with replacements + """ + if self.is_placeholder(text): + return text + + # Step 1: Replace pattern-based matches FIRST + text = self.replace_pattern_matches(text) + + # Step 2: Replace custom names SECOND + text = self.replace_custom_names(text) + + return text + + def process_json_value(self, value: Any) -> Any: + """ + Process a JSON value for anonymization + + Args: + value: Value to process + + Returns: + Any: Processed value + """ + if isinstance(value, str): + return self.process_string(value) + elif isinstance(value, dict): + return {k: self.process_json_value(v) for k, v in value.items()} + elif isinstance(value, list): + return [self.process_json_value(item) for item in value] + else: + return value + + def get_mapping(self) -> Dict[str, str]: + """ + Get the current mapping of original values to placeholders + + Returns: + Dict[str, str]: Mapping dictionary + """ + return self.mapping.copy() + + def clear_mapping(self): + """Clear the current mapping""" + self.mapping.clear() diff --git a/modules/neutralizer/patterns.py b/modules/neutralizer/subPatterns.py similarity index 100% rename from modules/neutralizer/patterns.py rename to modules/neutralizer/subPatterns.py diff --git a/modules/neutralizer/subProcessBinary.py b/modules/neutralizer/subProcessBinary.py new file mode 100644 index 00000000..67c73bc1 --- /dev/null +++ 
b/modules/neutralizer/subProcessBinary.py @@ -0,0 +1,101 @@ +""" +Binary data processing module for data anonymization +Handles binary data types (images, audio, video, etc.) +""" + +import base64 +import re +from typing import Dict, Any, Tuple +from dataclasses import dataclass + +@dataclass +class BinaryData: + """Repräsentiert Binärdaten""" + content: str + data_type: str # 'image', 'audio', 'video', 'document', 'unknown' + encoding: str # 'base64', 'hex', 'raw' + +class BinaryProcessor: + """Handles binary data processing for anonymization""" + + def __init__(self): + """Initialize the binary processor""" + self.supported_types = { + 'image': ['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp'], + 'audio': ['.mp3', '.wav', '.flac', '.aac', '.ogg', '.m4a'], + 'video': ['.mp4', '.avi', '.mov', '.wmv', '.flv', '.mkv', '.webm'], + 'document': ['.pdf', '.doc', '.docx', '.xls', '.xlsx', '.ppt', '.pptx'] + } + + def detect_binary_type(self, content: str) -> str: + """ + Detect if content is binary data and determine type + + Args: + content: Content to analyze + + Returns: + str: Binary type or 'text' if not binary + """ + # Check if content looks like base64 + if re.match(r'^[A-Za-z0-9+/]*={0,2}$', content.strip()): + try: + decoded = base64.b64decode(content) + # Try to decode as text + decoded.decode('utf-8') + return 'text' # It's base64 encoded text + except (base64.binascii.Error, UnicodeDecodeError): + # It's binary data + return 'binary' + + # Check for binary patterns + if len(content) > 100 and '\x00' in content: + return 'binary' + + return 'text' + + def is_binary_content(self, content: str) -> bool: + """ + Check if content is binary data + + Args: + content: Content to check + + Returns: + bool: True if content is binary + """ + return self.detect_binary_type(content) == 'binary' + + def process_binary_content(self, content: str) -> Tuple[Any, Dict[str, str], list, Dict[str, Any]]: + """ + Process binary content for anonymization + + Args: + content: Binary content to process + + Returns: + Tuple of (processed_data, mapping, replaced_fields, processed_info) + """ + # TODO: Implement binary data neutralization + # This would require: + # 1. Detecting binary data types (images, audio, video, etc.) + # 2. Implementing specific neutralization for each type + # 3. Handling metadata and embedded content + # 4. 
Preserving binary integrity while removing sensitive data + + processed_info = { + 'type': 'binary', + 'status': 'not_implemented', + 'message': 'Binary data neutralization not yet implemented' + } + + return content, {}, [], processed_info + + def get_supported_types(self) -> Dict[str, list]: + """ + Get list of supported binary file types + + Returns: + Dict[str, list]: Dictionary of supported types and their extensions + """ + return self.supported_types.copy() diff --git a/modules/neutralizer/subProcessCommon.py b/modules/neutralizer/subProcessCommon.py new file mode 100644 index 00000000..6ffc2166 --- /dev/null +++ b/modules/neutralizer/subProcessCommon.py @@ -0,0 +1,143 @@ +""" +Common processing utilities for data anonymization +Shared functions and data structures +""" + +import re +from typing import Dict, List, Any, Union, Optional +from dataclasses import dataclass + +@dataclass +class ProcessResult: + """Result of content processing""" + data: Any + mapping: Dict[str, str] + replaced_fields: List[str] + processed_info: Dict[str, Any] # Additional processing information + +class CommonUtils: + """Common utility functions for data processing""" + + @staticmethod + def normalize_whitespace(text: str) -> str: + """ + Normalize whitespace in text + + Args: + text: Text to normalize + + Returns: + str: Normalized text + """ + text = re.sub(r'\s+', ' ', text) + text = text.replace('\r\n', '\n').replace('\r', '\n') + return text.strip() + + @staticmethod + def is_table_line(line: str) -> bool: + """ + Check if a line represents a table row + + Args: + line: Line to check + + Returns: + bool: True if line is a table row + """ + return bool(re.match(r'^\s*[^:]+:\s*[^:]+$', line) or + re.match(r'^\s*[^\t]+\t[^\t]+$', line)) + + @staticmethod + def detect_content_type(content: str) -> str: + """ + Detect the type of content based on its structure + + Args: + content: Content to analyze + + Returns: + str: Content type ('csv', 'json', 'xml', 'text', 'binary') + """ + content = content.strip() + + # Check for JSON + if content.startswith('{') and content.endswith('}'): + return 'json' + if content.startswith('[') and content.endswith(']'): + return 'json' + + # Check for XML + if content.startswith('<') and content.endswith('>'): + return 'xml' + + # Check for CSV (has commas and newlines) + if ',' in content and '\n' in content: + lines = content.split('\n') + if len(lines) > 1 and all(',' in line for line in lines[:3]): + return 'csv' + + # Check for binary + if len(content) > 100 and '\x00' in content: + return 'binary' + + # Default to text + return 'text' + + @staticmethod + def merge_mappings(*mappings: Dict[str, str]) -> Dict[str, str]: + """ + Merge multiple mapping dictionaries + + Args: + *mappings: Mapping dictionaries to merge + + Returns: + Dict[str, str]: Merged mapping dictionary + """ + merged = {} + for mapping in mappings: + merged.update(mapping) + return merged + + @staticmethod + def create_placeholder(placeholder_type: str, placeholder_id: str) -> str: + """ + Create a placeholder string in the format [type.uuid] + + Args: + placeholder_type: Type of placeholder (email, phone, name, etc.) 
+ placeholder_id: Unique identifier for the placeholder + + Returns: + str: Formatted placeholder string + """ + return f"[{placeholder_type}.{placeholder_id}]" + + @staticmethod + def validate_placeholder(placeholder: str) -> bool: + """ + Validate if a string is a valid placeholder + + Args: + placeholder: String to validate + + Returns: + bool: True if valid placeholder + """ + return bool(re.match(r'^\[[a-z]+\.[a-f0-9-]+\]$', placeholder)) + + @staticmethod + def extract_placeholder_info(placeholder: str) -> Optional[tuple]: + """ + Extract type and ID from a placeholder + + Args: + placeholder: Placeholder string + + Returns: + Optional[tuple]: (type, id) or None if invalid + """ + match = re.match(r'^\[([a-z]+)\.([a-f0-9-]+)\]$', placeholder) + if match: + return match.group(1), match.group(2) + return None diff --git a/modules/neutralizer/subProcessList.py b/modules/neutralizer/subProcessList.py new file mode 100644 index 00000000..58981333 --- /dev/null +++ b/modules/neutralizer/subProcessList.py @@ -0,0 +1,279 @@ +""" +List processing module for data anonymization +Handles structured data with headers (CSV, JSON, XML) +""" + +import json +import pandas as pd +import xml.etree.ElementTree as ET +from typing import Dict, List, Any, Union +from dataclasses import dataclass +from io import StringIO +from modules.neutralizer.subParseString import StringParser +from modules.neutralizer.subPatterns import get_pattern_for_header, HeaderPatterns + +@dataclass +class TableData: + """Repräsentiert Tabellendaten""" + headers: List[str] + rows: List[List[str]] + source_type: str # 'csv', 'json', 'xml', 'text_table' + +class ListProcessor: + """Handles structured data processing with headers for anonymization""" + + def __init__(self, names_to_parse: List[str] = None): + """ + Initialize the list processor + + Args: + names_to_parse: List of names to parse and replace + """ + self.string_parser = StringParser(names_to_parse) + self.header_patterns = HeaderPatterns.patterns + + def anonymize_table(self, table: TableData) -> TableData: + """ + Anonymize table data based on headers + + Args: + table: TableData object to anonymize + + Returns: + TableData: Anonymized table + """ + anonymized_table = TableData( + headers=table.headers.copy(), + rows=[row.copy() for row in table.rows], + source_type=table.source_type + ) + + for i, header in enumerate(anonymized_table.headers): + pattern = get_pattern_for_header(header, self.header_patterns) + if pattern: + for row in anonymized_table.rows: + if row[i] is not None: + original = str(row[i]) + if original not in self.string_parser.mapping: + # Generate a UUID for the placeholder + import uuid + placeholder_id = str(uuid.uuid4()) + self.string_parser.mapping[original] = pattern.replacement_template.format(len(self.string_parser.mapping) + 1) + row[i] = self.string_parser.mapping[original] + + return anonymized_table + + def process_csv_content(self, content: str) -> tuple: + """ + Process CSV content + + Args: + content: CSV content to process + + Returns: + Tuple of (processed_data, mapping, replaced_fields, processed_info) + """ + df = pd.read_csv(StringIO(content), encoding='utf-8') + table = TableData( + headers=df.columns.tolist(), + rows=df.values.tolist(), + source_type='csv' + ) + + if not table.rows: + return None, self.string_parser.get_mapping(), [], {'type': 'table', 'headers': table.headers, 'row_count': 0} + + anonymized_table = self.anonymize_table(table) + + # Track replaced fields + replaced_fields = [] + for i, header in 
enumerate(anonymized_table.headers): + for orig_row, anon_row in zip(table.rows, anonymized_table.rows): + if anon_row[i] != orig_row[i]: + replaced_fields.append(header) + + # Convert back to DataFrame + result = pd.DataFrame(anonymized_table.rows, columns=anonymized_table.headers) + + processed_info = { + 'type': 'table', + 'headers': table.headers, + 'row_count': len(table.rows) + } + + return result, self.string_parser.get_mapping(), replaced_fields, processed_info + + def process_json_content(self, content: str) -> tuple: + """ + Process JSON content + + Args: + content: JSON content to process + + Returns: + Tuple of (processed_data, mapping, replaced_fields, processed_info) + """ + data = json.loads(content) + + # Process JSON recursively using string parser + result = self.string_parser.process_json_value(data) + + processed_info = {'type': 'json'} + + return result, self.string_parser.get_mapping(), [], processed_info + + def anonymize_xml_element(self, element: ET.Element, indent: str = '') -> str: + """ + Recursively process XML element and return formatted string + + Args: + element: XML element to process + indent: Current indentation level + + Returns: + Formatted XML string + """ + # Process attributes + processed_attrs = {} + for attr_name, attr_value in element.attrib.items(): + # Check if attribute name matches any header patterns + pattern = get_pattern_for_header(attr_name, self.header_patterns) + if pattern: + if attr_value not in self.string_parser.mapping: + # Generate a UUID for the placeholder + import uuid + placeholder_id = str(uuid.uuid4()) + # Create placeholder in format [type.uuid] + type_mapping = { + 'email': 'email', + 'phone': 'phone', + 'name': 'name', + 'address': 'address', + 'id': 'id' + } + placeholder_type = type_mapping.get(pattern.name, 'data') + self.string_parser.mapping[attr_value] = f"[{placeholder_type}.{placeholder_id}]" + processed_attrs[attr_name] = self.string_parser.mapping[attr_value] + else: + # Check if attribute value matches any data patterns + from modules.neutralizer.subPatterns import find_patterns_in_text, DataPatterns + matches = find_patterns_in_text(attr_value, DataPatterns.patterns) + if matches: + pattern_name = matches[0][0] + pattern = next((p for p in DataPatterns.patterns if p.name == pattern_name), None) + if pattern: + if attr_value not in self.string_parser.mapping: + # Generate a UUID for the placeholder + import uuid + placeholder_id = str(uuid.uuid4()) + # Create placeholder in format [type.uuid] + type_mapping = { + 'email': 'email', + 'phone': 'phone', + 'name': 'name', + 'address': 'address', + 'id': 'id' + } + placeholder_type = type_mapping.get(pattern_name, 'data') + self.string_parser.mapping[attr_value] = f"[{placeholder_type}.{placeholder_id}]" + processed_attrs[attr_name] = self.string_parser.mapping[attr_value] + else: + processed_attrs[attr_name] = attr_value + else: + processed_attrs[attr_name] = attr_value + + attrs = ' '.join(f'{k}="{v}"' for k, v in processed_attrs.items()) + attrs = f' {attrs}' if attrs else '' + + # Process text content + text = element.text.strip() if element.text and element.text.strip() else '' + if text: + # Skip if already a placeholder + if not self.string_parser.is_placeholder(text): + # Check if text matches any patterns + from modules.neutralizer.subPatterns import find_patterns_in_text, DataPatterns + pattern_matches = find_patterns_in_text(text, DataPatterns.patterns) + + if pattern_matches: + pattern_name = pattern_matches[0][0] + pattern = next((p for p in 
DataPatterns.patterns if p.name == pattern_name), None)
+                    if pattern:
+                        if text not in self.string_parser.mapping:
+                            # Generate a UUID for the placeholder
+                            import uuid
+                            placeholder_id = str(uuid.uuid4())
+                            # Create placeholder in format [type.uuid]
+                            type_mapping = {
+                                'email': 'email',
+                                'phone': 'phone',
+                                'name': 'name',
+                                'address': 'address',
+                                'id': 'id'
+                            }
+                            placeholder_type = type_mapping.get(pattern_name, 'data')
+                            self.string_parser.mapping[text] = f"[{placeholder_type}.{placeholder_id}]"
+                        text = self.string_parser.mapping[text]
+                else:
+                    # Check if text matches any custom names from the user list
+                    for name in self.string_parser.names_to_parse:
+                        if not name.strip():
+                            continue
+                        if text.lower().strip() == name.lower().strip():
+                            if text not in self.string_parser.mapping:
+                                # Generate a UUID for the placeholder
+                                import uuid
+                                placeholder_id = str(uuid.uuid4())
+                                self.string_parser.mapping[text] = f"[name.{placeholder_id}]"
+                            text = self.string_parser.mapping[text]
+                            break
+
+        # Process child elements
+        children = []
+        for child in element:
+            child_str = self.anonymize_xml_element(child, indent + '  ')
+            children.append(child_str)
+
+        # Build element string
+        if not children and not text:
+            return f"{indent}<{element.tag}{attrs}/>"
+        elif not children:
+            return f"{indent}<{element.tag}{attrs}>{text}</{element.tag}>"
+        else:
+            result = [f"{indent}<{element.tag}{attrs}>"]
+            if text:
+                result.append(f"{indent}  {text}")
+            result.extend(children)
+            result.append(f"{indent}</{element.tag}>")
+            return '\n'.join(result)
+
+    def process_xml_content(self, content: str) -> tuple:
+        """
+        Process XML content
+
+        Args:
+            content: XML content to process
+
+        Returns:
+            Tuple of (processed_data, mapping, replaced_fields, processed_info)
+        """
+        root = ET.fromstring(content)
+
+        # Process XML recursively with proper formatting
+        result = self.anonymize_xml_element(root)
+
+        processed_info = {'type': 'xml'}
+
+        return result, self.string_parser.get_mapping(), [], processed_info
+
+    def get_mapping(self) -> Dict[str, str]:
+        """
+        Get the current mapping of original values to placeholders
+
+        Returns:
+            Dict[str, str]: Mapping dictionary
+        """
+        return self.string_parser.get_mapping()
+
+    def clear_mapping(self):
+        """Clear the current mapping"""
+        self.string_parser.clear_mapping()
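Since `subProcessList.py` above carries the header-driven table path, a short hedged sketch of how it might be exercised can help. It assumes `HeaderPatterns` recognizes columns such as `name` and `email`, and the concrete placeholder strings depend on each pattern's `replacement_template`:

```python
from modules.neutralizer.subProcessList import ListProcessor

# A two-column CSV with one data row; the header names drive the anonymization.
csv_content = "name,email\nJohn Doe,john.doe@example.com\n"

processor = ListProcessor(names_to_parse=["John Doe"])
df, mapping, replaced_fields, info = processor.process_csv_content(csv_content)

print(info)             # e.g. {'type': 'table', 'headers': ['name', 'email'], 'row_count': 1}
print(replaced_fields)  # headers whose column values were replaced
print(mapping)          # original value -> placeholder, as built by anonymize_table()
```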
diff --git a/modules/neutralizer/subProcessText.py b/modules/neutralizer/subProcessText.py new file mode 100644 index 00000000..c9ad872f --- /dev/null +++ b/modules/neutralizer/subProcessText.py @@ -0,0 +1,101 @@ +""" +Text processing module for data anonymization +Handles plain text processing without header information +""" + +from typing import Dict, List, Any +from dataclasses import dataclass +from modules.neutralizer.subParseString import StringParser + +@dataclass +class PlainText: + """Represents plain text""" + content: str + source_type: str # 'txt', 'docx', 'text_plain' + +class TextProcessor: + """Handles plain text processing for anonymization""" + + def __init__(self, names_to_parse: List[str] = None): + """ + Initialize the text processor + + Args: + names_to_parse: List of names to parse and replace + """ + self.string_parser = StringParser(names_to_parse) + + def extract_tables_from_text(self, content: str) -> tuple: + """ + Extract tables and plain text from content + + Args: + content: Content to process + + Returns: + Tuple of (list of tables, list of plain text sections) + """ + # For now, process the entire content as plain text + # This can be extended later to detect table-like structures + tables = [] + plain_texts = [PlainText(content=content, source_type='text_plain')] + + return tables, plain_texts + + def anonymize_plain_text(self, text: PlainText) -> PlainText: + """ + Anonymize plain text content + + Args: + text: PlainText object to anonymize + + Returns: + PlainText: Anonymized text + """ + # Use the string parser to process the content + anonymized_content = self.string_parser.process_string(text.content) + + return PlainText(content=anonymized_content, source_type=text.source_type) + + def process_text_content(self, content: str) -> tuple: + """ + Process text content and return anonymized data + + Args: + content: Text content to process + + Returns: + Tuple of (anonymized_content, mapping, replaced_fields, processed_info) + """ + # Extract tables and plain text sections + tables, plain_texts = self.extract_tables_from_text(content) + + # Process plain text sections + anonymized_texts = [self.anonymize_plain_text(text) for text in plain_texts] + + # Combine all processed content + result = content + for text, anonymized_text in zip(plain_texts, anonymized_texts): + if text.content != anonymized_text.content: + result = result.replace(text.content, anonymized_text.content) + + # Get processing information (guard against the empty tables list) + processed_info = { + 'type': 'text', + 'tables': [{'headers': t.headers, 'row_count': len(t.rows)} for t in tables] if tables and hasattr(tables[0], 'headers') else [] + } + + return result, self.string_parser.get_mapping(), [], processed_info + + def get_mapping(self) -> Dict[str, str]: + """ + Get the current mapping of original values to placeholders + + Returns: + Dict[str, str]: Mapping dictionary + """ + return self.string_parser.get_mapping() + + def clear_mapping(self): + """Clear the current mapping""" + self.string_parser.clear_mapping() diff --git a/modules/routes/routeDataFiles.py b/modules/routes/routeDataFiles.py index 3243bc21..f0feef25 100644 --- a/modules/routes/routeDataFiles.py +++ b/modules/routes/routeDataFiles.py @@ -18,7 +18,7 @@ import modules.interfaces.interfaceComponentObjects as interfaceComponentObjects from modules.interfaces.interfaceComponentModel import FileItem, FilePreview from modules.shared.attributeUtils import getModelAttributeDefinitions, AttributeResponse, AttributeDefinition from modules.interfaces.interfaceAppModel import User, DataNeutraliserConfig, DataNeutralizerAttributes -from modules.services.serviceNeutralization import NeutralizationService +from modules.features.featureNeutralizePlayground import NeutralizationService # Configure logger logger = logging.getLogger(__name__) diff --git a/modules/routes/routeSecurityGoogle.py b/modules/routes/routeSecurityGoogle.py index 2967e1fc..9cca2b3a 100644 --- a/modules/routes/routeSecurityGoogle.py +++ b/modules/routes/routeSecurityGoogle.py @@ -590,6 +590,20 @@ async def logout( try: appInterface = getInterface(currentUser) appInterface.logout() + + # Log successful logout + try: + from modules.shared.auditLogger import audit_logger + audit_logger.log_user_access( + user_id=str(currentUser.id), + mandate_id=str(currentUser.mandateId), + action="logout", + success_info="google_auth_logout" + ) + except Exception: + # Don't fail if audit logging fails + pass + return {"message": "Logged out successfully"} except Exception as e: logger.error(f"Error during logout: {str(e)}") diff --git a/modules/routes/routeSecurityLocal.py b/modules/routes/routeSecurityLocal.py index c0b176b0..15f998f9 100644 --- a/modules/routes/routeSecurityLocal.py +++ b/modules/routes/routeSecurityLocal.py @@ -124,6 +124,19 @@ async def 
login( # Save access token userInterface.saveAccessToken(token) + # Log successful login + try: + from modules.shared.auditLogger import audit_logger + audit_logger.log_user_access( + user_id=str(user.id), + mandate_id=str(user.mandateId), + action="login", + success_info="local_auth_success" + ) + except Exception: + # Don't fail if audit logging fails + pass + # Create response data response_data = { "type": "local_auth_success", @@ -138,6 +151,20 @@ async def login( # Handle authentication errors error_msg = str(e) logger.warning(f"Authentication failed for user {formData.username}: {error_msg}") + + # Log failed login attempt + try: + from modules.shared.auditLogger import audit_logger + audit_logger.log_user_access( + user_id="unknown", + mandate_id="unknown", + action="login", + success_info=f"failed: {error_msg}" + ) + except Exception: + # Don't fail if audit logging fails + pass + raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=error_msg, @@ -253,6 +280,19 @@ async def logout(request: Request, currentUser: User = Depends(getCurrentUser)) appInterface.revokeTokenById(jti, revokedBy=currentUser.id, reason="logout") revoked = 1 + # Log successful logout + try: + from modules.shared.auditLogger import audit_logger + audit_logger.log_user_access( + user_id=str(currentUser.id), + mandate_id=str(currentUser.mandateId), + action="logout", + success_info=f"revoked_tokens: {revoked}" + ) + except Exception: + # Don't fail if audit logging fails + pass + return JSONResponse({ "message": "Successfully logged out", "revokedTokens": revoked diff --git a/modules/routes/routeSecurityMsft.py b/modules/routes/routeSecurityMsft.py index efde94a3..8c2d8856 100644 --- a/modules/routes/routeSecurityMsft.py +++ b/modules/routes/routeSecurityMsft.py @@ -463,6 +463,20 @@ async def logout( try: appInterface = getInterface(currentUser) appInterface.logout() + + # Log successful logout + try: + from modules.shared.auditLogger import audit_logger + audit_logger.log_user_access( + user_id=str(currentUser.id), + mandate_id=str(currentUser.mandateId), + action="logout", + success_info="microsoft_auth_logout" + ) + except Exception: + # Don't fail if audit logging fails + pass + return {"message": "Logged out successfully"} except Exception as e: logger.error(f"Error during logout: {str(e)}") diff --git a/modules/routes/routeVoiceGoogle.py b/modules/routes/routeVoiceGoogle.py index 048e483d..2b68299b 100644 --- a/modules/routes/routeVoiceGoogle.py +++ b/modules/routes/routeVoiceGoogle.py @@ -161,12 +161,12 @@ async def realtime_interpreter( logger.info(f"📊 Audio file size: {len(audio_content)} bytes") # Save audio file for debugging with correct extension - file_extension = "webm" if audio_file.filename.endswith('.webm') else "wav" - debug_filename = f"debug_audio/audio_google_{audio_file.filename.replace('.wav', '.webm')}" - os.makedirs("debug_audio", exist_ok=True) - with open(debug_filename, "wb") as f: - f.write(audio_content) - logger.info(f"💾 Saved audio file for debugging: {debug_filename}") + # file_extension = "webm" if audio_file.filename.endswith('.webm') else "wav" + # debug_filename = f"debug_audio/audio_google_{audio_file.filename.replace('.wav', '.webm')}" + # os.makedirs("debug_audio", exist_ok=True) + # with open(debug_filename, "wb") as f: + # f.write(audio_content) + # logger.info(f"💾 Saved audio file for debugging: {debug_filename}") # Validate audio format connector = get_google_speech_connector() diff --git a/modules/security/auth.py b/modules/security/auth.py index 
f314b065..4ada086c 100644 --- a/modules/security/auth.py +++ b/modules/security/auth.py @@ -19,7 +19,7 @@ from modules.interfaces.interfaceAppObjects import getRootInterface from modules.interfaces.interfaceAppModel import User, AuthAuthority, Token # Get Config Data -SECRET_KEY = APP_CONFIG.get("APP_JWT_SECRET_SECRET") +SECRET_KEY = APP_CONFIG.get("APP_JWT_KEY_SECRET") ALGORITHM = APP_CONFIG.get("Auth_ALGORITHM") ACCESS_TOKEN_EXPIRE_MINUTES = int(APP_CONFIG.get("APP_TOKEN_EXPIRY")) REFRESH_TOKEN_EXPIRE_DAYS = int(APP_CONFIG.get("APP_REFRESH_TOKEN_EXPIRY", "7")) diff --git a/modules/shared/auditLogger.py b/modules/shared/auditLogger.py new file mode 100644 index 00000000..dab32fa9 --- /dev/null +++ b/modules/shared/auditLogger.py @@ -0,0 +1,202 @@ +""" +Audit Logging System for PowerOn Gateway + +This module provides centralized audit logging functionality for security events, +user actions, and system access patterns. +""" + +import logging +import os +from datetime import datetime +from typing import Optional, Dict, Any +from logging.handlers import RotatingFileHandler +from modules.shared.configuration import APP_CONFIG + + +class DailyRotatingFileHandler(RotatingFileHandler): + """ + A rotating file handler that automatically switches to a new file when the date changes. + The log file name includes the current date and switches at midnight. + """ + + def __init__(self, log_dir, filename_prefix, max_bytes=10485760, backup_count=5, **kwargs): + self.log_dir = log_dir + self.filename_prefix = filename_prefix + self.current_date = None + self.current_file = None + + # Initialize with today's file + self._update_file_if_needed() + + # Call parent constructor with current file + super().__init__(self.current_file, maxBytes=max_bytes, backupCount=backup_count, **kwargs) + + def _update_file_if_needed(self): + """Update the log file if the date has changed""" + today = datetime.now().strftime("%Y%m%d") + + if self.current_date != today: + self.current_date = today + new_file = os.path.join(self.log_dir, f"{self.filename_prefix}_{today}.log") + + if self.current_file != new_file: + self.current_file = new_file + return True + return False + + def emit(self, record): + """Emit a log record, switching files if date has changed""" + # Check if we need to switch to a new file + if self._update_file_if_needed(): + # Close current file and open new one + if self.stream: + self.stream.close() + self.stream = None + + # Update the baseFilename for the parent class + self.baseFilename = self.current_file + # Reopen the stream + if not self.delay: + self.stream = self._open() + + # Call parent emit method + super().emit(record) + + +class AuditLogger: + """Centralized audit logging system""" + + def __init__(self): + self.logger = None + self._setup_audit_logger() + + def _setup_audit_logger(self): + """Setup the audit logger with daily file rotation""" + try: + # Get log directory from config + logDir = APP_CONFIG.get("APP_LOGGING_LOG_DIR", "./") + if not os.path.isabs(logDir): + # If relative path, make it relative to the gateway directory + gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + logDir = os.path.join(gatewayDir, logDir) + + # Ensure log directory exists + os.makedirs(logDir, exist_ok=True) + + # Create audit logger + self.logger = logging.getLogger('audit') + self.logger.setLevel(logging.INFO) + + # Remove any existing handlers to avoid duplicates + for handler in self.logger.handlers[:]: + self.logger.removeHandler(handler) + + # Create daily 
rotating file handler for audit log + rotationSize = int(APP_CONFIG.get("APP_LOGGING_ROTATION_SIZE", 10485760)) # Default: 10MB + backupCount = int(APP_CONFIG.get("APP_LOGGING_BACKUP_COUNT", 5)) + + fileHandler = DailyRotatingFileHandler( + log_dir=logDir, + filename_prefix="log_audit", + max_bytes=rotationSize, + backup_count=backupCount + ) + + # Create formatter for audit log + auditFormatter = logging.Formatter( + fmt="%(asctime)s | %(message)s", + datefmt="%Y-%m-%d %H:%M:%S" + ) + fileHandler.setFormatter(auditFormatter) + + # Add handler to logger + self.logger.addHandler(fileHandler) + + # Prevent propagation to root logger + self.logger.propagate = False + + except Exception as e: + # Fallback to standard logger if audit setup fails + self.logger = logging.getLogger(__name__) + self.logger.error(f"Failed to setup audit logger: {str(e)}") + + def log_event(self, + user_id: str, + mandate_id: str, + category: str, + action: str, + details: str = "", + timestamp: Optional[datetime] = None) -> None: + """ + Log an audit event + + Args: + user_id: User identifier + mandate_id: Mandate identifier (can be empty if not applicable) + category: Event category (e.g., 'key', 'access', 'data') + action: Specific action (e.g., 'decode', 'login', 'logout') + details: Additional details about the event + timestamp: Optional custom timestamp (defaults to current time) + """ + try: + if not self.logger: + return + + # Use provided timestamp or current time + if timestamp is None: + timestamp = datetime.now() + + # Format the audit log entry + # Format: timestamp | userid | mandateid | category | action | details + audit_entry = f"{user_id} | {mandate_id} | {category} | {action} | {details}" + + # Log the event + self.logger.info(audit_entry) + + except Exception as e: + # Use standard logger as fallback + logging.getLogger(__name__).error(f"Failed to log audit event: {str(e)}") + + def log_key_access(self, user_id: str, mandate_id: str, key_name: str, action: str) -> None: + """Log key access events (decode/encode)""" + self.log_event( + user_id=user_id, + mandate_id=mandate_id, + category="key", + action=action, + details=key_name + ) + + def log_user_access(self, user_id: str, mandate_id: str, action: str, success_info: str = "") -> None: + """Log user access events (login/logout)""" + self.log_event( + user_id=user_id, + mandate_id=mandate_id, + category="access", + action=action, + details=success_info + ) + + def log_data_access(self, user_id: str, mandate_id: str, action: str, details: str = "") -> None: + """Log data access events""" + self.log_event( + user_id=user_id, + mandate_id=mandate_id, + category="data", + action=action, + details=details + ) + + def log_security_event(self, user_id: str, mandate_id: str, action: str, details: str = "") -> None: + """Log security-related events""" + self.log_event( + user_id=user_id, + mandate_id=mandate_id, + category="security", + action=action, + details=details + ) + + +# Global audit logger instance +audit_logger = AuditLogger()
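A sketch of what the logger above actually writes, assuming APP_LOGGING_LOG_DIR resolves to ./logs (the ids are invented): a call such as

    audit_logger.log_user_access(user_id="42", mandate_id="7",
                                 action="login", success_info="local_auth_success")

appends to ./logs/log_audit_20250922.log a line of the form

    2025-09-22 00:39:15 | 42 | 7 | access | login | local_auth_success

where the timestamp comes from the handler's "%(asctime)s | %(message)s" formatter and the rest from log_event's user | mandate | category | action | details layout.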
diff --git a/modules/shared/configuration.py b/modules/shared/configuration.py index 9415b7f7..e906840d 100644 --- a/modules/shared/configuration.py +++ b/modules/shared/configuration.py @@ -7,8 +7,14 @@ config.ini files and environment variables stored in .env files, using a flat st import os import logging +import json +import base64 +import time from typing import Any, Dict, Optional from pathlib import Path +from cryptography.fernet import Fernet +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC # Set up basic logging for configuration loading logging.basicConfig( @@ -119,20 +125,43 @@ class Configuration: try: with open(envPath, 'r') as f: - for line in f: - line = line.strip() - # Skip empty lines and comments - if not line or line.startswith('#'): - continue + lines = f.readlines() + + i = 0 + while i < len(lines): + line = lines[i].strip() + + # Skip empty lines and comments + if not line or line.startswith('#'): + i += 1 + continue + + # Parse key-value pairs + if '=' in line: + key, value = line.split('=', 1) + key = key.strip() + value = value.strip() + + # Check if value starts with { (JSON object) + if value.startswith('{'): + # Collect all lines until we find the closing } + json_lines = [value] + i += 1 + brace_count = value.count('{') - value.count('}') - # Parse key-value pairs - if '=' in line: - key, value = line.split('=', 1) - key = key.strip() - value = value.strip() + while i < len(lines) and brace_count > 0: + json_lines.append(lines[i].rstrip('\n')) + brace_count += lines[i].count('{') - lines[i].count('}') + i += 1 - # Add directly to data dictionary + # Join all lines and create the full JSON value + full_json_value = '\n'.join(json_lines) + self._data[key] = full_json_value + # i already points past the JSON block, so skip the shared increment below + continue + else: + # Single line value self._data[key] = value + + i += 1 logger.info(f"Loaded environment variables from {envPath.absolute()}") @@ -158,7 +187,7 @@ class Configuration: logger.info("Environment file has changed, reloading...") self._loadEnv() - def get(self, key: str, default: Any = None) -> Any: + def get(self, key: str, default: Any = None, user_id: str = "system") -> Any: """Get configuration value with optional default""" self.checkForUpdates() # Check for file changes @@ -166,10 +195,24 @@ value = self._data[key] # Handle secrets (keys ending with _SECRET) if key.endswith("_SECRET"): - return handleSecret(value) - # Handle JSON secrets (keys ending with _API_KEY that contain JSON) - elif key.endswith("_API_KEY") and value.startswith("{"): - return handleJsonSecret(value) + # Log audit event for secret key access + try: + from modules.shared.auditLogger import audit_logger + audit_logger.log_key_access( + user_id=user_id, + mandate_id="system", + key_name=key, + action="decode" + ) + except Exception: + # Don't fail if audit logging fails + pass + + if value.startswith("{") and value.endswith("}"): + # Handle JSON secrets (values containing a JSON object, e.g. service account keys) + return handleSecretJson(value, user_id, key) + else: + return handleSecretText(value, user_id, key) return value return default @@ -177,7 +220,7 @@ """Enable attribute-style access to configuration""" self.checkForUpdates() # Check for file changes - value = self.get(name) + value = self.get(name, user_id="system") if value is None: raise AttributeError(f"Configuration key '{name}' not found") return value @@ -191,42 +234,306 @@ """Set a configuration value (for testing/overrides)""" self._data[key] = value -def handleSecret(value: str) -> str: +def handleSecretText(value: str, user_id: str = "system", key_name: str = "unknown") -> str: """ - Handle secret values. Currently just returns the plain text value, - but can be enhanced to provide actual decryption in the future. + Handle secret values with encryption/decryption support. 
Args: - value: The secret value to handle + value: The secret value to handle (may be encrypted) + user_id: The user ID making the request (default: "system") + key_name: The name of the key being decrypted (default: "unknown") Returns: - str: Processed secret value + str: Processed secret value (decrypted if encrypted) """ - # For now, just return the value as-is - # In the future, this could be enhanced to decrypt values + if _is_encrypted_value(value): + return decrypt_value(value, user_id, key_name) return value -def handleJsonSecret(value: str) -> str: +def handleSecretJson(value: str, user_id: str = "system", key_name: str = "unknown") -> str: """ - Handle JSON secret values (like Google service account keys). - Validates that the value is valid JSON. + Handle JSON secret values (like Google service account keys) with encryption/decryption support. + Validates that the value is valid JSON after decryption. Args: - value: The JSON secret value to handle + value: The JSON secret value to handle (may be encrypted) + user_id: The user ID making the request (default: "system") + key_name: The name of the key being decrypted (default: "unknown") Returns: - str: Processed JSON secret value + str: Processed JSON secret value (decrypted if encrypted) Raises: - ValueError: If the value is not valid JSON + ValueError: If the value is not valid JSON after decryption """ - import json + # Decrypt if encrypted + if _is_encrypted_value(value): + decrypted_value = decrypt_value(value, user_id, key_name) + else: + decrypted_value = value + try: # Validate that it's valid JSON - json.loads(value) - return value + json.loads(decrypted_value) + return decrypted_value except json.JSONDecodeError as e: raise ValueError(f"Invalid JSON in secret value: {e}") +# Global rate limiting tracking +# Structure: {user_id: {key_name: [timestamps]}} +_decryption_attempts = {} + +def _get_master_key() -> bytes: + """ + Get the master key for the current environment. + + Returns: + bytes: The master key for encryption/decryption + + Raises: + ValueError: If no master key is found + """ + # Get the key location from config + key_location = APP_CONFIG.get('APP_KEY_SYSVAR') + env_type = APP_CONFIG.get('APP_ENV_TYPE', 'dev') + + if not key_location: + raise ValueError("APP_KEY_SYSVAR not configured") + + # First try to get from environment variable + master_key = os.environ.get(key_location) + + if master_key: + # If found in environment, use it directly + return master_key.encode('utf-8') + + # If not in environment, try to read from file + if os.path.exists(key_location): + try: + with open(key_location, 'r') as f: + content = f.read().strip() + + # Parse the key file format: env = key + lines = content.split('\n') + for line in lines: + line = line.strip() + if not line or line.startswith('#'): + continue + + if '=' in line: + key_env, key_value = line.split('=', 1) + key_env = key_env.strip() + key_value = key_value.strip() + + if key_env == env_type: + return key_value.encode('utf-8') + + raise ValueError(f"No key found for environment '{env_type}' in {key_location}") + + except Exception as e: + raise ValueError(f"Error reading key file {key_location}: {e}") + + raise ValueError(f"Master key not found. Checked environment variable '{key_location}' and file path") + +def _derive_encryption_key(master_key: bytes) -> bytes: + """ + Derive a 32-byte encryption key from the master key using PBKDF2. 
+ + Args: + master_key: The master key bytes + + Returns: + bytes: 32-byte derived key suitable for Fernet + """ + # Use a fixed salt for consistency (in production, consider using a random salt stored separately) + salt = b'poweron_config_salt_2025' + + kdf = PBKDF2HMAC( + algorithm=hashes.SHA256(), + length=32, + salt=salt, + iterations=100000, + ) + + return base64.urlsafe_b64encode(kdf.derive(master_key)) + +def _is_encrypted_value(value: str) -> bool: + """ + Check if a value is encrypted (starts with environment-specific prefix). + + Args: + value: The value to check + + Returns: + bool: True if encrypted, False otherwise + """ + if not value or not isinstance(value, str): + return False + + # Check for environment-specific encryption prefixes + env_type = APP_CONFIG.get('APP_ENV_TYPE', 'dev').upper() + expected_prefix = f"{env_type}_ENC:" + return value.startswith(expected_prefix) + +def _get_encryption_prefix(env_type: str) -> str: + """ + Get the encryption prefix for the given environment type. + + Args: + env_type: The environment type (dev, int, prod, etc.) + + Returns: + str: The encryption prefix + """ + return f"{env_type.upper()}_ENC:" + +def _check_decryption_rate_limit(user_id: str, key_name: str, max_per_second: int = 10) -> bool: + """ + Check if decryption is allowed based on rate limiting (max 10 per second per user per key). + + Args: + user_id: The user ID making the request + key_name: The name of the key being decrypted + max_per_second: Maximum decryptions per second (default: 10) + + Returns: + bool: True if allowed, False if rate limited + """ + current_time = time.time() + + # Initialize tracking for this user if not exists + if user_id not in _decryption_attempts: + _decryption_attempts[user_id] = {} + + # Initialize tracking for this key if not exists + if key_name not in _decryption_attempts[user_id]: + _decryption_attempts[user_id][key_name] = [] + + # Clean old attempts (older than 1 second) + _decryption_attempts[user_id][key_name] = [ + timestamp for timestamp in _decryption_attempts[user_id][key_name] + if current_time - timestamp < 1.0 + ] + + # Check if we're within rate limit + if len(_decryption_attempts[user_id][key_name]) >= max_per_second: + logger.warning(f"Decryption rate limit exceeded for user '{user_id}' key '{key_name}' ({max_per_second}/sec)") + return False + + # Record this attempt + _decryption_attempts[user_id][key_name].append(current_time) + return True + +def encrypt_value(value: str, env_type: str = None, user_id: str = "system", key_name: str = "unknown") -> str: + """ + Encrypt a value using the master key for the specified environment. + + Args: + value: The plain text value to encrypt + env_type: The environment type (dev, int, prod). If None, uses current environment. 
+ user_id: The user ID making the request (default: "system") + key_name: The name of the key being encrypted (default: "unknown") + + Returns: + str: The encrypted value with prefix + + Raises: + ValueError: If encryption fails + """ + if env_type is None: + env_type = APP_CONFIG.get('APP_ENV_TYPE', 'dev') + + try: + master_key = _get_master_key() + derived_key = _derive_encryption_key(master_key) + fernet = Fernet(derived_key) + + # Encrypt the value + encrypted_bytes = fernet.encrypt(value.encode('utf-8')) + encrypted_b64 = base64.urlsafe_b64encode(encrypted_bytes).decode('utf-8') + + # Add environment prefix + prefix = _get_encryption_prefix(env_type) + encrypted_value = f"{prefix}{encrypted_b64}" + + # Log audit event for encryption + try: + from modules.shared.auditLogger import audit_logger + audit_logger.log_key_access( + user_id=user_id, + mandate_id="system", + key_name=key_name, + action="encrypt" + ) + except Exception: + # Don't fail if audit logging fails + pass + + return encrypted_value + + except Exception as e: + raise ValueError(f"Encryption failed: {e}") + +def decrypt_value(encrypted_value: str, user_id: str = "system", key_name: str = "unknown") -> str: + """ + Decrypt a value using the master key for the current environment. + + Args: + encrypted_value: The encrypted value with prefix + user_id: The user ID making the request (default: "system") + key_name: The name of the key being decrypted (default: "unknown") + + Returns: + str: The decrypted plain text value + + Raises: + ValueError: If decryption fails + """ + if not _is_encrypted_value(encrypted_value): + return encrypted_value # Return as-is if not encrypted + + # Check rate limiting (10 per second per user per key) + if not _check_decryption_rate_limit(user_id, key_name, max_per_second=10): + raise ValueError(f"Decryption rate limit exceeded for user '{user_id}' key '{key_name}' (10/sec)") + + try: + # Extract the encrypted part (remove prefix) + env_type = APP_CONFIG.get('APP_ENV_TYPE', 'dev') + env_type_upper = env_type.upper() + expected_prefix = f"{env_type_upper}_ENC:" + + if not encrypted_value.startswith(expected_prefix): + raise ValueError(f"Invalid encryption prefix. 
Expected {expected_prefix}") + + encrypted_part = encrypted_value[len(expected_prefix):] + + # Get master key and derive encryption key + master_key = _get_master_key() + derived_key = _derive_encryption_key(master_key) + fernet = Fernet(derived_key) + + # Decode and decrypt + encrypted_bytes = base64.urlsafe_b64decode(encrypted_part.encode('utf-8')) + decrypted_bytes = fernet.decrypt(encrypted_bytes) + decrypted_value = decrypted_bytes.decode('utf-8') + + # Log audit event for decryption + try: + from modules.shared.auditLogger import audit_logger + audit_logger.log_key_access( + user_id=user_id, + mandate_id="system", + key_name=key_name, + action="decrypt" + ) + except Exception: + # Don't fail if audit logging fails + pass + + return decrypted_value + + except Exception as e: + raise ValueError(f"Decryption failed: {e}") + # Create the global APP_CONFIG instance APP_CONFIG = Configuration() \ No newline at end of file diff --git a/notes/changelog.txt b/notes/changelog.txt deleted file mode 100644 index e10e683a..00000000 --- a/notes/changelog.txt +++ /dev/null @@ -1,1206 +0,0 @@ - -TODO - -# System -- database -- db initialization as separate function to create root mandate, then sysadmin with hashed passwords --> using the connector according to env configuration -- settings: UI page for: db new (delete if exists and init), then to add mandate root and sysadmin, log download --> in the api to add connector settings with the according endpoints -- access model as matrix, not as code --> to have view, add, update, delete with the rights on level table and attribute for all, my (created by me), my mandate (mandate I am in), none (no access) -- document handling centralized -- ai handling centralized -- neutralizer to activate AND put back placeholders to the returned data - -# Tests -- workflow continue after stop. to run normally -- add a prompt --> then shall be visible in the workflow to select -- msft connection bei 2 verschiedene users -- chat 3x ausführen mit verschiedenen mailempfängern, test ob round greift -- manual task retry - triggered - -- check method outlook: alles -- check method sharepoint: alles -- check method webcrawler: alles -- check method google: alles -- check zusammenfassung von 10 dokumenten >10 MB -- test case bewerbung - -# Ida changes gateway: -- Polling endpoint + doku dazu -- files in documents integriert --> document endpoint for files -- prompts in chat endpoint -- - -# DOCUMENTATION -Design principles -- UI: Module classes for data management (CRUD tables & forms --> formGeneric) -- Basic: All timestamps to be timezone aware fehlerabfangroutinen -- Backend: All external components to attach over connectorXxx --> interfaceXxx --> our codebase -- all model definitions in interfaceXxxModel -- action functions for ai: why to use documentList and not just document as input parameter? 
diff --git a/notes/changelog.txt b/notes/changelog.txt deleted file mode 100644 index e10e683a..00000000 --- a/notes/changelog.txt +++ /dev/null @@ -1,1206 +0,0 @@ - -TODO - -# System -- database -- db initialization as separate function to create root mandate, then sysadmin with hashed passwords --> using the connector according to env configuration -- settings: UI page for: db new (delete if exists and init), then to add mandate root and sysadmin, log download --> in the api to add connector settings with the according endpoints -- access model as matrix, not as code --> to have view, add, update, delete with the rights on level table and attribute for all, my (created by me), my mandate (mandate I am in), none (no access) -- document handling centralized -- ai handling centralized -- neutralizer to activate AND put back placeholders to the returned data - -# Tests -- workflow continue after stop, to run normally -- add a prompt --> then shall be visible in the workflow to select -- msft connection with 2 different users -- run chat 3x with different mail recipients, test whether the round logic takes effect -- manual task retry - triggered - -- check method outlook: everything -- check method sharepoint: everything -- check method webcrawler: everything -- check method google: everything -- check summarization of 10 documents >10 MB -- test case: job application - -# Ida changes gateway: -- Polling endpoint + documentation for it -- files integrated into documents --> document endpoint for files -- prompts in chat endpoint -- - -# DOCUMENTATION -Design principles -- UI: Module classes for data management (CRUD tables & forms --> formGeneric) -- Basic: All timestamps to be timezone aware, error-handling routines -- Backend: All external components to attach over connectorXxx --> interfaceXxx --> our codebase -- all model definitions in interfaceXxxModel -- action functions for ai: why to use documentList and not just document as input parameter? --> to have full flexibility to pass either a list of documents, or a documentList - -******************** - - -INIT - -conda activate poweron -cd gateway -pip install -r requirements.txt -python app.py - - - - - ----------------------- OPEN - - - -Tools to transfer incl funds: -- Google SERPAPI (shelly) -- Anthropic Claude (valueon + shelly) -- Cursor Pro -- Mermaid -- Github Pro - - - - ----------------------- DONE - -FRONTEND -- the application initiation gets userdata with the token over apiCall.js:/api/local/me --> object: - username - fullName - email - language - list of connections with attributes: - id - authority - externalUsername - - -Backend - -in the backend to handle the routes as follows: -- routeSecurityLocal.py to handle all local endpoints, to include token generation from local authority in auth.py -- routeSecurityMsft.py and routeSecurityGoogle.py to handle all their endpoints -- all routeSecurity*.py to use the same interface to manage tokens and userdata: serviceUserClass.py. This class to have the following - -logic: - - all tokens are stored in one table, where each token has the attribute of the according authenticationAuthority - - login and logout endpoints for "local" use a function "getUseridFromToken" to identify the user context. If user does not exist, error message - - login and logout endpoints for "msft" and "google" use a function "getUseridFromToken" to identify the user context. If user does not exist for login, to register a new "local" user with the external user data and to attach the external connection. within the identified user context and the connection in its list to send back user context as tokenLocal and connection as tokenExt - - the important thing is that the login endpoint serves two different actions: - a) without user context (no tokenLocal), it performs login for a user by external authority and sets the user context - b) with user context (a tokenLocal provided), it does NOT set a new user context, but manipulates a connection in the connection list of a local user - - illustrative example of token data to send to UI (attributes): - connect and - { - "token_type": "Bearer", - "expires_in": , - "access_token": , - "id_token": , - "client_info": , - "user_info": { - "name": "Patrick Motsch", - "email": "p.motsch@valueon.ch", - "id": "xxx" - }, - "mandateId": "", - "userId": "", - "id": "tokenid", - } - - - - - -We have to correct the following wrong user access management. - -Issue is: when a user logs in with a "local" managed account and then logs in to an msft account with "msft" authority, the userid is switched to the microsoft instance in the workflow. this must not happen. -Objective: The correct logic is that a user logs in with an account (managed by "local" or another authority). Once logged in, his login does not change, even if he connects to a microsoft account afterwards. - -Problem: We have a mix between user-login (creating the currentUser profile) and user-connections (attaching the user to a service, like "msft" - and future other services in parallel). - -Concept: We need to separate user-login and user-connections: - 1. the ui login and register modules produce a user-login, resulting in a currentUser profile in the backend to be used for workflow and other activities. the user gets a token (from "local" or "msft" or others). this token has to be checked when the user logs in. ALWAYS a check is required by the respective registration authority. 
- those use cases: - - if user registers with a "local" profile, a new user is created, a local token is produced - - if user logs in with a "local" profile for an existing user, a local token is produced - - if user logs in with a "local" profile for a non-existing user, login is denied (no user) - - if user logs in with a "msft" profile (or other foreign profile) for an existing user, a local token AND a token in the "msft" database (or other foreign system) is produced - - if user logs in with a "msft" profile (or other foreign profile) for a non-existing user, a local profile is generated based on information from the foreign account, then a local token AND a token in the "msft" database (or other foreign system) is produced - - 2. the ui navigation buttons for "Login MSFT" or future other buttons to connect to services (like e.g. google account or github account or microsoft "msft" account, etc.) do NOT generate a user-login, only a user-connection to a service. - -solution: -So there must be a mechanism that manages user-login and user-connection. Proposed approach: a user has a user profile to log in and a list of profiles for user-connections. -Examples: -- user registers with "local" profile --> he gets a local profile with 0 user-connections -- user registers with "msft" profile --> he gets a local profile with 1 user-connection to "msft". Then he connects to another "msft" profile. Now he has a local profile with 2 user-connections "msft" -- user registers with "google" profile (future) --> he gets a local profile and 1 user-connection to "google". Then he connects to another "msft" profile. Now he has a local profile and 1 user-connection "msft" and 1 user-connection "google". - -can you tell me how you would implement this adapted model into the pydantic model and into the code modules in a structured and maintainable way? - - - -i want to refactor the user management in the backend through the user journey. currently we have two problems: we always pass _userid and _mandate or id with _mandate from function to function, which blocks scaling. this is too complicated and not logical. - -to adapt the following: - -1. The attributes _mandateid and _userid to be removed from @connectorDbJson.py. the attribute _userid to rename to "userId". this is the id of the user who creates the record. This is the passed attribute instead of _userid and _mandateId, and it is stored as userId. The default value to be "" (if None, then set to ""). All newly created records get an additional "_createdBy" and "modifiedBy" attribute =self.userId. A modified record gets an adapted "modifiedBy" attribute = "userId" when modified. - -2.@gatewayModel.py to adapt class User: add mandateId. This is set to the same mandateId as the mandateId of the user who creates the user. - -3. @lucydomModel.py to adapt classes Prompt, FileItem, ChatWorkflow: add mandateId. This is set to the same mandateId as the mandateId of the user who creates the record. -Also to add "workflowId" to ChatStat, it is missing there. - -4. @gatewayInterface.py and @lucydomInterface.py to adapt according to the changes of points 1, 2, 3. Also to integrate their according "*Model.py" to use for record creation with correct attributes. - -Also to separate class initiation and the function call getInterface(). - -Class initiation without the parameters userid and mandateid. Initialize database and records. This ensures that when the first function call happens to the class, it is initiated correctly. 
Initiate the module class automatically when the module is loaded. - -function getInterface(currentUser with default value = None) does this: -- if currentUser is None, then only the database is initialized (e.g. to refresh folders and files) and an empty object is given back with a logger info for the database refresh -- if currentUser is provided, then it uses the id of the user as contextkey, creates a new instance of the class, gives self.user=currentUser to the class to have user context, initializes the AI service self.aiService=ChatService(), initializes access control: self.access = LucydomAccess(self.currentUser, self.db) -- now to adapt the code in the *Interface.py modules to use currentUser attributes. this way we have proper object usage -- modules.interfaces.*Interface to import as module and not the functions. This ensures that the module is initiated when imported. - -5. @auth.py : getRootInterface to call getInterface(rootUser), where rootUser is the user with initialId in the database (use a function for this)
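A sketch of the accessor described above (EmptyInterface, initDatabase and LucydomInterface are stand-in names for whatever the modules actually define):

    import logging
    logger = logging.getLogger(__name__)

    _interfaces = {}  # one interface instance per user context

    def getInterface(currentUser=None):
        if currentUser is None:
            initDatabase()  # refresh folders and files only
            logger.info("Database refreshed without user context")
            return EmptyInterface()
        if currentUser.id not in _interfaces:
            instance = LucydomInterface()
            instance.user = currentUser                 # user context
            instance.aiService = ChatService()          # AI service
            instance.access = LucydomAccess(currentUser, instance.db)  # access control
            _interfaces[currentUser.id] = instance
        return _interfaces[currentUser.id]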
- - - - -FRONTEND: -- login page and register page without fallback. they must load their login.html or register.html pages to work (no html in the code). - - -I want the formGeneric module to use api calls over the apiCalls.js module, not directly. So please adapt the formGeneric parameter "apiEndpoint" with the respective api-functions as objects, handed over by the modules: -- apiEndpoint.get --> the api to get data -- apiEndpoint.update --> the api to update data -- apiEndpoint.delete --> the api to delete data -then to use those api-functions in the module formGeneric instead of direct api calls -the modules mandates, users, files, prompts, to adapt accordingly - - - - -- all api calls from workflowUI.js and workflowData.js also to transfer to apiCall.js. There to integrate ALL route endpoints from all routes and to call over window.utils.api..... -- handleFileUpload and uploadfile exist in many places. To have the api functionality only in apiCall.js. - - - -please refactor those topics. - -- all api calls from workflowUI.js and workflowData.js also to transfer to apiCall.js. There to integrate ALL route endpoints from all routes and to call over window.utils.api..... - -- Functions for handleFileUpload and uploadfile are in many places. To have the api functionality only in apiCall.js. - -no api relevant code in other modules than apiCall.js. - -In apiCalls.js to remove the generic functions get, post, put, delete from the public set. those not to expose. only the specific endpoints from the routes to expose. - -If more than 3 changes in a module, give me the full module. otherwise tell me the parts to change. - - - - -Please enhance this: -- config & env variables integration to have config variables in the globalState set in category "config" integrated. - -cleanup utils.js: - - remove all elements in the context of workflow and messages. those elements have to be integrated within workflow... modules. Some functions within utils.js anyway are not used anymore, so to remove anyway. - - extract all api-call functions to a separate submodule "apiCalls.js" module. There to implement one interface function for each api-call. all calls to put into one object "api" to be accessed. - - at the end utils.js shall only include config & environment data management, show general toast and error, uiUtils, dataUtils - - in workflow... modules there are some redundant functions like in utils.js (e.g. showToast, showError, etc.). Those to remove in workflow and to get from utils.js - - utils data shall be accessible within those categories: - - window.utils.api --> the functions from apiCalls.js - - window.utils.ui --> what is in uiUtils currently, plus showError, showToast and similar - - window.utils.data --> what is in dataUtils currently plus handleFileUpload - -adapt other modules accordingly. for workflowUi.js only give me the parts to adapt. If only 1-3 adaptations for a module, just give me the changes. Otherwise the revised module. - - - - - -please adapt module workflowUi.js with this input (the other modules have already been adapted): -- Error handling for file parsing failures to add -- Clear indication of workflow completion status -- The message object structure to fully match the documented model -- Status field handling to be exactly and only the implementation according to the documentation (adjustment to recognize "first", "step", "last") -- File preview to better handle the documented document structure -- File actions to use the correct API paths -- log progress indicator implementation to improve, e.g. the feature to collapse/expand details -- Agent-specific log formatting to fully match the documented model -Updates Required: -- Update message rendering to handle status field correctly -- Improve file preview to handle documented document structure -- Update API paths for file operations -- Add better indication of workflow completion status -- Improve log progress indicator implementation - -also remove unused functionality and objects. - - - -Can you adapt the following two modules. the modules workflowCoordination.js and workflowData.js have already been updated. -please remove unused functionality and objects. - -please adapt module workflow.js with this input: -Updates Required: -- Implement explicit state machine transitions -- Update API interaction to match documented endpoints -- Improve error handling to match documented failure states -- Align status handling with the documented state transitions -- Ensure proper handling of the "last" message status - -please adapt module workflow.js with this input: -- adapt: Explicit handling of the workflow status transitions per state machine, clean separation of workflow states according to the documentation -- The workflow state management to align with the documented state machine -- Status transition handling to be more explicit -- Verify API paths and request structures -- Response handling to match the documented workflow object -Updates Required: -- Implement explicit state machine transitions -- Update API interaction to match documented endpoints -- Improve error handling to match documented failure states -- Align status handling with the documented state transitions -- Ensure proper handling of the "last" message status - - - -please adapt module workflowCoordination.js with this input: -- the workflow state object structure to be updated to match documentation -- Status transitions to follow the documented state machine -- message Status Handling to properly handle message status ("first", "step", "last") -Updates Required: -- Update workflowState object to match documented model -- Implement proper status transitions (null → running → completed/failed/stopped) -- Ensure message status field handling ("first", "step", "last") -- Ensure correct polling mechanism with log/message IDs -- Add missing getWorkflowStatus() function -- Fix the updateWorkflowStatus() function to handle all status transitions - - - - -please adapt module workflowData.js 
with this input: -- estimateJsonSize not to be in frontend. data stats is delivered in workflow object with attribute "tokensUsed" -- pollWorkflowStatus to implement -- adapt object Model Discrepancies: workflow object structure to match the state machine docs, File object structure to follow the documented model -- API Endpoint paths to correct to be: /api/workflows/${workflowId}/logs?id=${workflowState.lastPolledLogId}; Same issue to adapt for messages endpoint -- Data Handling: lastPolledLogId and lastPolledMessageId tracking variable paths to ensure correctly -- uploadAndAddFile: no change, this happens in the backend -- submitUserInput() and createWorkflow() to align response handling with the documented workflow object -Updates Required: -- Implement pollWorkflowStatus() function -- Define estimateJsonSize() function -- Fix API endpoint paths to match documentation (?logId= → ?id=) -- Update object models to match documentation -- Improve error handling according to the state machine -- Fix file handling to match documented file object model - - -Can you please refactor workflow_utils.js. You already have the other documents. - -Attached: Frontend State machine Documentation as ruleset for the refactoring, the current frontend - -To organize workflow... modules like this: -1. Centralized State Management: Use a single state object that all modules reference. -2. Event-Based Updates: Use a simple event system to trigger UI updates when state changes. -3. Clear Separation of Concerns: - * Model: Manages workflow state and API communication - * View: Purely responsible for rendering the UI based on state - * Controller: Connects user actions to model updates - -Comments: -- all variables and objects and functions and classes to name in camelCase, not in snake_case -- Adapted routes to implement -- I do not need backwards compatibility -- please remove all unnecessary elements and provide smart, well structured code, which is maintainable - -New File names: -- workflow.js - The main module as manager and coordinator -- workflow_state.js - Centralized state management -- workflow_api.js - API communication layer -- workflow_ui.js - UI rendering layer - - - - - -Can you please refactor the backend with those inputs: - -Attached: Backend State machine Documentation as ruleset for the refactoring - -Comments: -- all variables and objects and functions and classes to name in camelCase, not in snake_case -- Adapted routes to implement -- I do not need backwards compatibility -- please remove all unnecessary elements and provide smart, well structured code, which is maintainable - -If you need further documents, please tell me. - - - - - -I like your proposition. So do the refactoring according to your proposition to clean and structure with these documents: -- workflow_presentation.js -- workflow_presentation_core.js -- workflow_presentation_components.js (here to group the functions accordingly for log, chat, files, ui) -- workflow_presentation_utilities.js - -Can you also split the css files to: -- styles_workflow.css --> here only to keep the basic formatting for the layout -- styles_workflow_log.css -- styles_workflow_chat.css -- styles_workflow_files.css -- styles_workflow_ui.css - - - - -I would like to refactor the frontend to match the updated backend. - -Please do this: - -- General: Adapt to backend changes and simplify polling and frontend objects status, remove unnecessary elements. 
- The Workflow object has only one attribute for the status of the workflow and for polling to know if polling shall be active or not. this is "status" with value "completed" or "running". All other status objects for workflow to remove. -- polling start/finish and frontend element status have only to look at the "status" value of the workflow. especially all the routines for the buttons "stop", "send", animations only rely on this status. -- based on this create one centralized function, which gets the workflow status and all other status changes in the front end. based on this, the function manages ui adaptations. so we have a maintainable place to control and debug all status changes. -- for log entries to show in the console: always check the last log entry for a progress update. logging is done such that progress information is also passed. is this clear to you? - -what other simplifications or consolidations do you see to improve code for clear debugging and maintainability? - -please first give a review plan, before doing code. - - - - -can you do the following adaptations for the workflow management for the frontend: -- german comments in logs and prompts to translate to english. where to adapt what? -- ai calls to adapt for user language if necessary (additional parameter in the lucydom ai call) - -- can you check all self.log_add(...) statements and rearrange them for the revised function call. They are for the progress of a workflow to show in the front-end. I want all messages to be in a standardized format and organized along the workflow, so that the user understands the logical progress. Not too much information, but the relevant steps to show. Within loops to tell progress in percent by having a log_add in the loops (so to add a progress attribute to the function call) - -please deliver adapted modules when more than 3 parts have to be adapted, otherwise the parts to adapt. - -can you do the following adaptations - -for document class: -- class Document to have a "data" attribute, where the file-data is stored in base64 format - -based on this: -- task object for agents to enhance with this attribute - -for content in contents in documents, when adding a file to a document object: -- to set "base64_encoded" if encoded. this should already be, to check - -when building the task for the agents: -- ensure attribute "data" is integrated, containing filedata base64 encoded -- in each content to deliver "data" as it is, optional "base64_encoded" attribute depending on data format, to add attribute "data_extracted" and to store here the extracted data from the ai call - -everywhere: -- to remove base64 checks or tests. only to use the base64_encoded attribute -- to use the enhanced attributes for document ("data" containing filedata in base64 format) and content ("data", "base64_encoded", "data_extracted") - -please tell me where to adapt what in the code. I do not need fully new code. - - - - -please revise all chat_agents* modules: -- all comments, logs and outputs in english language -- all ai answers in the language of the user -- no language specific features like analysis of words. a prompt in japanese would not work with this! i need it generically. -- why are there still data extraction routines in the modules? - data is already delivered in the input_documents section. - -documentation agent: -- why try to find out the document type, when in the "label" of the files to deliver the extension is ALWAYS included (e.g. .docx, .csv, etc.). 
Please revise, this can be very much shortened and simplified - -webcrawler_agent: -- there is a try - except mapping problem in the code. please also fix this -- - -also attached chat.py and chat_content_extraction (centralized), so that you can see the structure of the passed parameters. - - - - -replace all explicit prompts. -can you compile for me where chat.py has explicit texts to the user inside the messages? - imagine a Japanese user working with it, he would not understand them. the references to the code elements are sufficient. - - -clean up the agents registry incl. the agents - -clean up the file upload & drag&drop so that the file is simply written to the db with the file in the file object - -a function for integrating a file into a message, based on a db-file-id or a document-part-from-agent; with it fill all attributes incl. a summary per content --> one file per extractor type - -Workflow: -- NO-FILES for the workflow! -- All documents in message objects -- Uploads only to store in document object with file inline and parsed into content[] - - - - - -can you please rebuild your code proposal as class "ChatManager" in the module "chat.py" and deliver this class to me. here are additional infos and documents. - -for the implementation of the functions please use the attached modules as a basis, but create all code anew. the current code is much too long and has too many details on all levels. the implementation of the functions shall likewise be high-level, with all detail work moved out into basic functions. - -the attachments for this: -- lucydom_model and lucydom_interface : data model and interface to the data model (we work only with the workflow object) -- workflow.py: the router file, which uses the functions of lucydom_interface over the gateway -- agentservice_registry (old): registry of the agents, please recreate it new and compact as "chat_registry.py" -- agentservice_base (old): template for agent definitions. - -can you please do the following with the data model (it was adapted): - -1. rework lucydom_interface.py so that it works correctly again with the adapted data model. - -2. rework workflow.py so that the repeatedly identical functions of the routes are moved into helper functions, and rewrite all routines so that they do not call "agentservice_workflow_manager.py" but "chat.py". no implementations in the router function "workflow.py"; instead hand them over to the chat.py function. Rewrite the route "submit_user_input" so that workflow_id can also be empty. call the function "workflow_integrate_userinput" directly. - -3. implement the functions with these hints: - -workflow_integrate_userinput: - - rename the parameter workflow to an optional workflow_id. it can initially be None when a new workflow starts. therefore first call the function workflow_init(workflow_id), to be implemented, which returns the workflow object. - - in general 2 communications are run: - - a) "log_add" (renamed from "send_message_to_user") sends a log entry, implemented with the implementation in "lucydom_interface.create_workflow_log", and at the same time creates an "Info" entry in the logger - - b) "message_add" stores a message in the workflow object. Implementation via lucydom_interface - - Before step 1, 
store the message_user in the workflow as a new message - Instead of "# Send initial response", store the "user_response" as a message object in the workflow and also write the obj_answer and obj_workplan to the logger right away, using a helper function "json2text()", which renders the json object as a readable, read-only structure - send_message_to_user(step_info), write this as log_add - rename format_final_response to format_final_message and use it to create the final message object with the documents, then add it to the workflow with message_add -- update_workflow(...) no longer needed; workflow_finish instead - - -prompt_project_manager: - - use only one type "doc_type" and provide a closed list of options for it, coming from the function get_available_document_types() - - the obj_workplan shall have, per list element of doc_input and doc_output, a Dict with the elements "label","doc_type". here too, specify the closed list of possible values, which comes from the function get_available_document_types(). - - -workflow_init: - - if the workflow_id is empty or does not exist, a new workflow is created, otherwise the existing workflow is loaded - - the status values are set: status="running", started_at, last_activity=started_at - -workflow_finish: - - the status values are set: status="stopped", last_activity - -message_add: -- append the message to the workflow -- the status values are set: last_activity, last_message_id - -get_available_agents: -- call the function from the agents_registry - -get_available_document_types: -- return a list of these doc types: text, csv, png, html - -summarize_workflow(workflow,prompt): -- through the chronology of the messages from current to historical, fetch the summary per message with the function summarize_message(prompt). Output the summary with agent name, the generated summary, and the list of the documents, each with its own summary - -summarize_message(prompt): -- generate the summary of the message with the prompt via an ai call. Output the summary with agent name, the generated summary of the content, and the list of the documents, each with its own summary - -summarize_user_documents: -- per document, summarize the content with the given prompt and output the list as [document.content: text] - -call_agent: not needed, ai calls can be made directly over the connector, which is included initially: "from connectors.connector_aichat_openai import ChatService" - - - - - -Can you create the python function for me to do the following. I want a compact function which contains no details except the prompt part up to and including the answer to the user. Please move all necessary data conversions and details out into helper functions. They do not have to be implemented; only their input and output need to be defined. - -# Context - -The user delivers a request in the AI chat in a message object. It contains his prompt and a list of the supplied documents with their contents in the "message" object. The previous chat history is also available in the "workflow" object. - -We are inside the python function "workflow_integrate_userinput", where the user prompt arrives, i.e. these 2 parameters: "message_user" and "workflow". - -A list of agents is available. The agents are of this kind: -- Loop: It executes repetitive tasks. 
It needs a list of documents and a prompt to apply to each document; it delivers a list of "content" -. Coder: It executes Python code. Needs as input a prompt, content and the specification of the result format. -(more...) - -# Task - -Can you please put together the prompt for the project manager, who delivers the answer to the user. - -This is what he shall do: - -1. Deliver a list of results which the user needs for his answer, as json object "obj_answer". The answers of the project manager shall be delivered strictly in a prescribed json format. - -2. Send the answer describing the approach to the user, with the result documents as a list - -3. If inputs from agents (these are defined with their properties according to "obj_agents") are needed for the answer or the results, specify as a json list (I call it "obj_workplan") which agent shall deliver which result - -Then the code shall do this: - -4. have the agents execute according to obj_workplan and inform the user about each step. collect the delivered documents as a list "obj_results". Serve each agent with the data objects according to its data structure. - -Then, based on the delivered documents, send the final answer to the user. Integrate documents of type "text" directly into the answer to the user. Reference the documents. - -Then in the code: - -5. Send the user the answer with the documents - - -Each document shall be uniquely identifiable by its label. You have all document content labels in the workflow object. - -The object information for this: - -- data model for workflow including message: - - - workflow - - messages: list of message - - - message - - agent (who created message) - - input (the input prompt) - - content (text) - - documents: list of document - - - document - - source - - contents: list of content - - - content - - label - - format: formatType - - data: the data of the content in the format according to formatType - - - formatType: [text, csv, jpg, gif, png] - - -- obj_answer: json list with these attributes: - - label: document label (unique name in the documents list) - - doc_type_src: document type of the document to be delivered: [text, csv, png, html] - - doc_type_final: document type of the document to the user: [text, csv, jpg, gif, png, pdf, html, docx, xlsx] - - summary: summary of required document content - -- obj_workplan: json list with these attributes: - - agent: agent identifier based on the given agent list with the skills of the agents - - doc_output: List of label,doc_type_src (documents to deliver) - - prompt: Prompt to use for answer delivery and document-content-extraction - - doc_input: List of label,doc_type_src (documents to read with prompt) - -- obj_agents: Per agent this information is available: - - name: Its name, to call the corresponding function - - skills: What this agent does - - input: data format in which the agent needs the information - -- obj_result: List of documents with label, format, data - -The content object shall be used consistently whenever content is passed.
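The data model above, written out as Python dataclasses (field names straight from the list; the concrete types are assumptions):

    from dataclasses import dataclass, field
    from typing import List

    FORMAT_TYPES = ["text", "csv", "jpg", "gif", "png"]

    @dataclass
    class Content:
        label: str    # unique name within the documents list
        format: str   # one of FORMAT_TYPES
        data: str     # payload in the format given by "format"

    @dataclass
    class Document:
        source: str
        contents: List[Content] = field(default_factory=list)

    @dataclass
    class Message:
        agent: str    # who created the message
        input: str    # the input prompt
        content: str  # text
        documents: List[Document] = field(default_factory=list)

    @dataclass
    class Workflow:
        messages: List[Message] = field(default_factory=list)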
-
-
-
-backend: all object actions in interfaces generic for the objects in models, with CRU methods
-
-
-We have an AI agents workflow here.
-
-A big problem is document extraction. I uploaded a PDF file with a picture inside. In the database I see that the document has one content item, "text", consisting of a single newline, marked as "is_extracted=True". It is missing the picture inside the PDF.
-
-I would like the following implementation for files in a workflow:
-
-How documents arrive in the workflow:
-a) User input via upload or drag & drop: the file shall be stored in the database (files) and its content stored in the workflow message as a documents item with a reference to the file_id in the database. All contents of the file will be stored as content items in the document item of the message object. Depending on the content type, they will be extracted as text or as a base64 string (e.g. images). The document id will be a UUID and the document source id the integer id of the object in the database table "files".
-b) Documents produced and delivered by the agents: exactly the same as a).
-
-The content provided to an agent will now be a document consisting of the content of all previous messages, including the extracted content of the documents within those messages. The extracted content is produced per content item of the document:
-- For text: an AI call with the extraction prompt delivers the text to integrate
-- For an image (available as base64 content): an AI call with the extraction prompt delivers the text to integrate
-
-This way we no longer have the problem that file content is not found by the agents.
-
-For the code implementation I see a big opportunity to massively reduce code by building basic methods to be used everywhere (signatures sketched below):
-1. Function document_store_upload(message_id, fileName, filepath, ...) --> stores an uploaded or drag-&-drop document from the user and returns the document object. This function performs the steps of a) and b) as described above and identifies the file type.
-2. Function document_store_agent(message_id, fileName, document_content, document_type, ...) --> stores the document produced by an agent and returns the document object. This function performs the steps described in section a) above.
-3. Function document_get_from_message()
-
-Based on these 3 functions, all operations can be done much more comfortably in the workflow, but also in connection with the UI (download file, copy file, preview file), because all references to the files are always ensured.
-
-Can you analyze this idea?
-What did I not yet consider that would be relevant for adapting the current code?
-How big is the effort to implement this logic?
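As referenced above, hypothetical signatures for the three basic document methods; the parameter of document_get_from_message and the returned document dict are assumptions added for illustration:

# Hypothetical signatures for the three basic document methods; the return
# shapes and the lookup parameter are assumptions for illustration only.
from typing import Any

def document_store_upload(message_id: str, file_name: str, file_path: str) -> dict:
    """Store a user-uploaded (or drag & drop) file in the 'files' table and
    attach it to the message as a document with extracted content items."""
    raise NotImplementedError

def document_store_agent(message_id: str, file_name: str,
                         document_content: Any, document_type: str) -> dict:
    """Store a document produced by an agent, following the same steps as a)."""
    raise NotImplementedError

def document_get_from_message(message_id: str, label: str) -> dict:
    """Return the document object (with contents) referenced by its label."""
    raise NotImplementedError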
-
-
-Currently the web crawler is always called for unclear prompts. Can you please add an agent for "creative" or "knowledge" answers and select him rather than the web crawler (i.e. adapt the criteria for the web crawler so that he is only called for explicit web research or internet search)?
-
-The Creative agent shall be selected for open questions or simple documentation topics, e.g. writing an email, writing a birthday card, what to consider when going to the USA for a year, etc. He can also deliver documents, so specify in his prompt that it is clear what he delivers and how it is handed over to the next agent.
-
-The exception for the "poweron" keyword shall also be routed to this agent, meaning he is the one to answer the keyword "poweron". With that, please remove all "poweron"-specific code in the modules and integrate the answer for poweron into this "Creative" agent.
-
-Please use agentservice_base.py to create this agent (same template as for all other agents).
-
-
-Module "agentservice_agent_documentation.py": please make report generation adaptive to the prompt. For simple reports, favor a summary; for complex reports, work with chapters.
-
-
-PowerOn message: can you build in that, for a user prompt asking in any language "what PowerOn is", the reply is given in the language of the request, along these lines (please phrase it nicely): "I am happy to be part of the PowerOn family, which is committed to supporting one another and doing good."
-
-
-
-DOCS
-System documentation for investors (high-level structure, integration capability and scalability)
-System documentation for code integration
-Release notes (what the thing can do)
-Log of changes
-System architecture (architectural principles, components and their structure)
-
-
-
-
-# WORKFLOW EXECUTION
-
-The workflow execution shall be adapted as follows:
-1. The workflow starts as before, up to and including message initialization.
-2. Then the work plan is created via the AI call, yielding a list of activities to execute. Per step, the following is captured in a structured way:
- - What is to be done in the step? This as an AI prompt, used afterwards to define the agents for the step
- - Which data is needed for it? This phrased as an AI prompt to the file manager
- - Which result shall be delivered? A structured statement of format requirements (e.g. "list of files", "text", "JSON", "table", etc.)
-3. Now the list of activities is worked through. Per activity:
- - Assemble the agents with their properties and the result format
- - Use an AI call to determine which agents are needed, in which order
- - Now execute the agents step by step; per agent:
- -- Prepare a message object with the prompt and a reference to the latest message object in the workflow
- -- Use the helper module "agentservice_dataextraction.py" to extract the required data from the workflow and add it to the agent's message object. Also define the messages object in the helper module.
- -- The agent delivers the result, which is appended to the workflow as a message object.
-4. Now create the summary of the results produced by the agents for the user and also store it as a message in the workflow.
-
-
-# CODE STRUCTURE
-
-Currently every module, and also the main module of agentservice*, contains detailed code. Can you clean up the code in the same pass so that "agentservice_workflow_manager", as the master module, only calls functions and does not also handle details? That way the workflow can be steered more clearly.
-
-The messages in _add_log() as well as the logger messages are cluttered and barely help with analysis. Please structure these messages along the workflow and also output the moderator instructions (summarized in _add_log and, with their parameters (long texts truncated), in the logger), so that troubleshooting becomes easier.
-
-Please move helper functions that are used everywhere into a utility module. As an idea, things like:
-- A class with methods for reading, writing and extracting messages in the workflow, including type conversion from dict to str, so that I can e.g. write (just an idea; there may be smarter functions): workflow(id).documents.extract_by_prompt(prompt).to_str() (see the sketch after this section)
-- Please analyze the code for which functions make sense
-
-Possibly other topics as well that help simplify the code.
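A minimal sketch of what such a fluent utility class could look like, purely illustrative; the class names, the no-op extraction, and the in-memory loading are assumptions:

# Hypothetical fluent wrapper over a workflow dict, as suggested above.
class DocumentSet:
    def __init__(self, documents: list[dict]):
        self._documents = documents

    def extract_by_prompt(self, prompt: str) -> "DocumentSet":
        # Placeholder: would run the extraction prompt against each document.
        extracted = [d for d in self._documents]  # no-op in this sketch
        return DocumentSet(extracted)

    def to_str(self) -> str:
        # Convert the collected contents into one plain-text block.
        return "\n\n".join(str(d.get("contents", "")) for d in self._documents)

class workflow:
    def __init__(self, workflow_id: str):
        self._data = {"id": workflow_id, "messages": []}  # would be loaded from storage

    @property
    def documents(self) -> DocumentSet:
        docs = [d for m in self._data["messages"] for d in m.get("documents", [])]
        return DocumentSet(docs)

# Usage as envisioned in the note:
# text = workflow(wf_id).documents.extract_by_prompt(prompt).to_str()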
-The goal shall be that the workflow and the agent code no longer have to spell out every detail, with error-catching routines over and over again, but that we can access predefined modules and use them consistently. This shall shorten the code massively.
-
-
-
-
-
-# READING FILES
-
-If a file (a document in the database) is readable as text (txt, csv, html, text in a PDF, etc.), its text is read directly and stored in the DB as DocumentContent --> is_extracted=True. If a document is not readable as text (images, videos, images inside PDFs, etc.), the text of the corresponding DocumentContent is not extracted, i.e. is_extracted=False. (Note: extraction then only happens later in the workflow, with a prompt.)
-
-
-# AGENTS
-
-In every agent profile, add an attribute that states specifically in which format the agent returns its result (e.g. "DocumentID", "Text", "List of ...", etc.).
-
-
-# HELPER FUNCTIONS
-
-1. data_extraction(prompt) --> messages: run an AI call with a list of all files with their metadata and all messages in the workflow. Use the prompt to check which contents are required from which data object. The result shall be a list that contains, per data object, the prompt needed to extract the required data. Work through this list (if a document's content has not been extracted yet, extract it now with the corresponding function; image extraction is already available as a function) and return the extracted data with its context information as a structured text object (metadata with extracted contents). A stub of this signature follows after the module summary below.
-
-
-# ADDITIONAL AGENTS
-
-NEW: The Filecreator can do the following, relevant to its capabilities:
-Create a file --> create a document object in the database with the supplied content and data type and return its id to the data object.
-
-
-To implement: Coder
-This one shall generate Python code, taking as parameters the functions available in the surrounding system (e.g. for loading and saving files) (as an extension of the example, per function the parameters and the result format shall be specified; please make a suitable proposal here). Then execute the code, as in the code example "_code_exec_temp.py", and return the result.
-
-
-
-
-Workflow module refactored
-
-Summary of Changes
-I've refactored the workflow module into separate modules with clear responsibilities:
-
-1. workflow.js - Main Coordinator
-
-Acts as the central controller for the workflow functionality
-Coordinates interactions between all other modules
-Manages the workflow lifecycle (starting, stopping, resetting)
-Contains minimal direct DOM manipulation
-Maintains the core workflow state
-
-2. workflow_ui.js - UI Rendering and Layout
-
-Handles all DOM rendering functionality
-Manages layout changes (resize, expand/collapse)
-Sets up UI-related event listeners
-Updates visual status (buttons, statistics)
-Completely separates UI concerns from data and business logic
-
-3. workflow_data.js - Data Management
-
-Handles all API communication (via utils.js)
-Centralizes state management for workflow data
-Processes data from API responses
-Manages file references and retrieval
-Handles data statistics tracking
-
-4. workflow_features.js - Feature Modules
-
-Manages chat functionality
-Handles file upload/processing
-Controls log management
-Processes user input
-Encapsulates drag-and-drop functionality
-
-5. workflow_utils.js - Helper Functions
-
-Contains shared utility functions
-Text formatting helpers
-File-related utility functions
-Error/dialog management
-Data validation and conversion
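As referenced under "HELPER FUNCTIONS" above, a hypothetical stub of the data_extraction helper; the extra workflow parameter and the return shape are assumptions:

# Hypothetical stub for the data_extraction helper described above;
# parameter and return types are assumptions for illustration.
def data_extraction(prompt: str, workflow: dict) -> list[dict]:
    """Ask the AI which contents are needed from which data object, extract
    them (running deferred extraction where is_extracted is False), and
    return structured text objects (metadata + extracted content).

    Returns a list like:
        [{"source": "<file or message id>", "metadata": {...}, "text": "..."}]
    """
    raise NotImplementedError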
-
-
-
-* Refactoring brief: workflow system overhaul *
-
-## Overview
-This brief covers a complete overhaul of the workflow flow in both the frontend and the backend. The goals are a simpler user interface, better code modularization, optimized data handling, and the addition of a delete function for individual messages to limit data volume.
-
-## Frontend requirements
-
-### 1. Remove UI elements
-- Complete removal of the "Enter prompt" section
-- Removal of all buttons in the "Execution & Results" area, except the "Stop workflow" button, which shall only be visible while a workflow is active
-- The "Stop workflow" button shall be hidden automatically as soon as a user input is requested
-- Removal of the display of the selected workspace in index.html
-
-### 2. Extract the user input module
-- Extract all user input functions from "workflow.js" into a new separate module "workflow_userinput.js"
-- The new module shall be used both for the initial prompt and for all further user answers in the workflow
-
-### 3. Functionality of the user input module
-The new "workflow_userinput.js" module shall contain these functions:
-- Detection of when a user input is required (initially and whenever the user agent is called)
-- Selection of predefined prompts
-- File upload and drag & drop functionality
-- Sending the prompt to the backend with the workflow id, if present
-- Implementation of a delete function ("x") for every message and attached file in the chat log
-
-### 4. Message delete function
-- Every message in the multi-agent chat log gets an "x" button for deletion
-- The delete function shall also be implemented for files within a message
-- Seamless API integration with the new DELETE endpoint for messages
-
-## Backend requirements
-
-### 1. Route "workflow.py"
-- Reduce to minimal routing functionality
-- Move all implementation logic into the "agentservice_workflow_manager"
-- Add a new endpoint: `DELETE /api/workflows/{workflow_id}/messages/{message_id}` (a sketch of this endpoint follows at the end of this brief)
-
-### 2. Workflow manager logic
-Rework the "agentservice_workflow_manager" with the following flow:
-1. Workflow initialization:
- - For a new workflow: initialize with an empty messages object
- - For an existing workflow: take over the existing messages object
-
-2. Message object management:
- - Start a new message object for each interaction
- - Fully use the data model from "lucydom_model.py"
- - Store correctly in the database
-
-3. File preparation:
- - Create file contexts and integrate them into the newest message object
- - Extract and store file contents
- - Format the data for agent processing
-
-4. Agent workflow:
- - Initialize the available agents, including the user agent
- - Implement the moderator decision logic (already implemented) -> either a list of agents is processed (WITHOUT the user agent!), or the user agent is called.
- 4a) Execute the agents in the defined order
- 4b) Finish with the user agent and a prompt request (note: pass the workflow id along so that, after the user's answer is sent from the frontend, the workflow continues at step 1. In the backend, it ends here.)
-
-5. Message delete function:
- - Implement the delete logic for individual messages
- - Completely remove the message data from the backend store as well
-
-## Affected files
-
-### Frontend files:
-1. `workflow.js` - Comprehensive rework and removal of user input functionality
-2. `workflow_userinput.js` - New file for the extracted user input functionality
-3. `index.html` - Removal of the no-longer-needed UI elements and integration of the new module
-4. `main.js` - Adjustments for the changed modularity
-5. `globalState.js` - Adjustments for the changed workflow structure, if needed
-6. `utils.js` - Extension with the new DELETE function for messages
-
-### Backend files:
-1. `workflows.py` - Simplification and addition of the new DELETE endpoint
-2. `agentservice_workflow_manager.py` - Comprehensive rework of the workflow logic
-3. `lucydom_interface.py` - Extension with methods for deleting messages
-4. `agentservice_agent_user.py` - Adjustments for the new user input handling
-
-## Error handling
-- Frontend:
- - Implement consistent error handling for all API calls
- - Show user-friendly error messages for failed operations
- - Status indicators during running operations (e.g. deleting messages)
-
-- Backend:
- - Return HTTP status code 404 when a message to delete is not found
- - Ensure that all workflow operations are transactionally safe
- - Detailed logging functionality for error diagnosis
-
-## Code quality guidelines
-- Follow clean code principles (DRY, SOLID)
-- Consistent naming and documentation
-- Removal of unused functions and code parts
-- Sufficient comments for complex logic
-
-## Additional notes
-- Data may be deleted without concern
-- No migration strategy required; the system starts fresh
-- No confirmation dialogs required for deleting messages
-- No special permission requirements for deleting messages
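As referenced in the brief above, a minimal sketch of the new DELETE endpoint, assuming a FastAPI app (uvicorn is already used to serve the gateway) and a hypothetical workflow_manager helper:

# Hypothetical sketch of the DELETE endpoint; the router name and the
# workflow_manager helper are assumptions, not the actual code.
from fastapi import APIRouter, HTTPException

class _WorkflowManagerStub:
    def delete_message(self, workflow_id: str, message_id: str) -> bool:
        # Expected to remove the message and its files from the backend
        # store and return False if the message does not exist.
        return False  # placeholder

workflow_manager = _WorkflowManagerStub()
router = APIRouter()

@router.delete("/api/workflows/{workflow_id}/messages/{message_id}")
def delete_message(workflow_id: str, message_id: str) -> dict:
    deleted = workflow_manager.delete_message(workflow_id, message_id)
    if not deleted:
        # Per the error-handling requirement: 404 when the message is missing.
        raise HTTPException(status_code=404, detail="Message not found")
    return {"status": "deleted", "message_id": message_id}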
-
-
-
-
-*WORKFLOW*
-
-I have completely adapted the backend with the workflow and the data model. The most important changes are the data model for workflow and messages. Now the frontend must be adapted accordingly.
-
-
-Here, for information, the flow of the workflow in the backend:
-
-1. The user can (A) start a new workflow or (B) deliver a user input to an existing workflow. The endpoints are attached.
-. Variant (A): The user sends a prompt with files for a new workflow. This creates a new, empty workflow.
-. Variant (B): A user input, possibly with files, arrives for an existing workflow. A messages object is delivered as input. The workflow status is set to "running".
-
-2. Message initialization: the last message object is closed (if one exists) and a new message object is created. This one is now completed.
-3. File preparation: file contexts are created and filled into the newest message object. File contents are read, extracted and filled into the message object. Data is formatted for processing by the agents.
-4. Agent initialization: the available agents are loaded, including the user agent.
-5. Moderator decision
-6. Agent execution, until at the end the user is called upon to give an input.
-7. Now it is the "user agent's" turn. The user input always has, at the top, the question asked of the user. The user input carries the workflow id.
-Here the workflow ends. When the user sends his answer, it continues at step 1, variant (B).
-
-This to adapt in addition:
-
-- The initial prompt with file upload is at the same time also the prompt offered to the user when an input from him is needed in the chat. This input field shall be moved to the place where the user dialog is currently shown at "wait for user". That way the user always enters data in the same place.
-
-- Two methods shall be possible for file upload:
-
-- a: An upload button directly below the prompt. Each loaded file is then added as a small icon with the file name below the prompt, with an "x" so it can be removed again if not needed. If text could be extracted from the file, the field with the file name is green, otherwise red. Is it clear to you how to query this information?
- When the user submits the prompt, it is subsequently delivered into the results via the backend. Do not do this in the frontend, otherwise we have it twice.
-
-- b: Drag & drop: a file can be dragged into the prompt area, which also uploads it.
-
-- The selection window for predefined prompts shall be directly above the user's input field.
-
-- The buttons for controlling the workflow shall be moved to the top of the "Execution & Results" area.
-
-- The area "1. Select files" is therefore dropped, as it is integrated into the user prompt.
-
-- The area "2. Enter or select prompt" is dropped as well.
-
-- The area "3. Select agents" is dropped as well.
-
-- Result entry: every entry in the result log has, at the top, icons with the files the agents return, then the accompanying text. Each file icon has the buttons "Download", "Copy" (to clipboard) and "Preview".
-
-- With this, the "Workflow configuration" area is dropped completely. The two areas shall be kept, though, just with different content. The current "Workflow configuration" area shall now contain the "Execution & Results" area. The current "Execution & Results" area shall NOW contain the "File preview" area. There, a file delivered by the agents (see the "Result entry" point above) can be viewed as a "preview". At the top right there are two icons, "Download" and "Copy" (to clipboard).
-
-* FURTHER CHANGES *
-- The objects "agents" and "workspaces" are eliminated and to be removed. The corresponding navigation entries and all functions related to workspaces are dropped as well. There are no more workspaces.
-
-
-Before implementing, can you check whether you have all the necessary files and information, and put together for me what you are going to do?
-
-
-
-I want to change the agent chat workflow. As a first step, can you please adapt the backend for this?
-
-1. The data objects "workspaces" and "agents" are no longer needed and can be removed. The user works with individual workflows. Agents are fixed, defined on the system side.
-
-2. All workflow router endpoints stay as they are.
-
-3. New object structure for the workflow flow:
-
-
-
-4. The steps in a workflow (new) - please revise the code and remove all unnecessary parts.
-
- 4.1 The user can (A) start a new workflow (endpoint api/workflows/run) or (B) deliver a user input to an existing workflow (endpoint /api/workflows/{workflow_id}/user-input). Both variants shall start at execute_workflow().
-
- . Variant (A): The user sends a prompt with files for a new workflow. This creates a new workflow via execute_workflow(), but without a message object yet. A messages object is delivered as input. The initial workflow status is set to "running".
-
- . Variant (B): A user input, possibly with files, arrives for an existing workflow. A messages object is delivered as input. The workflow status is set to "running".
-
- 4.2 Message initialization: the last message object is closed (if one exists) and a new message object is created. This one is now completed.
-
- 4.3 File preparation: file contexts are created with prepare_file_contexts() and filled into the newest message object. File contents are read with read_file_contents(), extracted and filled into the message object. Data is formatted for processing by the agents.
-
- 4.4 Agent initialization: the available agents are loaded with initialize_agents() from the module "agentservice_part_agents", including the user agent.
-
- 4.5 Moderator decision: there is no "moderator" agent. Based on the newest message object and the profiles of the available agents, an OpenAI call determines how the request shall be solved. The result format shall be a prescribed JSON object stating which agent shall execute which job (= the prompt for it), with which answers and which data objects. No further subfunctions are needed for this. The answer format shall be prescribed such that available agents are mandatorily returned per job. The agent set shall always be either only the user, or only system agents. The answer format is thus a list of agents and their jobs (an illustrative example follows after point 8 below).
-
- 4.6 Agent execution: if a list of agents is returned (and not the user), the agents are called in the given sequence to deliver their contribution. Agent answers are integrated into the message object with create_agent_result(), with the various files separated out. Next, a summary of the answers delivered by the agents (only this part, not the earlier messages) is created via the OpenAI call and handed as input text to the user agent, which is now selected as the next agent. The flow is stopped when the workflow is stopped manually with stop_workflow() (status set to "stopped"), or when an error occurs (status set to "failed").
-
- 4.7 Now it is the "user agent's" turn. The workflow status is set to "waiting_for_user". The whole part with _process_user_input() etc. is dropped. After the user input, the workflow is not continued with _continue_workflow_after_user_input(), but regularly again at point 4.1 via branch (B).
-
- 4.8 Logging: every step is logged with _add_log(). Logs are stored in the workflow object.
-
- 4.9 Here the workflow ends regularly, until the user makes a new request. That means no more moderator checks and no maximum number of rounds are needed. The workflow is saved with save_workflow_results().
-
-
-5. Continuous polling: the client can query the workflow status with get_workflow_status(). Logs can be fetched with get_workflow_logs(). Results can be fetched with get_workflow_results().
-
-
-6. Agents: the available agents are loaded with initialize_agents() from the module "agentservice_part_agents". The agent data is kept in separate files to keep it maintainable. These agent modules are prepared:
-.agentservice_agent_user
-.agentservice_agent_coder
-.agentservice_agent_analyst
-.agentservice_agent_webcrawler
-.agentservice_agent_sharepoint
-.agentservice_agent_documentation
-Per agent, these attributes are defined:
-.name
-.description
-.capabilities
-Each agent then has its own required functions integrated in its file.
-
-7. Connector cleanup:
-- Move all connectors into a subfolder "connectors", i.e. all files named "connector_..."
-- Rewrite the two "connector_aichat..." connectors so that they accept data as input in the format of the messages object per point 3, and return it in that format as well.
-
-8. Changed storage of workflows: please adapt the code so that workflows are stored as database objects, analogous to prompts.
-I.e. extend the routes for "workflows" with "GET /api/workflows", "PUT /api/workflows/{workflow_id}", "DELETE /api/workflows/{workflow_id}"
-Rename the route "POST /api/workflows/run" to "POST /api/workflows"
-Rename the route "/api/workflows/{workflow_id}/results" to "GET /api/workflows/{workflow_id}"
-Adapt all modules accordingly and prepare the database classes.
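As referenced in point 4.5, a hypothetical example of what the prescribed moderator answer format could look like; the agent names, jobs, and data-object references are invented for illustration:

# Hypothetical moderator decision per point 4.5: a list of agents and jobs.
moderator_decision = {
    "agents": [
        {
            "agent": "agentservice_agent_webcrawler",
            "job": "Research current market data for topic X and return a text summary",
            "data_objects": ["message_latest", "file_123"],
        },
        {
            "agent": "agentservice_agent_analyst",
            "job": "Analyze the crawler output and produce a table of key figures",
            "data_objects": ["previous_agent_result"],
        },
    ]
}

# The agent set is either only system agents (as above) or only the user agent:
user_turn = {
    "agents": [
        {"agent": "agentservice_agent_user",
         "job": "Please confirm the proposed approach"}
    ]
}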
-
-The buttons "Start workflow" and "Reset" have no border. Is the style class wrong here, or perhaps not applied?
-
-Adjustment of the "Execution & Results" visual:
-- Leave the execution log as is. Add a button to the right of the other two buttons (show all / collapse details) that does the following with the execution log window: toggle function collapse and restore.
-- The areas "Multi-Agent Chat" and "Results" make no sense like this. Please merge these two areas into one large area named "Multi-Agent Chat Area". There, continuously log the agents' messages in an HTML view, each with the agent's name in the title and its message below. The latest message shall be expanded and all earlier ones collapsed, but the user shall be able to toggle each message to see the details.
-
-
-Can you optimize the flow of the agent chat as follows:
-- In every chat, add a "user agent" named after the logged-in user. If something in the chat is unclear, or additional information is needed, the moderator asks the user agent. Also, before ending the chat, he asks the user agent whether he agrees.
-- When the user agent receives a request, he can enter his text in a multi-line text field directly below the chat history in the "Multi-Agent Chat" area. He can also upload additional files. When he presses "Enter", the additional data with the added files is appended to the message, the input window disappears again, and the moderator continues the chat. After each user input, the counter restarts at round 1.
-
-Add statistics: Can you please, right-aligned next to the title of the "execution log", continuously update the statistics of how many kBytes (kB) of data were sent over the connector to the AI model (this is the data size of the message object) and how many kB of messages were returned? This figure is per workflow run, i.e. whenever a new workflow starts, the counter is reset to 0. In this format: "^ 250k v 1'250k", with v and ^ replaced by arrows.
-
-
-In the frontend settings, it shall be possible to change the language of the active user according to the list options in the "...model.py" files. The language then also applies to the attribute names in a form field in "generic-entity.js". A language change therefore triggers an update of the user via the API, by changing the language in the database.
-
-Can you adapt the execution logging? The log shall continuously show which assistant produced which result and which assistant is currently working. No percentages are needed; they make no sense. Keep the polling as is, but when no new data is available, simply keep appending "." to the last timestamp until the next message is output. Do you have all the data to adapt this in the frontend and in the backend?
-
-In the execution log, show only the title per entry and include the details in the log but hide them. The user can then expand the collapsed texts in the log to see the desired details on demand.
-
-In the frontend, in the workflow module, please make the execution log window dynamically resizable, in width and height. The same for the results window. Also design the view so that the window parts "Workflow configuration" and "Execution & Results" can be shown and hidden, so that one part at a time can use the complete working area, because a lot of text will be there. This is better for the user.
-
-Now to this central module. I would like the data to be displayed and edited as tables: for view, add, modify, delete, one icon per record at the far left, and at the very top in the header a "new item" symbol or text; make a proposal.
-
-Is it possible to have a checkbox per record, to select multiple elements, and to have icons at the top of the table for multi-delete?
-
-The table shall be filterable and sortable by all fields.
-
-Can you please adapt the code so that main.js dynamically loads the page modules in the attachment only when the corresponding page is opened in the navigation?
-
-Then please modularize main.js so that only function calls to sub-modules are executed there. Move the navigation menu out to "navigation.js". Keep the setup and operation of the current workspace inside main.js.
-
-The current main area with the selection of the workspace, the associated agents, etc. is now an object that can be displayed in the "mainView". Other objects can also be displayed in the mainView, each with their specific parameters, as explained below.
-
-In main.js, a global object is created of all elements that shall be contained in the navigation; it forms the basis for all function calls. With that, index.html no longer contains any details about the navigations.
-
-
-The global object has these attributes:
-
-globalState
-.objects
-.user
-.mainView
-
-Here is the specification of the objects.
-
-.objects[...]: holds a list of objects that can be loaded in the main screen. Please carry over these attributes per object, by analogy to today's js files in the attachment:
-- label: list of the label names in the various languages (default, en, fr, ...)
-- modulName: string; used for the object class "js/modules/{modulname}.js", for its html component "modules/part-{modulname}.html", and for the backend calls "/api/{modulname}/..."
-- icon: icon shown before the menu item
-- navigationContext: "left" for agents, data, prompts, users, mandates, workspaces; "top" for language selection, logout
-- isVisible (e.g. users and mandates are only shown if the corresponding permission exists)
-- isActive: whether the menu item is selected
-- navigationContext: these options for where an object is put into the menu:
- --"nav_left" for agents, data, prompts, users, mandates, workspaces
- --"nav_top" for language selection, logout
-- navigationActionType: what happens when the menu is clicked. These options:
- --"module": standard menu button. A module is loaded into the mainView. The module is only loaded and initialized with the data when the menu item is selected
- --"group_open": group header; start of a new group; all subsequent objects in the list belong to this group. The group can be expanded and collapsed in the menu. Initially the group is open, all menu items visible
- --"group_collapsed": group header; start of a new group; all subsequent objects in the list belong to this group. The group can be expanded and collapsed in the menu. Initially the group is collapsed.
-
-.user: attributes of the active user
-- mandate_id
-- user_id
-- username
-- full_name
-- language (default, en, fr, ...)
-- isAdmin
-- isSysAdmin
-- lastWorkspaceId: id of the most recently used workspace - currently "null"
-- session: currently null and unused
-
-.mainView: always contains the current attributes that the page in the mainView can use
-- currentWorkspace: object of the currently selected workspace
-- availableFiles[]: list of objects
-- availableAgents[]: list of objects
-- availablePrompts[]: list of objects
-- currentWorkflowId: id
-
-
-Can you please update part-workflow.html and workflow.js with the dynamic multi-agent chat that was adapted in the backend, and add the details of a running chat with expandable texts to the execution log? Make the execution log window dynamically resizable.
-
-Clean up and consolidate the CSS for shared classes, across all html and js in parallel.
-
-Admin page with CRUD for user management and mandate management, generic.
-
-In the frontend, in the generic form "generic-entity.js", the ID for a new object shall be either hidden or read-only. The ID is not needed; it is only created when saving to the database. I.e. after saving to the database, the data of the corresponding table is reloaded.
-
-
-Can you please adapt the code structure and logic of 'agentservice_interface.py' and improve the code structure for better maintenance and further development:
-
-1. Move the integration of the AI models, with the corresponding config data and the function calls, into separate files ("connector_ai_openai", "connector_ai_webscraping").
-Import and prepare the connector modules in 'agentservice_interface.py' during initialization.
-
-2. Do not run the agent chat 'execute_workflow' in the fixed order of the agents, but as a round table of the agents. That is, an AI moderator moderates the agents autonomously and, based on the produced answers and the agents' properties, calls the next suitable agent according to its 'capabilities' after an agent has delivered its answer.
-The initial prompt with the associated files and the chat history in the 'LogEntry' with the n latest records (n is read from the config file from the variable Application.MAX_HISTORY) is transformed into a 'message' object as a dictionary, which looks like this:
-    message = {
-        "role": "user",  # --> static, always like this
-        "content": [  # --> list of the files
-            {
-                "type": "text",
-                "text": prompt_text
-            },
-            {
-                "type": content_type,  # --> we will integrate this function later
-                "source": {
-                    "type": "base64",
-                    "media_type": mime_type,
-                    "data": base64_file  # --> here the base64-encoded content of the respective file
-                }
-            },
-            {
-                "type": "text",
-                "text": LogEntries  # --> here the log entries as one text bundle
-            }
-        ]
-    }
-When the AI moderator is of the opinion that the task is fulfilled, he ends the workflow.
-
-
-3. Initialization set: answers requests that need no specialized agents directly with the configured AI model. This covers generating text, code, and structures, analyzing files, creating graphics, etc.
-(Agent) Organizer: analyzes the user prompt and structures the jobs to execute as well as the required results to deliver
-(Agent) Developer: develops Python code on behalf of the other agents and then executes it
-(Agent) Webscrape: an agent that performs web scraping. It uses the function '_scrape_url' to scan a web page and return the content. It can also commission the Developer to generate code that runs the function _scrape_url with some logic (e.g. iteratively or batch-wise)
-(Prompt): Can you prepare a few initial prompts for the following question areas, available for selection:
-. Web research
-. Analysis
-. Minutes
-. Design
-
-
-4. Can you please add the missing CRUD methods in the modules "workspaces" and "prompts"? I believe POST and DELETE are missing.
-
-
-5. Improve database management: in the two modules "gateway_interface.py" and "lucydom_interface", no manipulations or references happen via IDs. The IDs for a new record are assigned only in the "connector_....py" modules. Every record has a unique id. No IDs are generated in the "...interface.py" modules. The query for id=1 is replaced with the function 'get_initial_id', explained below (a stub follows after this section).
-Please adapt the modules accordingly and add, in the "connector...py" modules, a system table that remembers which ID the first record of each table has, because that one is the respective system record. Then create a function 'get_initial_id' that can be called in the modules "gateway_interface.py" and "lucydom_interface" to query the id of the initial record per table.
-
-
-
-
-
-The gateway does not quite work yet.
-Can you please check the modules for me and structure them better?
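As referenced in point 5, a minimal sketch of how the system table and get_initial_id could fit together; the in-memory dict stands in for the connector's persistent system table, which is an assumption here:

# Hypothetical sketch of get_initial_id; the in-memory system table stands in
# for the connector's persistent storage, which is an assumption.
_system_table: dict[str, int] = {}  # table name -> id of its first (system) record

def register_initial_id(table: str, record_id: int) -> None:
    """Called by the connector when the first record of a table is created."""
    _system_table.setdefault(table, record_id)

def get_initial_id(table: str) -> int:
    """Return the id of the initial (system) record of a table,
    replacing the previous hard-coded query for id=1."""
    try:
        return _system_table[table]
    except KeyError:
        raise KeyError(f"No initial record registered for table '{table}'")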
-
-These are the requirements and the setup of the files:
-
-models.py: rename the file to "model_lucydom.py"
- - Remove the classes "User", "UserInDB", "Token" from the file and move them into a separate file "model_gateway.py"
- - All data type definitions live here, exhaustively and independently of the database system
- - All IDs are long integers, not texts
- - For every class and every attribute of a class, add a label stating the name of the attribute or class when it is queried in a form. The label shall have a default value and be settable per language
- - Make all objects multi-tenant capable, i.e. add the attributes "mandate_id" and "user_id" to every object
-
-model_gateway.py:
- - All data type definitions live here, exhaustively and independently of the database system
- - All IDs are long integers, not texts
- - For every class and every attribute of a class, add a label stating the name of the attribute or class when it is queried in a form. The label shall have a default value and be settable per language
- - Add the class "Mandate" with the attributes (id, name, language)
- - In the class "User", add "id", "mandate_id" and "language"
- - Make all objects multi-tenant capable, i.e. add the attributes "mandate_id" and "user_id" to every object
-
-Split database.py into 2 files, "connector_db_json.py" and "interface_lucydom.py".
-
-connector_db_json.py: a first connector of more connectors to come (a skeleton sketch follows below)
- 1. Parameters that are passed in:
- - DB_Folder, DB_USER and DB_APIKEY
- - Context parameters for "mandate_id" and "user_id", which must not be null
- - Mount the current JSON database in the folder DB_Folder and take it over as is. If the folder is missing, create it
- 2. The connector "db" is made available as an object
- 3. These generic methods are provided on the "db" object. Every query automatically filters the records to the context parameters "mandate_id" and "user_id", provided these parameters in a record are not null or ""
- - get_tables(optional filter criteria): list of all tables
- - get_fields(table, optional filter criteria): list of all attributes of a table
- - get_schema(table, language, optional filter criteria): object of all attributes of a table with their data type and the label in the corresponding language. Without a language given, the default value is used as the label
- - get_recordset(table, optional filter criteria for fields, optional filter criteria for records): returns the corresponding data object with the records
- - record_create(table, json with attributes): adds a record in the "mandate_id" context; all attributes not contained in the "json with attributes" are set to the default values per models.py
- - record_delete: deletes a record, but only if it is in the "mandate_id" context, otherwise refusal "Not your mandate"
- - record_modify: changes a record, but only if it is in the "mandate_id" context, otherwise refusal "Not your mandate"
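As referenced above, a hypothetical skeleton of connector_db_json.py covering only a subset of the listed methods; the one-JSON-file-per-table layout and the filtering details are assumptions for illustration:

# Hypothetical skeleton of the JSON db connector described above.
import json
from pathlib import Path

class JsonDbConnector:
    def __init__(self, db_folder: str, db_user: str, db_apikey: str,
                 mandate_id: int, user_id: int):
        if mandate_id is None or user_id is None:
            raise ValueError("mandate_id and user_id must not be null")
        self.folder = Path(db_folder)
        self.folder.mkdir(parents=True, exist_ok=True)  # create folder if missing
        self.mandate_id, self.user_id = mandate_id, user_id

    def _read_all(self, table: str) -> list[dict]:
        path = self.folder / f"{table}.json"
        return json.loads(path.read_text(encoding="utf-8")) if path.exists() else []

    def get_tables(self) -> list[str]:
        return sorted(p.stem for p in self.folder.glob("*.json"))

    def get_recordset(self, table: str) -> list[dict]:
        # Every query filters to the context parameters, unless a record's
        # context fields are null or "".
        return [r for r in self._read_all(table)
                if r.get("mandate_id") in (None, "", self.mandate_id)]

    def record_create(self, table: str, attributes: dict) -> dict:
        record = {"mandate_id": self.mandate_id, "user_id": self.user_id, **attributes}
        records = self._read_all(table)  # unfiltered, so other mandates are kept
        records.append(record)
        (self.folder / f"{table}.json").write_text(json.dumps(records), encoding="utf-8")
        return record

# db = JsonDbConnector("/data_lucydom", "user", "key", mandate_id=1, user_id=1)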
-interface_lucydom.py: an interface to the gateway; more interfaces will follow. The interface does this:
- 1. Mount the database with these parameters:
- - Connector "connector_db_json.py"
- - Database "/data_lucydom"
- - Data model "model_lucydom.py"
- 2. The object "db" can now be used
- 3. Initialization of the database if it does not exist, but only the minimally required objects: the "Default Workspace" in "workspaces"
-
-interface_gateway.py: an interface to the gateway; more interfaces will follow. The interface does this:
- 1. Mount the database with these parameters:
- - Connector "connector_db_json.py"
- - Database "/data_gateway"
- - Data model "model_gateway.py"
- 2. The object "db" can now be used
- 3. Initialization of the database if it does not exist, but only the minimally required objects: user "Admin", mandate "Root"
-
-app.py: structure the initialization clearly and adapt the endpoints to the new structure
- 1. Part one: include the interfaces.
- 2. All required initializations: these shall live in the respective interfaces, except for the generic parts.
- 3. Move all access & security functions out into "auth.py".
- 4. Keep all token endpoints completely generic and simplified:
- - No attribute definitions or field lists in there. When a model is adapted, no changes shall be needed here.
- - Simplify the queries and exceptions with helper functions, so that modeling the endpoints is very simple, clear and well arranged for the programmer.
- - Capture tasks as comments describing what must be rebuilt in the frontend due to all these endpoint changes.
-
-
-agent_service.py: rename to "interface_agentservice.py"
-- For all workflow endpoints that draw logic from only one interface, integrate the logic into the interface and simplify the code at the endpoint.
-- Only for endpoints that need logic combined from several interfaces, integrate the logic at the endpoint.
-- The goal shall be an endpoint code structure that is maximally lean and clear, including the structuring and grouping of the endpoints.

diff --git a/notes/produce_diagrams.md b/notes/produce_diagrams.md
deleted file mode 100644
index f4f02abc..00000000
--- a/notes/produce_diagrams.md
+++ /dev/null
@@ -1,48 +0,0 @@
-MERMAID DIAGRAM:
-
-Can you make the chart "wiki/diagramm_komponenten.mermaid"? Produce a component diagram based on the current code in poweron/*.
-If the document exists, add missing components and remove obsolete components.
-
-In box texts, use <br> instead of \n.
-
-For all subgraphs, add the path on a separate line so the module can be found in the code.
-
-Read all code modules carefully to identify all components and their relations.
-
-Connectors without texts, only lines.
-
-Add a connector between frontend and backend (apiCalls.js -> app.py).
-
-Connect app.py (main application module) with the route*.py files.
-
-Put all items of the frontend into the subgraph "Frontend".
-Put all items of the gateway into the subgraph "Gateway".
-
-Put the following boxes into a dedicated subgraph within their existing subgraph:
-- workflowManager.py, workflowAgentsRegistry.py, documentProcessor.py --> "Workflow"
-- mimeUtils.py, defAttributes.py, configuration.py, autho.py --> "Shared"
-- agent*.py --> "Agents"
-- workflow*.js --> "Workflow"
-- all *.js in js/modules/ not starting with workflow* --> "Administration"
-- formGeneric.js not into the subgraph "Shared", but into a separate subgraph "Shared"
-
-Connect main.js (the main app in the frontend) to navigation.js, globalState.js, login.js, register.js, msftCall.js, config.js.
-
-Connect navigation.js to moduleLoader.js.
-
-Connect moduleLoader.js to workflow.js and all *.js in js/modules/ not starting with workflow*.
-
-Connect all *.js in js/modules/ not starting with workflow* --> formGeneric.js.
-
-Connect formGeneric.js --> apiCalls.js.
-
-Use underscores (e.g. Backend_Python, Workflow_Modules, etc.) for all subgraph titles.
-
-If adding a legend, give the references the same colors as in the legend.

diff --git a/notes/readme.md b/notes/readme.md
deleted file mode 100644
index 894a3910..00000000
--- a/notes/readme.md
+++ /dev/null
@@ -1,39 +0,0 @@
-### Launch APP
-
-cd .\frontend_agents\
-cls; python ./server.py
-
-conda activate C:\Users\pmots\anaconda3\envs\poweron
-cd .\gateway\
-cls; uvicorn app:app --host 0.0.0.0 --port 8000
-
-
-### git permanent login with vs code
-git remote set-url origin https://valueon@github.com/valueonag/gateway
-git remote set-url origin https://valueon@github.com/valueonag/frontend_agents
-git remote set-url origin https://valueon@github.com/valueonag/wiki
-git remote set-url origin https://valueon@github.com/valueonag/customer-svbe
-git remote set-url origin https://valueon@github.com/valueonag/customer-althaus
-
-### git delete workflow runs (cleanup)
-
-gh auth login
-
-Navigate to your repository folder (if not already there):
-bash: cd /path/to/your/repository
-
-List workflow runs:
-bash: gh run list
-
-Delete a specific workflow run:
-bash: gh run delete [RUN_ID]
-
-Delete all completed workflow runs (to clear up space):
-bash: gh run list --status completed --json databaseId -q '.[].databaseId' | xargs -I{} gh run delete {}
-powershell:
-
-$runs = gh run list --status completed --json databaseId -q ".[].databaseId" | ConvertFrom-Json
-foreach ($run in $runs) {
-    Write-Host "Deleting run $run"
-    echo "y" | gh run delete $run
-}
diff --git a/query b/query
deleted file mode 100644
index a02a1cc7..00000000
--- a/query
+++ /dev/null
@@ -1 +0,0 @@
-postgresql
diff --git a/requirements.txt b/requirements.txt
index fd9d119f..f5a1a2dc 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,6 +10,7 @@ slowapi==0.1.8  # For rate limiting
 
 ## Authentication & Security
 python-jose[cryptography]==3.3.0  # For JWT tokens
+cryptography>=41.0.0  # For encryption/decryption of configuration values
 passlib==1.7.4
 argon2-cffi>=21.3.0  # For password hashing in gateway_interface.py
 google-auth-oauthlib==1.2.0  # For Google OAuth
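The new cryptography dependency backs the encrypt_value / decrypt_value helpers that the tool below imports from modules/shared/configuration. Their actual implementation is not part of this patch; a minimal sketch of how such prefix-tagged encryption could work with Fernet, offered purely as an assumption (the key store and the single "dev" key are placeholders):

# Illustrative sketch only; the real encrypt_value/decrypt_value live in
# modules/shared/configuration and may differ. Key handling is assumed.
from cryptography.fernet import Fernet

_KEYS = {"dev": Fernet.generate_key()}  # stand-in for the master key store

def encrypt_value(value: str, env_type: str) -> str:
    token = Fernet(_KEYS[env_type]).encrypt(value.encode("utf-8"))
    return f"{env_type.upper()}_ENC:{token.decode('ascii')}"

def decrypt_value(value: str) -> str:
    prefix, token = value.split("_ENC:", 1)
    return Fernet(_KEYS[prefix.lower()]).decrypt(token.encode("ascii")).decode("utf-8")

def _is_encrypted_value(value: str) -> bool:
    # Values are tagged with an environment prefix like "DEV_ENC:".
    return value.partition(":")[0].endswith("_ENC")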
diff --git a/test_excel_fix.py b/test_excel_fix.py
deleted file mode 100644
index 17a57070..00000000
--- a/test_excel_fix.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python3
-"""
-Test script to verify the Excel header parsing fix
-"""
-
-import sys
-import os
-import pandas as pd
-from io import BytesIO
-
-# Add the gateway modules to the path
-sys.path.append(os.path.join(os.path.dirname(__file__), 'modules'))
-
-from modules.interfaces.interfaceTicketObjects import TicketSharepointSyncInterface
-
-def test_excel_header_parsing():
-    """Test the Excel header parsing fix"""
-    print("=== Testing Excel Header Parsing Fix ===\n")
-
-    # Create a mock interface instance
-    interface = TicketSharepointSyncInterface(
-        connector_ticket=None,
-        connector_sharepoint=None,
-        task_sync_definition={
-            "ID": ["get", ["id"]],
-            "Summary": ["get", ["fields", "summary"]],
-            "Status": ["get", ["fields", "status", "name"]],
-            "Assignee": ["put", ["fields", "assignee", "displayName"]]
-        },
-        sync_folder="test",
-        sync_file="test.xlsx",
-        backup_folder="backup",
-        audit_folder="audit",
-        site_id="test"
-    )
-
-    # Test data
-    test_data = [
-        {"ID": "TEST-1", "Summary": "Test Issue 1", "Status": "Open", "Assignee": "John Doe"},
-        {"ID": "TEST-2", "Summary": "Test Issue 2", "Status": "Closed", "Assignee": "Jane Smith"},
-    ]
-
-    # Create Excel content
-    print("1. Creating Excel content...")
-    excel_content = interface._create_excel_content(test_data)
-    print(f"   ✓ Created Excel content: {len(excel_content)} bytes")
-
-    # Parse it back
-    print("2. Parsing Excel content...")
-    try:
-        parsed_data, parsed_headers = interface._parse_excel_content(excel_content)
-        print(f"   ✓ Parsed Excel content: {len(parsed_data)} records")
-        print(f"   ✓ Headers type: header1={type(parsed_headers['header1'])}, header2={type(parsed_headers['header2'])}")
-        print(f"   ✓ Headers content: header1='{parsed_headers['header1']}', header2='{parsed_headers['header2']}'")
-
-        # Test creating content with the parsed headers
-        print("3. Testing round-trip with parsed headers...")
-        new_excel_content = interface._create_excel_content(test_data, parsed_headers)
-        print(f"   ✓ Created new Excel content: {len(new_excel_content)} bytes")
-
-        # Parse the new content
-        final_data, final_headers = interface._parse_excel_content(new_excel_content)
-        print(f"   ✓ Final parse successful: {len(final_data)} records")
-        print(f"   ✓ Final headers: header1='{final_headers['header1']}', header2='{final_headers['header2']}'")
-
-        print("\n✅ All tests passed! The header parsing fix works correctly.")
-        return True
-
-    except Exception as e:
-        print(f"   ✗ Error during parsing: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-if __name__ == "__main__":
-    success = test_excel_header_parsing()
-    exit(0 if success else 1)
diff --git a/tool_security_encrypt_config_value.py b/tool_security_encrypt_config_value.py
new file mode 100644
index 00000000..7abb6356
--- /dev/null
+++ b/tool_security_encrypt_config_value.py
@@ -0,0 +1,375 @@
+#!/usr/bin/env python3
+"""
+Tool for encrypting configuration values.
+
+This tool allows developers to encrypt secret values for use in configuration files.
+It supports both text and JSON values and automatically determines the environment.
+It can also encrypt all *_SECRET keys in an environment file at once.
+
+Usage:
+    # Encrypt a single value
+    python tool_security_encrypt_config_value.py --value "my_secret_value" --env dev
+    python tool_security_encrypt_config_value.py --file "path/to/file.json" --env prod
+
+    # Encrypt all secrets in a file
+    python tool_security_encrypt_config_value.py --encrypt-all env_dev.env --env dev
+    python tool_security_encrypt_config_value.py --encrypt-all env_prod.env --env prod --dry-run
+
+    # Decrypt a value (for testing)
+    python tool_security_encrypt_config_value.py --decrypt "DEV_ENC:encrypted_value"
+"""
+
+import sys
+import os
+import json
+import argparse
+import shutil
+from pathlib import Path
+from datetime import datetime
+
+# Add the modules directory to the Python path
+sys.path.insert(0, str(Path(__file__).parent / 'modules'))
+
+from shared.configuration import encrypt_value, decrypt_value, _is_encrypted_value
+
+def find_secret_keys_in_file(file_path: Path) -> list:
+    """
+    Find all *_SECRET keys in an environment file that are not encrypted.
+
+    Args:
+        file_path: Path to the environment file
+
+    Returns:
+        list: List of tuples (line_number, key, value, full_line)
+    """
+    secret_keys = []
+
+    if not file_path.exists():
+        return secret_keys
+
+    try:
+        with open(file_path, 'r', encoding='utf-8') as f:
+            lines = f.readlines()
+
+        i = 0
+        while i < len(lines):
+            line = lines[i].strip()
+
+            # Skip empty lines and comments
+            if not line or line.startswith('#'):
+                i += 1
+                continue
+
+            # Check if line contains a key-value pair
+            if '=' in line:
+                key, value = line.split('=', 1)
+                key = key.strip()
+                value = value.strip()
+
+                # Check if it's a secret key and not already encrypted
+                if key.endswith('_SECRET') and value and not _is_encrypted_value(value):
+                    # Check if value starts with { (a JSON object, possibly
+                    # spanning multiple lines, including the bare "{" case)
+                    if value.startswith('{'):
+                        # Collect all lines until we find the closing }
+                        json_lines = [value]
+                        start_line = i + 1
+                        i += 1
+                        brace_count = value.count('{') - value.count('}')
+
+                        while i < len(lines) and brace_count > 0:
+                            json_lines.append(lines[i].rstrip('\n'))
+                            brace_count += lines[i].count('{') - lines[i].count('}')
+                            i += 1
+
+                        # Join all lines and create the full JSON value
+                        full_json_value = '\n'.join(json_lines)
+                        secret_keys.append((start_line, key, full_json_value, line))
+                        i -= 1  # Adjust for the loop increment
+                    else:
+                        # Single line value
+                        secret_keys.append((i + 1, key, value, line))
+
+            i += 1
+
+    except Exception as e:
+        print(f"Error reading {file_path}: {e}")
+
+    return secret_keys
+
+def backup_file(file_path: Path) -> Path:
+    """
+    Create a backup of the file before modification.
+ + Args: + file_path: Path to the file to backup + + Returns: + Path: Path to the backup file + """ + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + backup_path = file_path.with_suffix(f'.{timestamp}.backup') + shutil.copy2(file_path, backup_path) + return backup_path + +def encrypt_all_secrets_in_file(file_path: Path, env_type: str, dry_run: bool = False, create_backup: bool = True) -> dict: + """ + Encrypt all non-encrypted secrets in a file. + + Args: + file_path: Path to the environment file + env_type: The environment type + dry_run: If True, only show what would be changed + create_backup: If True, create a backup before modifying + + Returns: + dict: Results of the encryption process + """ + results = { + 'file': str(file_path), + 'env_type': env_type, + 'secrets_found': 0, + 'secrets_encrypted': 0, + 'errors': [], + 'backup_created': None + } + + # Find all secret keys + secret_keys = find_secret_keys_in_file(file_path) + results['secrets_found'] = len(secret_keys) + + if not secret_keys: + return results + + print(f"\n📁 Processing {file_path.name} ({env_type}):") + print(f" Found {len(secret_keys)} non-encrypted secrets") + + if dry_run: + print(" [DRY RUN] Would encrypt the following secrets:") + for line_num, key, value, full_line in secret_keys: + print(f" Line {line_num}: {key} = {value[:50]}{'...' if len(value) > 50 else ''}") + return results + + # Create backup if requested + if create_backup: + try: + backup_path = backup_file(file_path) + results['backup_created'] = str(backup_path) + print(f" 📋 Backup created: {backup_path.name}") + except Exception as e: + results['errors'].append(f"Failed to create backup: {e}") + print(f" ⚠️ Warning: Could not create backup: {e}") + + # Read the file content + try: + with open(file_path, 'r', encoding='utf-8') as f: + lines = f.readlines() + except Exception as e: + results['errors'].append(f"Failed to read file: {e}") + return results + + # Process each secret key + for line_num, key, value, full_line in secret_keys: + try: + print(f" 🔐 Encrypting {key}...") + + # Encrypt the value using the existing function + encrypted_value = encrypt_value(value, env_type) + + # Replace the line in the file content + new_line = f"{key} = {encrypted_value}\n" + lines[line_num - 1] = new_line + + # If this was a multiline JSON, we need to remove the remaining lines + if value.startswith('{') and '\n' in value: + # Count how many lines the original JSON spanned + json_lines = value.split('\n') + lines_to_remove = len(json_lines) - 1 # -1 because we already replaced the first line + + # Remove the remaining lines + for i in range(line_num, line_num + lines_to_remove): + if i < len(lines): + lines[i] = "" + + results['secrets_encrypted'] += 1 + print(f" ✓ Encrypted successfully") + + except Exception as e: + error_msg = f"Failed to encrypt {key}: {e}" + results['errors'].append(error_msg) + print(f" ✗ {error_msg}") + + # Write the modified content back to the file + if results['secrets_encrypted'] > 0: + try: + with open(file_path, 'w', encoding='utf-8') as f: + f.writelines(lines) + print(f" 💾 File updated successfully") + except Exception as e: + results['errors'].append(f"Failed to write file: {e}") + print(f" ✗ Failed to write file: {e}") + + return results + +def main(): + parser = argparse.ArgumentParser(description='Encrypt configuration values') + parser.add_argument('--value', '-v', help='Plain text value to encrypt') + parser.add_argument('--file', '-f', help='File containing the value to encrypt') + parser.add_argument('--env', 
'-e', choices=['dev', 'int', 'prod'], + help='Environment type (default: current environment)') + parser.add_argument('--decrypt', '-d', help='Decrypt an encrypted value (for testing)') + parser.add_argument('--interactive', '-i', action='store_true', + help='Interactive mode - prompt for value') + parser.add_argument('--encrypt-all', '-a', help='Encrypt all *_SECRET keys in the specified file') + parser.add_argument('--dry-run', action='store_true', + help='Show what would be changed without making changes (for --encrypt-all)') + parser.add_argument('--no-backup', action='store_true', + help='Skip creating backup files (for --encrypt-all)') + + args = parser.parse_args() + + try: + # Handle encrypt-all functionality + if args.encrypt_all: + file_path = Path(args.encrypt_all) + if not file_path.exists(): + print(f"Error: File not found: {file_path}") + return 1 + + if not args.env: + print("Error: --env is required when using --encrypt-all") + return 1 + + print("🔐 PowerOn Secret Encryption Tool") + print("=" * 50) + + if args.dry_run: + print("🔍 DRY RUN MODE - No changes will be made") + print() + + results = encrypt_all_secrets_in_file( + file_path, + args.env, + dry_run=args.dry_run, + create_backup=not args.no_backup + ) + + # Summary + print("\n" + "=" * 50) + print("📊 SUMMARY") + print("=" * 50) + print(f"File processed: {file_path.name}") + print(f"Secrets found: {results['secrets_found']}") + + if not args.dry_run: + print(f"Secrets encrypted: {results['secrets_encrypted']}") + print(f"Errors: {len(results['errors'])}") + + if len(results['errors']) == 0 and results['secrets_encrypted'] > 0: + print("\n🎉 All secrets encrypted successfully!") + elif len(results['errors']) > 0: + print(f"\n⚠️ Completed with {len(results['errors'])} errors") + else: + print("\n✅ No secrets needed encryption") + else: + print(f"Secrets that would be encrypted: {results['secrets_found']}") + + # Show backup information + if results['backup_created']: + print(f"\n📋 Backup created: {Path(results['backup_created']).name}") + + # Show errors if any + if results['errors']: + print(f"\n❌ Errors encountered:") + for error in results['errors']: + print(f" - {error}") + + return 0 if len(results['errors']) == 0 else 1 + + # Handle decryption + if args.decrypt: + if _is_encrypted_value(args.decrypt): + decrypted = decrypt_value(args.decrypt) + print(f"Decrypted value: {decrypted}") + else: + print("Error: Value does not appear to be encrypted (missing ENV_ENC: prefix)") + return + + # Determine the value to encrypt + value_to_encrypt = None + + if args.value: + value_to_encrypt = args.value + elif args.file: + if not os.path.exists(args.file): + print(f"Error: File not found: {args.file}") + return + + with open(args.file, 'r', encoding='utf-8') as f: + value_to_encrypt = f.read().strip() + elif args.interactive: + print("Enter the value to encrypt (press Ctrl+D when done):") + try: + value_to_encrypt = sys.stdin.read().strip() + except EOFError: + print("Error: No input provided") + return + else: + # Interactive mode by default + print("Enter the value to encrypt (press Ctrl+D when done):") + try: + value_to_encrypt = sys.stdin.read().strip() + except EOFError: + print("Error: No input provided") + return + + if not value_to_encrypt: + print("Error: No value provided to encrypt") + return + + # Validate JSON if it looks like JSON + if value_to_encrypt.strip().startswith('{'): + try: + json.loads(value_to_encrypt) + print("✓ Valid JSON detected") + except json.JSONDecodeError as e: + print(f"Warning: Value looks 
like JSON but is invalid: {e}")
+                response = input("Continue anyway? (y/N): ")
+                if response.lower() != 'y':
+                    return
+
+        # Encrypt the value
+        encrypted_value = encrypt_value(value_to_encrypt, args.env)
+
+        print(f"\n✓ Encryption successful!")
+        print(f"Environment: {args.env or 'current'}")
+        print(f"Encrypted value:")
+        print(f"{encrypted_value}")
+        print(f"\nCopy the above value to your configuration file.")
+
+        # Show usage example
+        print(f"\nUsage in config file:")
+        print(f"MY_SECRET_KEY = {encrypted_value}")
+
+    except Exception as e:
+        print(f"Error: {e}")
+        sys.exit(1)
+
+if __name__ == '__main__':
+    # Propagate main()'s return code (0 on success, 1 on errors) to the shell
+    sys.exit(main())
diff --git a/tool_durations_from_log.py b/tool_stats_durations_from_log.py
similarity index 100%
rename from tool_durations_from_log.py
rename to tool_stats_durations_from_log.py
diff --git a/tool_getStats.py b/tool_stats_get_codelines.py
similarity index 100%
rename from tool_getStats.py
rename to tool_stats_get_codelines.py
diff --git a/tool_showUnusedFunctions.py b/tool_stats_showUnusedFunctions.py
similarity index 100%
rename from tool_showUnusedFunctions.py
rename to tool_stats_showUnusedFunctions.py
diff --git a/tools_security_generate_master_keys.py b/tools_security_generate_master_keys.py
new file mode 100644
index 00000000..6ca35884
--- /dev/null
+++ b/tools_security_generate_master_keys.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python3
+"""
+Generate secure master keys for all environments.
+
+This tool generates cryptographically secure 256-bit master keys for all environments
+and updates the key.txt file with the new keys.
+
+Usage:
+    python tools_security_generate_master_keys.py
+    python tools_security_generate_master_keys.py --output "path/to/key.txt"
+"""
+
+import sys
+import secrets
+import base64
+import argparse
+from datetime import datetime
+from pathlib import Path
+
+def generate_master_key():
+    """Generate a secure 256-bit master key."""
+    # Generate 32 random bytes (256 bits)
+    key_bytes = secrets.token_bytes(32)
+    # Encode as base64 for easy storage
+    return base64.urlsafe_b64encode(key_bytes).decode('utf-8')
+
+def main():
+    parser = argparse.ArgumentParser(description='Generate secure master keys for all environments')
+    parser.add_argument('--output', '-o',
+                        default='../local/key.txt',
+                        help='Output file path (default: ../local/key.txt)')
+    parser.add_argument('--force', '-f', action='store_true',
+                        help='Overwrite existing key file without confirmation')
+
+    args = parser.parse_args()
+
+    # Convert to absolute path
+    output_path = Path(args.output).resolve()
+
+    # Check if file exists and get confirmation
+    if output_path.exists() and not args.force:
+        response = input(f"File {output_path} already exists. Overwrite? (y/N): ")
+        if response.lower() != 'y':
+            print("Operation cancelled.")
+            return
+
+    try:
+        # Generate keys for all environments
+        keys = {
+            'prod': generate_master_key(),
+            'int': generate_master_key(),
+            'dev': generate_master_key()
+        }
+
+        # Create output content
+        content = []
+        content.append("# PowerOn Master Keys")
+        content.append("# Generated on: " + datetime.now().isoformat())
+        content.append("# WARNING: Keep this file secure and never commit to version control!")
+        content.append("")
+
+        for env, key in keys.items():
+            content.append(f"{env} = {key}")
+
+        # Ensure output directory exists
+        output_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Write to file
+        with open(output_path, 'w', encoding='utf-8') as f:
+            f.write('\n'.join(content))
+
+        print("✓ Master keys generated successfully!")
+        print(f"Output file: {output_path}")
+        print("\nGenerated keys:")
+        for env, key in keys.items():
+            print(f"  {env}: {key[:20]}...")
+
+        print(f"\n⚠️  IMPORTANT SECURITY NOTES:")
+        print(f"   - Keep this file secure and never commit to version control")
+        print(f"   - Store production keys in Azure environment variables")
+        print(f"   - Share development keys securely with team members")
+        print(f"   - Consider rotating keys regularly")
+
+    except Exception as e:
+        print(f"Error generating keys: {e}")
+        sys.exit(1)
+
+if __name__ == '__main__':
+    main()

From ebfdd9ab039f63f9e0ca939e8e94f4be0b2554ab Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Mon, 22 Sep 2025 07:44:39 +0200
Subject: [PATCH 002/169] admin interface

---
 app.py                                     |  14 +
 env_int.env                                |  32 +--
 env_prod.env                               |  32 +--
 modules/connectors/connectorDbPostgre.py   |  10 +-
 modules/routes/routeAdmin.py               | 256 +++++++++++++++++-
 modules/routes/routeDataConnections.py     |  20 ++
 modules/routes/routeDataFiles.py           | 252 +----------------
 modules/routes/routeDataNeutralization.py  | 274 +++++++++++++++++++
 modules/routes/routeDataUsers.py           | 159 +++++++++++
 modules/routes/routeSecurityLocal.py       | 125 +++++++--
 modules/security/auth.py                   | 115 +++++++-
 modules/security/csrf.py                   |  96 +++++++
 modules/security/tokenRefreshMiddleware.py | 191 ++++++++++++++
 modules/security/tokenRefreshService.py    | 291 +++++++++++++++++++++
 14 files changed, 1526 insertions(+), 341 deletions(-)
 create mode 100644 modules/routes/routeDataNeutralization.py
 create mode 100644 modules/security/csrf.py
 create mode 100644 modules/security/tokenRefreshMiddleware.py
 create mode 100644 modules/security/tokenRefreshService.py

diff --git a/app.py b/app.py
index cade2e7c..02e05076 100644
--- a/app.py
+++ b/app.py
@@ -274,6 +274,17 @@ app.add_middleware(
     max_age=86400  # Increased caching for preflight requests
 )
 
+# CSRF protection middleware
+from modules.security.csrf import CSRFMiddleware
+from modules.security.tokenRefreshMiddleware import TokenRefreshMiddleware, ProactiveTokenRefreshMiddleware
+app.add_middleware(CSRFMiddleware)
+
+# Token refresh middleware (silent refresh for expired OAuth tokens)
+app.add_middleware(TokenRefreshMiddleware, enabled=True)
+
+# Proactive token refresh middleware (refresh tokens before they expire)
+app.add_middleware(ProactiveTokenRefreshMiddleware, enabled=True, check_interval_minutes=5)
+
 # Include all routers
 from modules.routes.routeAdmin import router as generalRouter
 app.include_router(generalRouter)
@@ -290,6 +301,9 @@ app.include_router(userRouter)
 from modules.routes.routeDataFiles import router as fileRouter
 app.include_router(fileRouter)
 
+from modules.routes.routeDataNeutralization import router as neutralizationRouter
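+# (these endpoints were split out of routeDataFiles into their own module)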
+app.include_router(neutralizationRouter) + from modules.routes.routeDataPrompts import router as promptRouter app.include_router(promptRouter) diff --git a/env_int.env b/env_int.env index d6d1be1d..5677525e 100644 --- a/env_int.env +++ b/env_int.env @@ -10,25 +10,25 @@ APP_KEY_SYSVAR = CONFIG_KEY DB_APP_HOST=gateway-int-server.postgres.database.azure.com DB_APP_DATABASE=poweron_app DB_APP_USER=heeshkdlby -DB_APP_PASSWORD_SECRET=VkAjgECESbEVQ$Tu +DB_APP_PASSWORD_SECRET = INT_ENC:Z0FBQUFBQm8wTnBISUNVLWVobHJzX0xtS0pMcV9neXY3S05qc1F6RU9SRTdHM2F2VW1ldVlMYU9zRTU2OE9QTDBmcGRjN3ZUb1dobGZrUHZrR2EyWURtUXRYWk5MTExMVUJxY01yaFBTWFE4OTlHNHBsWHFSUnc9 DB_APP_PORT=5432 # PostgreSQL Storage (new) DB_CHAT_HOST=gateway-int-server.postgres.database.azure.com DB_CHAT_DATABASE=poweron_chat DB_CHAT_USER=heeshkdlby -DB_CHAT_PASSWORD_SECRET=VkAjgECESbEVQ$Tu +DB_CHAT_PASSWORD_SECRET = INT_ENC:Z0FBQUFBQm8wTnBIVmhCSEtCcDF6dXBCSkVzOTdaNUZVOUgtZ2JQQ3lMUjVKdUgxbnBkZHdPSFE5amNWTzhKNW4zcV9QSFdNakFVNXRVcDVlTnd4Tm51QjA2MTVVMVY1b3dBZHhQZXZLdUlsc3lBektKRjhIUXc9 DB_CHAT_PORT=5432 # PostgreSQL Storage (new) DB_MANAGEMENT_HOST=gateway-int-server.postgres.database.azure.com DB_MANAGEMENT_DATABASE=poweron_management DB_MANAGEMENT_USER=heeshkdlby -DB_MANAGEMENT_PASSWORD_SECRET=VkAjgECESbEVQ$Tu +DB_MANAGEMENT_PASSWORD_SECRET = INT_ENC:Z0FBQUFBQm8wTnBISHA2OXVrWjhaQURZM3g4WGxiTmt3WW05WXBIRGVwNFNfdmphOGdUQ0ZCMUdFTlAzZlJTM2ZFaEhVWGRqNXBtREpTalItcDNxS1BJeEZKdWc0dWxHUm41QTBMZ3VqT3pHeFVmVUtJWE1YbTA9 DB_MANAGEMENT_PORT=5432 # Security Configuration -APP_JWT_KEY_SECRET=rotated_jwt_secret_2025_09_17_2c5f8e7a-1b3d-49c7-ae5d-9f0a2c3d4b5e +APP_JWT_KEY_SECRET = INT_ENC:Z0FBQUFBQm8wTnBIVXVUQnhWcjhvVFhtTDl5T1M1SXZZdjZDY0tIa0hmbnRuanUweUdoQ04xNzhod3VscG44V0xlNldzY2t1MVE5UjVjUTdSRUU1N3VBUGNVN0ozU0o1akNBX0x0X1FNOGE0TE9paTh0ZEVnZmNTbGFnSjBpNTBXMTZxemJwWmRTdkJOWms4VVRieGpSM3VtaFY3Zmw0NlJTbVVfbDdwYldVYUlfbGVFUGhsajVZPQ== APP_TOKEN_EXPIRY=300 # CORS Configuration @@ -51,41 +51,29 @@ Service_GOOGLE_REDIRECT_URI = https://gateway-int.poweron-center.net/api/google/ # OpenAI configuration Connector_AiOpenai_API_URL = https://api.openai.com/v1/chat/completions -Connector_AiOpenai_API_SECRET = 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 +Connector_AiOpenai_API_SECRET = INT_ENC:Z0FBQUFBQm8wTnBIS0RqLW13RThlbTNNYUdLa3pXeFVYVm5Mc0czREtRczRBSlVjcVJJcVpKU19kRUZTU2pqMGZFR2pHZnZ4TGdMeFJqbHl5aTYwa2pzcTlNZklnMUNIZHZwdGFuWFhGZDlkemI2cnJuRURBZVBmM3Fxbm91c0ZQai1UMGJSM29kanIzMFB4Z2x6QWcycVk2SzRHQXc2YmZRPT0= Connector_AiOpenai_MODEL_NAME = gpt-4o Connector_AiOpenai_TEMPERATURE = 0.2 Connector_AiOpenai_MAX_TOKENS = 2000 # Anthropic configuration Connector_AiAnthropic_API_URL = https://api.anthropic.com/v1/messages -Connector_AiAnthropic_API_SECRET = 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 +Connector_AiAnthropic_API_SECRET = INT_ENC:Z0FBQUFBQm8wTnBIN0pPeHE3SzFWbTNySU1NRThmcURKWWNiZ3pQLTlwSXZmd0JkTUxXb2VGTVIyeUhZb2JKRzJsQ1AwTlZBWl9RYkRaQkVoR3dxQkdGYUFmd0xRdW1jUGxXdjJPbDlDVTVtT1c3aldRVVNoWmRLd09TZW5xU1JOVHp1ZE5Za0xBODR1TlhMQ1ZiaEZ4Nm00QnpPSks4RGVxYUhqaGdvMWVwMzBKSTdIUEVXSE1XM1ZNUjNBWDAzLWxwLXlib29OV0pOV21MTkFpb0ZDLU5seHMyTldxSFdIZz09 Connector_AiAnthropic_MODEL_NAME = claude-3-5-sonnet-20241022 Connector_AiAnthropic_TEMPERATURE = 0.2 Connector_AiAnthropic_MAX_TOKENS = 2000 # Agent Mail configuration Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c -Service_MSFT_CLIENT_SECRET = Kxf8Q~2lJIteZ~JaI32kMf1lfaWKATqxXiNiFbzV +Service_MSFT_CLIENT_SECRET = 
INT_ENC:Z0FBQUFBQm8wTnBIVXktVWJLTEdLSDd1MENKejQ2bzdCTUlTQ1ZELVJfSGhaeExkMjQ4N1dNVnhjZjRTMl83dlBqeEJCMHVabVpZVlQxRjhjQkRiOHdpMUNaODJqN0UtYW9GallJekY0U2RVZHpORXg4dThuc01uMy11ZGtDb01BQXc3TlE1ZXBjaU4= Service_MSFT_TENANT_ID = common # Google Service configuration Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com -Service_GOOGLE_CLIENT_SECRET = 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 +Service_GOOGLE_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm8wTnBJeVpuNWVraERfUFBaT3BDRVk0T21KcGdrYU9zNGNyRkljNDR2TnB6R291VGJJM3d4RnBHTVVXYTRCT1F1RGFRYnNTX0xTLXFqVHVHTnN0bG9LeHdEbFpZcUNIMXFWY0dJYko4U3FNSk5vUnY2ZWRWWFJLUjR5WkJrZmpMU0pxNGI= # Tavily Web Search configuration -Connector_WebTavily_API_KEY_SECRET = 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 +Connector_WebTavily_API_KEY_SECRET = INT_ENC:Z0FBQUFBQm8wTnBJV1BlRS1UaTZmZkVYZ2hQU0lBRXVEbDl3N3BFNVI4MlBsN2JRSHdrYV95SC1vdk1pMnQzNGRaQThrRy1HcEJyT1Y2OXdQcmw2Yk9KQ3RDRzRpamx2cFpkYkN5SjkzNVVmZnVaOWJnN0MwTGZMcVdRdU1jY2kwVGhNMXZQUG9kajA= # Google Cloud Speech Services configuration -Connector_GoogleSpeech_API_KEY_SECRET = { - "type": "service_account", - "project_id": "poweronid", - "private_key_id": "88db66e4248326e9baeac4231bc196fd46a9a441", - "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDTnJuxA+xBL3LA\nPgFILYCsGuppkkdO6d153Q36f2jTj6zpH3OhKMVsaaTBknG2o2+D0Whlk6Yh5rOw\nkWzpMC3y81leRLm5kucERMkBUgd2GL4v16k6m+QGuC3BFlt/XeyuckJNW0V6v/Dy\n3+bSYM7/5o1ftPNWJeAIEWoE/V4wKCYde8RE4Vp1LO5YwhgcM4rRuPmF2OhekpA+\npteYwkY/8/gTTRpZIc8OTsBYRbaMwsjoDj5riuL3boVtkwZwKRb+ZLvupXeU7Ds7\n1305odTcZUwnImHiHfuq83ZJViQiLRNhUAFnQIXPrYLwEpCmzRBGzYHaRlb69ga/\nzqUbKnclAgMBAAECggEAH6W9qHehubioPMAJM7Y6bC2KU/JLNS4csBZd+idb52gG\nwBwIEFjR+H4ZjymhAA4+pe7c4h7MKyh0RI/l7eoFX98Cb+rEq/r1udm1BhGH3s2h\n2UiI8qRQh1YRjF2/nrN5VjhDBOFa6W9opaopZy/l8AzsT8f21zIgPen8z8o6GpFg\n64fJFcbqCGk2ykN2+x2pIOT04tmCszrfbXZP8LEs4xrUB/XwlHL1vT/M3EWIKbnj\njDaIMjw7q/KRgNUvmKS6SU9b3fnOLcQCz9f5cKdiWACKIU/UvuiWhWJ9ou6BWLWU\nva1A6Fi4XJjhW7s3po58/ioQfl0A9p/L92lGg4ST8QKBgQDx8LIM1g0dh9Ql6LmH\nBUGCOewNNXTs+y3ZznUfvVMoyyZK5w/pzeUvkmOwzbRGnZJ9WyCghq8aezyEpo2D\nPL7Odf988IeHmvhyZIM4PLJYgDvSwGXyf/gh6gJkf/4wpx+tx/yQYNBm3Rht7sA0\npSaLehK0E0kW1uyBzHGKgyQOhwKBgQDf6LiZ7hSQqh54vIU1XMDRth0UOo/s/HGi\nDoij29KjmHjLkm8vOlCo83e79X0WhcnyB5kM7nWFegwcM1PJ0Dl8gidUuTlOVDtM\n5u2AaxDoyXAUL457U5dGFAIW+R653ZDkzMfCglacP8HixXEyIpL1cTLqiCAgzszS\nLcSWwoAr8wKBgQC4CGm3X97sFpTmHSd6sCHLaDnJNl9xoAKZifUHpqCqCBVhpm8x\nXp+11vmj1GULzfJPDlE8Khbp4tH+6R39tOhC7fjgVaoSGWxgv1odHfZfYXOf9R/X\nHUZmrbUSM1XsNkPfkZ7pR+teQ1HA1Xo40WMHd1zgw0a2a9fNR/EZ9nUn4wKBgGaK\nUEgGNRrPHadTRnnaoV8o1IZYD2OLdIqvtzm7SOqsv90SkaKCRUAqR5InaYKwAHy7\nqAa5Cc73xqX/h4arujff7x0ouiq5/nJIa0ndPmAtKAvGf6zQ6j0ompBkxAKAioON\nmInmYL2roSI2I5G/LagDkDrB3lzH+Brk5NvZ9RKrAoGAGox462GGGb/NbGdDkahN\ndifzYYvq4FPiWFFo0ynKAulxCBWLXO/N45XNuAyen433d8eREcAYz1Dzax44+MdQ\nHo9dU7YcZvFyt6iZsYeQF8dluHui3vzMpUe0KbqpZC5KMOSw53ZdNIwzo8NTAK59\n+uv3dHGj7sS8fhDo3yCifzc=\n-----END PRIVATE KEY-----\n", - "client_email": "poweron-voice-services@poweronid.iam.gserviceaccount.com", - "client_id": "116641749406798186404", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://oauth2.googleapis.com/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/poweron-voice-services%40poweronid.iam.gserviceaccount.com", - "universe_domain": "googleapis.com" -} +Connector_GoogleSpeech_API_KEY_SECRET = 
INT_ENC:Z0FBQUFBQm8wTnBJSDh5aW9CNE04dDVsYnBUNWdGUWhDMmFlNmY2bnl5X1llVnNZV3VGakI5RFFNYVprYm5mU0F6TVZ5NDZkYlVhMGpzM0RPNGFmNDdvV1c0Y2hUQnowYzRmREhwRk5fMVVnejlGR0Y3V1pVemtFbEZEeTFEOUptbThaSHJJeGtwWGZZQ0VLYkpaTGRXMVFxX0hRX2treG1ES2VheTdsR1U3eUxYV0xPbzExSDZzOHBQR0FSdGh2V0hXRFpRLW52ZlJyMTBDR1VkVkh4VU52MWVwMDdxYlBfbjlMeTd5M0FIVGtaT1c3WmpwZjh6Skp2djB2cXM5NkdOS29ONzdaNGk0WlpzTlJxU2cwQWxTd25XYmllbjJXemQxY0ljZkZqZFV3MEhucXNfYUR3T3diWEFyLS1WQmRiZEJXbERuQXhXanZQUDBJZFphZGk5aHFTQVRkM3B5QllYZ0Q0V19VRlRtVThEb09TWGFHVHRKc0R3eGRoYWpkT0xRbXhGb0pFYUk3MXBGekV6WDdzekMtaU1JNlNaaXdQa19keUotSDJkZDNQTVpZQjlxLWhwRWIta05YR0sxTXRVS1ZLaHRJM1IwUTMtQlUtbHU4dmVfQjdsY1Y3ODFSZXBiQUJIdFNrR3dGelkzWjhQaXR1NlFIYm1KMFVNMmlMcGQtRE1zNmx0ZTRuVUhVRUFuNUEwTnNNSTBnaTRtaVNOT0lLQTR3U01SOHNjZkdOQ2VXQXBuZ3k3Q3NjbDh1dU5fWXVkN0pvNmxZWlQyaVVLNEFEN3dxRkV5NUU1dG5kdGxieXo3WGhIalAzWjQ0TFNLRFFVZkJFRUNjQW1xNWdUUHRTOG0xVklmd09NdGd4SURxdmI2UXU3U01PVDZSM0lhbDFQUjZobkl3VC10eThuV3BSV0l3Nkw5X1dVN1RhYkdqb0ROempfQ2xjc0lQemtaSGNkMjJjR1hjN0V3NFhta2l1MVRGeG9PekdhM1V6NGpCMG5yYmZJb1BmdndyMXdpOGdSSldmRFg0UlZSX3EyTVN3ckotaGJLbU5EMG1jYnY0VmtFNk14dzdzWVloVFhWMkQxNDlmc3QwSWJZV2ZaU3J2NlNkdHlyQUVXUTNXczJZMzBua0Zmbl9MYWxSSi1QdUowQkdINWJIZFNoUlY5V2NYYjFva3A3OHZ6MEd6MGRvNkJrQjNKa21FcHI2Y3pfTWQ2TUFzUEp5M1FZazVUSUVUTnlkQ2U1RVU3OFdYYXE0S3QwT210a1c5aVlPNDhET3JBeUFHeG41MDM3aUdXVWN6TUtUMi01aGJOWGN4WTZDN29WNl9SdGRtR0gzRzJFZmhZa3p6UUpoVEMyb196aFdUVE1nTDNDSkJuN1lsSWlMWlBVS3VhclRxd1ZhWXhNUDZRa0Jlb3N5UkhiZ2pYc29ZZm55bWFZa09DZ2lvZmE4YTRoYmJJREh0ZXMtSkN3MkJBNDlJQTR1MTlEVTFQTUFJMENCQmFCeGtXYlJJVmtSUjBuNXBDa0wtVTJuVk8tVGk2dWxmeEgyV3pkOFdDU1JhRnowLU9EbWZhYWkyZVRfSDVJWDdtd0l0TF9OQ19NRi1tUnAxdHg5a0dFSDY5RzFsR1NiV0p3VG5DckNyREVjcWQ0elV1aktKNlJkNXdIVnpXY3U1bTBUbVJ5a3VucDdualg2cU1rZkEtOWpWa2tGU0puNUpNUzVaV3Y4UmhiZWhLTkdzS0h5NkxkcmNLblB2dG9lb2xYYXlqZkZiM3ZRTFVtM1VwOFFGQ1QxWFh2cUlhMFFyME5rSEJwLW5IUS1pRmNpVXVWYUR3emg0N2lDXzlFN1NnRk9ab0lLaEVvaV9FcEVfR0VBUzRkWG9KUm1sRk9DcDEtSGQzMFFXLUt3QnBpaV9fV2lQVExXSDcwc0E1ejE5SWd5c0NnQTlyWlBuVFNCeFpxN0M4M3kxaHN2RmJXekxiNy0zTEN0N2daLWpERFJ3SFFMUk11N25mRVk0MHlyMHE1d1NDbFpFaTBGSDFFcksyYVJZUEdKemtNWE9qbDBfMEpyaVVaSFdOZWpodGt4N0g5TGN1NVRQUm41cTNxRWdyTDFjd0xTSEZibUt3R0pISkpmRzlSVEYzeWNUdm5qUEZPVlVJX1d5MGpxUWNjTzFMaGlEVG9GY3RhUWZiMFpsbVR2OHNLaUs4ZkFENmNoRFRyd290a1FFZng1ckczaWxMMEVsZ3dBb0ZXdlh2YzJxTlhwZkJTV2VBandjWFB3MjJrRTR4LUUxV2lCRTdGYjNoeVhZTUx3RjFRNlFoY1VYMHRyTmlxdm9jUjAtWndLQ1RNcENDclh4TkR3ZE9tSjFOaGxIcmtWQ2ZIaHRabGNJQVI3RnZHUGtBRWt3YmpuUUhrT0VRTUxfVFZOSWZ1Z25IWUsxVDIwZEY0blQwbEdXY3hETW41UldqcW0zMUNCVHNDZkRyTGlrVlU3c0lWdFpvUzlfTGtLMGxJZUg4dUdjTU01VUtYaVQtSFBqT2F4NXhEUUlBRU1lSU54dzFhd3d2UjUxb3JXSUdQbVRyUTFlc2Z5WkNGWlNzVTc5aWllcGsxbzRmYVlFTWw4VVVtTDdkczdzQ3NFSGMzdVltSjlfY3dyNzlPaEk1cE5jdk4weDFKc3BTUXpPbDI5Y1ptblp0TGJ5UGwzVmU4VEtWUTZEQlRtemp1YnppdDdkSHpyY0c4NlZqSmI1UVBwSTJSZ1NNcEQwRTdySGRySi1XUkhWTHlaajFZSkQwc1k5NGZDZUhzRFdBZXNqSVdwU0ZsMGlNLUZPSU1OT243N0RFQ091RjVyaVRBdEUtOWlfSTdpX25laHNwVFlFU0RjWE42amhxSHlvcDlDdE13Y3JtWFNsQkZoclVFS2hFblQ3MXpxOHJsUGMwbkVmT3ZMMFZiNmpHZVRCa2k0YTJnNHM0dWpyQXJaYjU1Z1hNWGU2aU1hY3RxYWVzU1hVWWI4bjR2Y0R1MmF0NDVlc08xTmVwQ01ENThUMC1kWk9SWG9IWWFDZ1V1RGhDY0pjVEZBUGJreXh3RTRuQUlTWVR4NlVuaGozbmJVTXNzRDdrMGZaclpsb290WmFaTzF0NThITENXN1JSN25JS18yWnpJY19maVlXN21QTWE5M1FhWGJqTFQ0RTZoY3dlcEM2Q0gzQ1RtYk9jTnVkU1drVnhZc1NkMm82c25CWkpaTW5KbTd5dGJHSHhhQzR2TFk1NW9pX2pQNDdOeVhGZ0swTklYUkRZbmdsVzNabTZjWDRrX3BCMC1OT0M4R0dMTWlhQXhIMXJVT0Z0bmp2aTBFWnRDSFYySVFULXRneFpBQ0ZwdEZaTHAyeWx3bFF0Q2o5YzlfbUQ5Xy1uWjFKQXVVTE9Qa2VPNDZmQ0kzaUxBNG9EdGpyZm9VOEZhTC05V2JBMU1KTmt6RlY5aUpILVE5bExnVkdCcDlLZExBeHRSWWxQS1pkcE9BWjdXeV8tb1ltcTVoalQwLTl5Mk5GanphVGxKUzlJUjk0Y1g2QXBSRzBSNkk
zTjg3bTFlYU5XbE0wYkNacC05bzVFN3F4NXdkUFBqQU9QYWVtdUM1MWtNR1RzbzBQVHJGR1NzTEV5TzdCUXRmcXJuTUVCVV9QbXFXcG5QdmJfQWlVMXRvU1hOakpHandKbnFhZTlqUWtWbXg3MzFpS2I5SlJzN0dQN0JuaGNKSWtVX0Vxc1FPUk4zMzVTMVIzSWJYcUVYU3Jldm01bHpleU90MlU3XzhhVWI3a3pqajRjd0g3YVppY3MxNUdKU1lUZFhyUnZDYXJWcnltQ2tadEFxdG92aUI3MU96WnFBdXdLRWZkMFVtc0N0SDVJc0RYdjhJZVhVWXZfb2VDY3NxVEdZc05tRWNVZTRUVHphT1RyWlRoX25iNXlLX0pDeXZrbXR1VkxnUTlZOVpUak5GYzVUODZvVmtKNHo2QUg3b2pSZmNhY1lUUVA3WXktMjYxWjBiTE5XeWJlQ0VhY2VyVnRxd3hvUWxtSjhCSGNkOHFLRTF4QmpLc0FoYzg0b2xRZXZpVTZ5TENXNXdiTEZNV01nSXpPR2U4ZlZzXzZZeGdmSG5kN2hLeDR6WU0zWExxUXJBcXR6NmdZb0ZiZHRPSFVuRUVyZV9zOUJsN09GWE5YM25FRlNoLTlNNWVMcTctb2l1Mzdyb0Y5TDU1YjdnQVUwSFoxXzF0VFh3TGktbFl2bVQ3NzdkRHV3SlNfaEZjY1ItYUk0OHhLRk9rUXdqNUVTdGNBc1ZCY1pFQ0N0WE45MUEwUlpwejdrcDNBPT0= diff --git a/env_prod.env b/env_prod.env index ab046d57..62ce7253 100644 --- a/env_prod.env +++ b/env_prod.env @@ -10,25 +10,25 @@ APP_KEY_SYSVAR = CONFIG_KEY DB_APP_HOST=gateway-prod-server.postgres.database.azure.com DB_APP_DATABASE=poweron_app DB_APP_USER=gzxxmcrdhn -DB_APP_PASSWORD_SECRET=prod_password_very_secure.2025 +DB_APP_PASSWORD_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBMd0FKLUpzaTdYT0Zia2V3VExPSktfTUx6RmRDc1hobjhYamxyMTkxakhDeGVHRTA3TmVoNC1Mamh0elFiV0h5MnA3YmpheXRzLVdhN2Ytb2R4a1NiSWY0RlFQMXlJU2hUMFY1RGJ1dEdRTFE9 DB_APP_PORT=5432 # PostgreSQL Storage (new) DB_CHAT_HOST=gateway-prod-server.postgres.database.azure.com DB_CHAT_DATABASE=poweron_chat DB_CHAT_USER=gzxxmcrdhn -DB_CHAT_PASSWORD_SECRET=prod_password_very_secure.2025 +DB_CHAT_PASSWORD_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBMM3p1TEY3VTQxT0xrbW9fbHFJLXNDZHJUOVBSUHhhdURpMi1EZTZIaXQ0M1V5ZUZFQVhjSGF5SUVzTDNrWW11UlNQQVhwNEU0al9yZXQxSnRIU1U0akRDbFVIUHVvUV9SMkFkaEFGR1ZVUjA9 DB_CHAT_PORT=5432 # PostgreSQL Storage (new) DB_MANAGEMENT_HOST=gateway-prod-server.postgres.database.azure.com DB_MANAGEMENT_DATABASE=poweron_management DB_MANAGEMENT_USER=gzxxmcrdhn -DB_MANAGEMENT_PASSWORD_SECRET=prod_password_very_secure.2025 +DB_MANAGEMENT_PASSWORD_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBMRDJRY19uM1hTNC1mMzhVaGNtamtScGpVYTY3RUdBTlpTTDdrUF9PdF84WkFSakRoX0VEcGhwanBPSU9OUGJNWXJDblVUS0o0Y0FBd0hMejUyTXFJTFVCaUJmTkpVYVQzWXFRSDV2d1lENHM9 DB_MANAGEMENT_PORT=5432 # Security Configuration -APP_JWT_KEY_SECRET=rotated_jwt_secret_2025_09_17_prod_e1a9c4d7-6b8f-4f2e-9c1a-7e3d2a1b9c5f +APP_JWT_KEY_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBMX2lyNHVQVVkzamE1eURGMkRoVmhJTTVSTEQ1c3E4XzlucExfdUNxTHNwazB2X1h4YzdUeDhsYWNCbUZ5VjJNVTZDYlY2dGhreTg5UGV2Z3A4X1FTc094XzhxdWRNSzBXd20yY3pFNkpUYzhaeml5ME9OMjFkNjZMQkdvczZnWTVYX09fR0RYQXhpVHFPQnA2cWh1T3pqTFVieXpHV1hlUjVQdWRCSEc1bk1ZPQ== APP_TOKEN_EXPIRY=300 # CORS Configuration @@ -50,41 +50,29 @@ Service_GOOGLE_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/google # OpenAI configuration Connector_AiOpenai_API_URL = https://api.openai.com/v1/chat/completions -Connector_AiOpenai_API_SECRET = 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 +Connector_AiOpenai_API_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBMSlhwejcyRl9EUWpPX3M5bnI3QTNiRDd1QXVaVkFCczBzeUczcHhyenJvRDN0SDZGaHp6dGJqNjNiLW9oTjJPZGV1b0VxWElfT29jQ19vNWF4aG11bkRlS1JMa1VoeG82VWVmWkV0VDZUWTFmcXZXYUh6ZWs0bEswNXhhZ1ZEU1JNYk1jU0p3YVZkZmZVWmF4dURDcGR3PT0= Connector_AiOpenai_MODEL_NAME = gpt-4o Connector_AiOpenai_TEMPERATURE = 0.2 Connector_AiOpenai_MAX_TOKENS = 2000 # Anthropic configuration Connector_AiAnthropic_API_URL = https://api.anthropic.com/v1/messages -Connector_AiAnthropic_API_SECRET = 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 +Connector_AiAnthropic_API_SECRET = 
PROD_ENC:Z0FBQUFBQm8wTnBMaEFnaHBDYndpTkZJSFp5OGdmY2xtNDZEZmFmbk1rUUQ2STZCQlprMjRhY3BLdkhTWWdDRlIzcm94NE5LZ2dCdlNkdWpkVVk2QnIzTzQ5TGEtX2p6a2kzeF9PR3QtNWs4aWFKX1ozUTNYT09sMkJNb1JMRk1vbTE0U0Y2eU1SUjhwY3Z2TWIyU2d4Nk1iS2d0YkRKUm0wNjNEbWNxYTg3SGNnU3FMSzVtYjhLVnhxbXd1SmZyam9QSGtna1dkSGlpeENEREZQck1tZk4tTkJvTERTcjZSdz09 Connector_AiAnthropic_MODEL_NAME = claude-3-5-sonnet-20241022 Connector_AiAnthropic_TEMPERATURE = 0.2 Connector_AiAnthropic_MAX_TOKENS = 2000 # Agent Mail configuration Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c -Service_MSFT_CLIENT_SECRET = Kxf8Q~2lJIteZ~JaI32kMf1lfaWKATqxXiNiFbzV +Service_MSFT_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBMMXV1OE5qODFrcGJqVEt2Zlk1TkFyQ3VoMzVad21UcTgwSXJqRjdiWmdsS0J3VWRBWWg4WWllNzE5X21ubGItMl96b0hZYTlXbVBkTmVhQVRadGlnWUlWQWdOZUV2U0pDSDdiWEhMNHJQUVllYzFpWFNJUnY0M0FpZ1ZWcExyWmk= Service_MSFT_TENANT_ID = common # Google Service configuration Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com -Service_GOOGLE_CLIENT_SECRET = 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 +Service_GOOGLE_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBMYmNCSXM5cnRBVUxlYm83VG11MlBGZHhiV2hWOWxWYk5XRk1hSmhsTGdsX2dHSGhxYk5FWEpEbXdQM3hCNE1nRjZHNjlDb0RMWTIwb2pqczdocjFkSWxfYWlLOU9KbmtUcTl1SmZJZUh2V1RwM2kzVkZhRFIyTERsaThXYS1OVFk= # Tavily Web Search configuration -Connector_WebTavily_API_KEY_SECRET = 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 +Connector_WebTavily_API_KEY_SECRET = PROD_ENC:Z0FBQUFBQm8wTnBNVU9JZEcwUWFuQ0lfRElGdDRhSFJDNVVBNUhBVzVKQlhBZXNsUDluRXYyV1NuaWw3eEJMdnhscGNZNW5KVmgtMzNfSGRfX1RMZlB5SmtHSzNTMC1RUlp1c2dqOWhSVnhuVUVGVUlaak16ZjlpWW00OFVIRFU1aEZXYzNaN3VNS1I= # Google Cloud Speech Services configuration -Connector_GoogleSpeech_API_KEY_SECRET = { - "type": "service_account", - "project_id": "poweronid", - "private_key_id": "88db66e4248326e9baeac4231bc196fd46a9a441", - "private_key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDTnJuxA+xBL3LA\nPgFILYCsGuppkkdO6d153Q36f2jTj6zpH3OhKMVsaaTBknG2o2+D0Whlk6Yh5rOw\nkWzpMC3y81leRLm5kucERMkBUgd2GL4v16k6m+QGuC3BFlt/XeyuckJNW0V6v/Dy\n3+bSYM7/5o1ftPNWJeAIEWoE/V4wKCYde8RE4Vp1LO5YwhgcM4rRuPmF2OhekpA+\npteYwkY/8/gTTRpZIc8OTsBYRbaMwsjoDj5riuL3boVtkwZwKRb+ZLvupXeU7Ds7\n1305odTcZUwnImHiHfuq83ZJViQiLRNhUAFnQIXPrYLwEpCmzRBGzYHaRlb69ga/\nzqUbKnclAgMBAAECggEAH6W9qHehubioPMAJM7Y6bC2KU/JLNS4csBZd+idb52gG\nwBwIEFjR+H4ZjymhAA4+pe7c4h7MKyh0RI/l7eoFX98Cb+rEq/r1udm1BhGH3s2h\n2UiI8qRQh1YRjF2/nrN5VjhDBOFa6W9opaopZy/l8AzsT8f21zIgPen8z8o6GpFg\n64fJFcbqCGk2ykN2+x2pIOT04tmCszrfbXZP8LEs4xrUB/XwlHL1vT/M3EWIKbnj\njDaIMjw7q/KRgNUvmKS6SU9b3fnOLcQCz9f5cKdiWACKIU/UvuiWhWJ9ou6BWLWU\nva1A6Fi4XJjhW7s3po58/ioQfl0A9p/L92lGg4ST8QKBgQDx8LIM1g0dh9Ql6LmH\nBUGCOewNNXTs+y3ZznUfvVMoyyZK5w/pzeUvkmOwzbRGnZJ9WyCghq8aezyEpo2D\nPL7Odf988IeHmvhyZIM4PLJYgDvSwGXyf/gh6gJkf/4wpx+tx/yQYNBm3Rht7sA0\npSaLehK0E0kW1uyBzHGKgyQOhwKBgQDf6LiZ7hSQqh54vIU1XMDRth0UOo/s/HGi\nDoij29KjmHjLkm8vOlCo83e79X0WhcnyB5kM7nWFegwcM1PJ0Dl8gidUuTlOVDtM\n5u2AaxDoyXAUL457U5dGFAIW+R653ZDkzMfCglacP8HixXEyIpL1cTLqiCAgzszS\nLcSWwoAr8wKBgQC4CGm3X97sFpTmHSd6sCHLaDnJNl9xoAKZifUHpqCqCBVhpm8x\nXp+11vmj1GULzfJPDlE8Khbp4tH+6R39tOhC7fjgVaoSGWxgv1odHfZfYXOf9R/X\nHUZmrbUSM1XsNkPfkZ7pR+teQ1HA1Xo40WMHd1zgw0a2a9fNR/EZ9nUn4wKBgGaK\nUEgGNRrPHadTRnnaoV8o1IZYD2OLdIqvtzm7SOqsv90SkaKCRUAqR5InaYKwAHy7\nqAa5Cc73xqX/h4arujff7x0ouiq5/nJIa0ndPmAtKAvGf6zQ6j0ompBkxAKAioON\nmInmYL2roSI2I5G/LagDkDrB3lzH+Brk5NvZ9RKrAoGAGox462GGGb/NbGdDkahN\ndifzYYvq4FPiWFFo0ynKAulxCBWLXO/N45XNuAyen433d8eREcAYz1Dzax44+MdQ\nHo9dU7YcZvFyt6iZsYeQF8dluHui3vzMpUe0KbqpZC5KMOSw53ZdNIwzo8NTAK59\n+uv3dHGj7sS8fhDo3yCifzc=\n-----END PRIVATE KEY-----\n", - "client_email": "poweron-voice-services@poweronid.iam.gserviceaccount.com", - "client_id": "116641749406798186404", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://oauth2.googleapis.com/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/poweron-voice-services%40poweronid.iam.gserviceaccount.com", - "universe_domain": "googleapis.com" -} +Connector_GoogleSpeech_API_KEY_SECRET = 
PROD_ENC:Z0FBQUFBQm8wTnBNR2pBeUR6NmFTcFZkQmZnSmlweEpfQ1RSNnFobHZ2N0Z5Q1luRVJWSmx1WFJTMlloX2hROVQ2TjlvRTJQWjVuT1F4WjJmUVdCbTEyYTQxY2tjSXA2S2Q2NTN6RnRiQmxTTWdBeC1GdnNZUV9KWU1IQVFaQnc2VmJFSHVPVDFhaF9VYUJjWS1MRTlNSEkzUl9lTDZjX016MHZKSVE0bUNjZ3Q3Ni0zOXNfMmQxYUhTcnRzSDdQSUlyUExjUHpYQTF2cG9CS2dZajJLUzZUS09JeGRDRkdPd3Y4VFhWaS1DY3FOc0hQWXhNNVc1LTI4RjdKYjktWXc1X3hVUmg5VnhQX3BuQzJzZ0ZnbUVLNHZHWTRqNjI3VDlrNDlMLWNFVl9sVFd5cm0xbjlXa3VMOEthdXdWTkFPR2ZjQm5ReGNqbFN4NTg5NTVqcmlETGoteWhqLVEwSTNBQTN0Y2ptc0JBd1BabjUwaml0NVllOF90ejhObkR0VHdIU28yRmY1QWk1VktURW1DNWZPR2FnQ2dqS3lWSU8yT3ZDN3J5Y2FzRmJOOFNTWGZhUXNzbmFKOXdxakRfekFyZEFSQlF1QnJIMF9idktTQlp5MWI3eklrOHJPUmVxOFRyWmJNSGVNXzhDSTkwd29tSTJhTVM0T3lMYWhQZzEyd0RYX21NVVFTMm5JMHVpOFFfSHdkc3RZX1A2czVtRGwzU2RZUTVYaEp1TGpfejNwLXdZY0pZQmotOXVGbGRxcnFNdU9XUEZKODJvWnA5TE1mYktjUGUtRkJJbHZuTF81em0wVTdVWi1QRGk1dG45cWR6QkZmRHd0WWRqZE9xR0FCRXktenhLYmNpY2pfYTU0bUtPYk5KWFVRc1E2M0dsSWVFWWJucXc4QnBBanRjZk5GbzVKXzVMX0ZzTFZNYUZXTm8zNjMtb1dEU0VkTmtaR2xPZmZXam5qTVIzMXJpeG5raXVJOHdBZXhXeTA1SjBJU0xHZjZubDk1Z2k3ZFltdzRQbzZZNlJfcHVHWjRzWXJIVlAyRVU1eVVYVzd0R0JoaU5DWEF6ZkdDeGZXdDFiMXpiTkEtTUtuMEt6ZFJyMG0yN2NFbDFna2V6dEl5cmJkS2liOHV5OUhBVGJRX29vVmk5VnptUEhoT01oOTlHeHZaUTVCbTQ3Z1ZkY3J0NnpPcl9JLXVYOXBCZVh6blNSVFJFVUJBT0lOckNyQjI4SlpBV283TUtBYWpZMWVpV1p2czQxZHlBYnJ6d2JUQjZ1WXRGdEdkYy1keHFjRG51N0NSN1c1ZEttTXRfSGZEbzhsV0p3aW1rbHJpS0pEaV9IYTRiVi1WLWF5TUJGMnhVZ3c4dUxiY2ljTXE0S0JJUWM2M21NemwtdlhxcTJta1ROYUpUTmVaLThyQWRxS2NpM0QtaVJyQVU3WWQ5V1kwdUp6dzlpbUxzekxwVGgtQl83MTdWeFoweUZLcFZwNnQ4cVdfMkw2MHBZUU1CSXJNMjk3YThpczRqWTBuZjlRSUJfMXQtdk1uZUVTMzVvNFNYekFmRFVJSG1ib3RzMHZiQXVqa0VWNzNid3RfTzNiTDJrU1VBY0xoWDV3Y21OMWpzUW5IZ25RRWZRT3ZwLVVESG8wUGNBcXFfWGZKUVZPLWVnNlRjaFRibmlrLTgwUmhRRWVNNFRUWjBjbjNHSFUydGY3NWNQSTc3NWlXY2s5U0lOQ2hUeWZiVXdVOV8wRjdFTXNzN25nSE9JMTJqNmxEMTN2U0N1TnRDWWJpMm9WM3FRbWY3bWZGY2huUURlQmdIdkRBNGhLWEl4M1hqNkotY1FBWm9xX2FJZVRBekZoaWx0R2k4eEF2T042dkV3cmVhMG42Q3NTM3dILTI3NEVadUpiUjU2cmxVeExMTlFpVzBfWmxmZVRXSU4xWmVhdTZqaVpmeWhwUjB1VE15SVFtMFhqUTVLOHd4dEVkQ2hiSk5nczJ5aUV1Vk85OW05YWJMYkdMSXRCZV9WLWxudEtUX0FKR0hDNjdMcEREMUlWMHJaV3RUcUpMLXlEU0Q2ZUhETVJycENlTjV2VktibXhTelJDcUFYWWRwV1VHbldtOFdHRlZJcDc2dzBXZGlJTnlSeU9mUzZHRzJNd25WS1FfdUpCRUd0NDdGaFoxVU41Qi1pM1ZQdHZ1THJBdkRMeVYwNGpkRWl2WWtrbnpoRWVFMzc2ME9WVUNNYXEtRnJKWnprekhxcHJVRVRTdHNlSE1ZdWFtLVRjWDJvVVFBTS01NXBjbGlNNC1Dcm1NaUdiUk1uUDFDdlJ0UFRlSkR0eDJwOWladzFxOFpoY1lUc3o1MmUzWm1MYUs3MjYzdk9KbWtrOWxDVWhlOTY5TlVEM210YnlmZXhnUzR6bTVHbi1IS3ZyX0lSc0FtOVBJV1BBeldJNDk1UUxaSW90UG5IY1hJZVhhWHQ3ZzQyd29YOU9DNEthWXdnRkx0aDB1LTBUN0VOUlBGZ2ZPLWY1UXdTQVRCRmhJQ090a2xUbzN6YXhuTmNqaWJpajR2Mm1Rd2lxelc3UjRSRm11dFBlVlRsTlRoRUtPYkJVbzh2Q2Y2MUhqZnNsb3E5cUhLV0hFRTlQVzcwVm5DZ283ekJPNDRxSi1neTZYR3E3UEdXT2kwLWVaZGxFUnlaazRHa01TSEtyRy15S3QydHBHcnlMZWx5Z2xqY1ZkTEVIMERIajl1dW5JZzY2NWVjV0JSb1NzSG5OeXNheFR6QmoybjlBeThHVkJtNHduaXlJYkVySlNYTDMzNWpneHlXeklvYWRvQzAzV1lDa2lzaU5CUEFNNVdLMEpWZ1BVbUhhNm1NX0k0ZVM1dFNFOXFaSkVuTXhFUHYtdF82bGxfcmFMMG9kTUtMOG9adWF3VFlYVnBTSjREbWRMX0pLR2ZHRHpIdGMzYW5OVUtzVGp3OXc5WDJfd0l2T0lVN2xvMjBzSHZSaWY1cDhTUmhVdDR0dWgtMGp4V2V2bDRQSC05cFdyNHBYaHF2dldwTVVNWExYZEZFQTlmaHFMald2LTdJX3lfRi1WbXhfXzRtUzNXVXBHUTBocFRST1h0akVZc1BleDc0aGxidS0zTjl1cTA1aEdWRXUzaXlNQUR4N0ctdkhISE9DSGl2amdyTFZ5bTFnakFrVndhQ0ZTYi1GcHM2TG5aU0xXUGhjMVRBaTRNYzBlcXdtYkdsQ0xwTmxmRzJpV3NnUkdMMDA5czFaUnR0R01TSmZqeHB3MGM0WkxpNzE2SXpSaFhENzh0OWU2M01JZ1pCWHhZdGN0aUJmS3lSWHNMWXhZNnVZSHlrYkFkNHlaVm1FcEwyTzRYN3dhdGItVEh6TS1NU2R6YkNhNjEwSEpwdzF1WUdtWldJQ2ZkUEhqM3VhTFpvWjhxQ2dpNlhpY0NTcTEycm9GQk9NM3Bla1F2REJRLV9ZYW1Dc19JZ1NFOW5yLUxCX0tvakE4ODhhOEh0SXA5anJjRGJ6d3dObzVfU3FYbW1kLWVCSThIWnl5TEh1OFlVTml5QTRDVl
JPaU9mZTB0NjhSZjQtT0lpa1piWmwybUFQOURlOHBtQW9KdE5SZTJzR1A3MkJVS0xuZTRKSmYtcW5OT2M4Y3BWaUVYQUJTblpsR0xYckx1eklwNzh4ckpBNUxaMWlKd1dINjQ0UWUzejNPcnpMWHZHZTg4NlhyTnlhdXZSQ093QWktTnNLcU40aTBGUFFNUWFKcU1EZWRQalVxbEtIcENTWXl4NXRRLUkxYXhGeDkzY3pfcVgyUlFWa0dVVXNHU3dvOXRDZC1TYnZVS1d4SjdKT3NKTjRMbFZqUlJXSkRQQnNUM2VkbWRhaG1qMFBVQ1VxX0FNYnJmbFI1RjgxSjE4c25VNm1EOXpaanhQLXVJcXEyN2VaTExZMURzYThwdzh5NEVUY3Rab0plT0tWRU5rQmJvOTlWRlp3elRQSWd0LW5laGpQaFFtUzdZVzExSjQ3ZVowck82TjZjektjZzZ4NElGa2pIYjJHa29FR1BsSG5HZGlmd0xJU0RSUW96emZVSUNzSzNCTlNUcGNLQzVSaTRacWtOUWxwOTBLdkt5Q19PWDRONGp4bVNXZHd5ZmtHSVFfX0lCNFhrY2NVU0N1MllJOGtmcVFXQXpJRkdiX2dSVWRuakdldHZ5bEo2U21jQmVtcHpTaHlVZ0k4ZF9xemxMOU92bHllM2VVd1FPUDVRNC1DTFZ1U25yQnlrNm4yUi1qNTRtOU9OaVVYVzZ6Z28wT3lubDk1SF95Zm9rSWZsMVg5NnNDQTl1YzhRPT0= diff --git a/modules/connectors/connectorDbPostgre.py b/modules/connectors/connectorDbPostgre.py index c6b254e7..ce1cfe9b 100644 --- a/modules/connectors/connectorDbPostgre.py +++ b/modules/connectors/connectorDbPostgre.py @@ -598,18 +598,24 @@ class DatabaseConnector: tables = [] try: + # Ensure connection is alive + self._ensure_connection() + + if not self.connection or self.connection.closed: + logger.error("Database connection is not available") + return tables + with self.connection.cursor() as cursor: cursor.execute(""" SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' - AND table_name NOT LIKE '_%' ORDER BY table_name """) rows = cursor.fetchall() tables = [row['table_name'] for row in rows] except Exception as e: - logger.error(f"Error reading the database: {e}") + logger.error(f"Error reading the database {self.dbDatabase}: {e}") return tables diff --git a/modules/routes/routeAdmin.py b/modules/routes/routeAdmin.py index f895521c..60663598 100644 --- a/modules/routes/routeAdmin.py +++ b/modules/routes/routeAdmin.py @@ -1,4 +1,4 @@ -from fastapi import APIRouter, Response, Depends, Request +from fastapi import APIRouter, Response, Depends, Request, Body from fastapi.responses import FileResponse from fastapi.staticfiles import StaticFiles import os @@ -6,16 +6,14 @@ import logging from pathlib import Path as FilePath from typing import Dict, Any, List from fastapi import HTTPException, status +from datetime import datetime from modules.shared.configuration import APP_CONFIG from modules.security.auth import limiter, getCurrentUser from modules.interfaces.interfaceAppModel import User - -router = APIRouter( - prefix="", - tags=["General"], - responses={404: {"description": "Not found"}} -) +from modules.interfaces.interfaceAppObjects import getRootInterface +from modules.interfaces.interfaceChatObjects import getInterface as getChatInterface +from modules.interfaces.interfaceComponentObjects import getInterface as getComponentInterface # Static folder setup - using absolute path from app root baseDir = FilePath(__file__).parent.parent.parent # Go up to gateway root @@ -33,24 +31,79 @@ router = APIRouter( # Mount static files router.mount("/static", StaticFiles(directory=str(staticFolder), html=True), name="static") +def get_interface_for_database(database_name: str, currentUser: User): + """ + Get the appropriate interface based on database name. 
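+
+    For example, with DB_APP_DATABASE=poweron_app (as in the env files), the
+    name "poweron_app" maps to the root interface; the configured chat and
+    management databases map to the chat and component interfaces respectively.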
+ + Args: + database_name: Name of the database + currentUser: Current user for interface initialization + + Returns: + Interface object for the specified database + + Raises: + HTTPException: If database name is unknown or interface cannot be created + """ + # Get database names from configuration + appDbName = APP_CONFIG.get("DB_APP_DATABASE") + chatDbName = APP_CONFIG.get("DB_CHAT_DATABASE") + managementDbName = APP_CONFIG.get("DB_MANAGEMENT_DATABASE") + + if not appDbName: + raise HTTPException(status_code=500, detail="DB_APP_DATABASE configuration is required") + + # Map database names to their corresponding interfaces + if database_name == appDbName: + return getRootInterface() + elif chatDbName and database_name == chatDbName: + return getChatInterface(currentUser) + elif managementDbName and database_name == managementDbName: + return getComponentInterface(currentUser) + else: + available_dbs = [appDbName] + if chatDbName: + available_dbs.append(chatDbName) + if managementDbName: + available_dbs.append(managementDbName) + raise HTTPException(status_code=400, detail=f"Unknown database. Available: {', '.join(available_dbs)}") + @router.get("/") @limiter.limit("30/minute") async def root(request: Request) -> Dict[str, str]: """API status endpoint""" + # Validate required configuration values + allowedOrigins = APP_CONFIG.get("APP_ALLOWED_ORIGINS") + if not allowedOrigins: + raise HTTPException(status_code=500, detail="APP_ALLOWED_ORIGINS configuration is required") + return { "status": "online", "message": "Data Platform API is active", - "allowedOrigins": f"Allowed origins are {APP_CONFIG.get('APP_ALLOWED_ORIGINS')}" + "allowedOrigins": f"Allowed origins are {allowedOrigins}" } @router.get("/api/environment") @limiter.limit("30/minute") async def get_environment(request: Request, currentUser: Dict[str, Any] = Depends(getCurrentUser)) -> Dict[str, str]: """Get environment configuration for frontend""" + # Validate required configuration values + apiBaseUrl = APP_CONFIG.get("APP_API_URL") + if not apiBaseUrl: + raise HTTPException(status_code=500, detail="APP_API_URL configuration is required") + + environment = APP_CONFIG.get("APP_ENV") + if not environment: + raise HTTPException(status_code=500, detail="APP_ENV configuration is required") + + instanceLabel = APP_CONFIG.get("APP_ENV_LABEL") + if not instanceLabel: + raise HTTPException(status_code=500, detail="APP_ENV_LABEL configuration is required") + return { - "apiBaseUrl": APP_CONFIG.get("APP_API_URL", ""), - "environment": APP_CONFIG.get("APP_ENV", "development"), - "instanceLabel": APP_CONFIG.get("APP_ENV_LABEL", "Development"), + "apiBaseUrl": apiBaseUrl, + "environment": environment, + "instanceLabel": instanceLabel, # Add other environment variables the frontend might need } @@ -63,3 +116,184 @@ async def options_route(request: Request, fullPath: str) -> Response: @limiter.limit("30/minute") async def favicon(request: Request) -> FileResponse: return FileResponse(str(staticFolder / "favicon.ico"), media_type="image/x-icon") + +# ---------------------- +# Log Management +# ---------------------- + +@router.get("/api/logs/app") +@limiter.limit("10/minute") +async def download_app_log(request: Request, currentUser: User = Depends(getCurrentUser)) -> FileResponse: + """Download the current day's application log file""" + # Check if user has admin privileges + if not hasattr(currentUser, 'privilege') or currentUser.privilege not in ('admin', 'sysadmin'): + raise HTTPException(status_code=403, detail="Admin privileges required") + 
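+    # Log files are written one per day (log_app_YYYYMMDD.log), so this endpoint
+    # only serves the current day's file and returns 404 if none exists yet.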
+ # Get log directory from config + logDir = APP_CONFIG.get("APP_LOGGING_LOG_DIR") + if not logDir: + raise HTTPException(status_code=500, detail="APP_LOGGING_LOG_DIR configuration is required") + + if not os.path.isabs(logDir): + # If relative path, make it relative to the gateway directory + gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + logDir = os.path.join(gatewayDir, logDir) + + # Get current date for log file + today = datetime.now().strftime("%Y%m%d") + logFile = os.path.join(logDir, f"log_app_{today}.log") + + if not os.path.exists(logFile): + raise HTTPException(status_code=404, detail=f"Application log file for today not found: {logFile}") + + return FileResponse( + path=logFile, + filename=f"log_app_{today}.log", + media_type="text/plain" + ) + +@router.get("/api/logs/audit") +@limiter.limit("10/minute") +async def download_audit_log(request: Request, currentUser: User = Depends(getCurrentUser)) -> FileResponse: + """Download the current day's audit log file""" + # Check if user has admin privileges + if not hasattr(currentUser, 'privilege') or currentUser.privilege not in ('admin', 'sysadmin'): + raise HTTPException(status_code=403, detail="Admin privileges required") + + # Get log directory from config + logDir = APP_CONFIG.get("APP_LOGGING_LOG_DIR") + if not logDir: + raise HTTPException(status_code=500, detail="APP_LOGGING_LOG_DIR configuration is required") + + if not os.path.isabs(logDir): + # If relative path, make it relative to the gateway directory + gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + logDir = os.path.join(gatewayDir, logDir) + + # Get current date for log file + today = datetime.now().strftime("%Y%m%d") + logFile = os.path.join(logDir, f"log_audit_{today}.log") + + if not os.path.exists(logFile): + raise HTTPException(status_code=404, detail=f"Audit log file for today not found: {logFile}") + + return FileResponse( + path=logFile, + filename=f"log_audit_{today}.log", + media_type="text/plain" + ) + +# ---------------------- +# Database Management +# ---------------------- + +@router.get("/api/databases") +@limiter.limit("10/minute") +async def list_databases(request: Request, currentUser: User = Depends(getCurrentUser)) -> Dict[str, Any]: + """List available databases""" + # Check if user has admin privileges + if not hasattr(currentUser, 'privilege') or currentUser.privilege not in ('admin', 'sysadmin'): + raise HTTPException(status_code=403, detail="Admin privileges required") + + try: + # Get configured database names from configuration + databases = [] + + # App database - required configuration + appDb = APP_CONFIG.get("DB_APP_DATABASE") + if not appDb: + raise HTTPException(status_code=500, detail="DB_APP_DATABASE configuration is required") + databases.append(appDb) + + # Chat database - optional configuration + chatDb = APP_CONFIG.get("DB_CHAT_DATABASE") + if chatDb and chatDb not in databases: + databases.append(chatDb) + + # Management database - optional configuration + managementDb = APP_CONFIG.get("DB_MANAGEMENT_DATABASE") + if managementDb and managementDb not in databases: + databases.append(managementDb) + + return {"databases": databases} + except HTTPException: + raise + except Exception as e: + logger.error(f"Error listing databases: {e}") + raise HTTPException(status_code=500, detail="Failed to list databases") + +@router.get("/api/databases/{database_name}/tables") +@limiter.limit("10/minute") +async def list_tables( + request: Request, + 
database_name: str, + currentUser: User = Depends(getCurrentUser) +) -> Dict[str, Any]: + """List tables in a specific database""" + # Check if user has admin privileges + if not hasattr(currentUser, 'privilege') or currentUser.privilege not in ('admin', 'sysadmin'): + raise HTTPException(status_code=403, detail="Admin privileges required") + + try: + # Get the appropriate interface based on database name + interface = get_interface_for_database(database_name, currentUser) + + # Check if interface and database connection exist + if not interface or not interface.db: + raise HTTPException(status_code=500, detail="Database interface not available") + + # Get tables from database + tables = interface.db.getTables() + + return {"database": database_name, "tables": tables} + except HTTPException: + raise + except Exception as e: + logger.error(f"Error listing tables for database {database_name}: {e}") + raise HTTPException(status_code=500, detail=f"Failed to list tables for database {database_name}") + +@router.post("/api/databases/{database_name}/tables/drop") +@limiter.limit("5/minute") +async def drop_table( + request: Request, + database_name: str, + currentUser: User = Depends(getCurrentUser), + payload: Dict[str, Any] = Body(...) +) -> Dict[str, Any]: + """Drop a specific table from a database""" + # Check if user has admin privileges + if not hasattr(currentUser, 'privilege') or currentUser.privilege not in ('admin', 'sysadmin'): + raise HTTPException(status_code=403, detail="Admin privileges required") + + table_name = payload.get("table") + if not table_name: + raise HTTPException(status_code=400, detail="Table name is required") + + try: + # Get the appropriate interface based on database name + interface = get_interface_for_database(database_name, currentUser) + + # Check if interface and database connection exist + if not interface or not interface.db: + raise HTTPException(status_code=500, detail="Database interface not available") + + # Check if table exists + tables = interface.db.getTables() + if table_name not in tables: + raise HTTPException(status_code=404, detail=f"Table '{table_name}' not found in database '{database_name}'") + + # Drop the table + with interface.db.connection.cursor() as cursor: + cursor.execute(f'DROP TABLE IF EXISTS "{table_name}" CASCADE') + interface.db.connection.commit() + + logger.warning(f"Admin drop_table executed by {currentUser.id}: dropped table '{table_name}' from database '{database_name}'") + return {"message": f"Table '{table_name}' dropped successfully from database '{database_name}'"} + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error dropping table {table_name} from database {database_name}: {e}") + if 'interface' in locals() and interface.db.connection: + interface.db.connection.rollback() + raise HTTPException(status_code=500, detail=f"Failed to drop table '{table_name}' from database '{database_name}'") diff --git a/modules/routes/routeDataConnections.py b/modules/routes/routeDataConnections.py index 01452206..30c5601f 100644 --- a/modules/routes/routeDataConnections.py +++ b/modules/routes/routeDataConnections.py @@ -65,8 +65,15 @@ def get_token_status_for_connection(interface, connection_id: str) -> tuple[str, return "none", None current_time = get_utc_timestamp() + + # Add 5 minute buffer for proactive refresh + buffer_time = 5 * 60 # 5 minutes in seconds if expires_at <= current_time: return "expired", expires_at + elif expires_at <= (current_time + buffer_time): + # Token expires soon - mark as 
active but log for proactive refresh + logger.debug(f"Token for connection {connection_id} expires soon (in {expires_at - current_time} seconds)") + return "active", expires_at else: return "active", expires_at @@ -89,6 +96,7 @@ async def get_connections( """Get all connections for the current user SECURITY: This endpoint is secure - users can only see their own connections. + Automatically refreshes expired OAuth tokens in the background. """ try: interface = getInterface(currentUser) @@ -97,6 +105,18 @@ async def get_connections( # This prevents admin from seeing other users' connections and causing confusion connections = interface.getUserConnections(currentUser.id) + # Perform silent token refresh for expired OAuth connections + try: + from modules.security.tokenRefreshService import token_refresh_service + refresh_result = await token_refresh_service.refresh_expired_tokens(currentUser.id) + if refresh_result.get("refreshed", 0) > 0: + logger.info(f"Silently refreshed {refresh_result['refreshed']} tokens for user {currentUser.id}") + # Re-fetch connections to get updated token status + connections = interface.getUserConnections(currentUser.id) + except Exception as e: + logger.warning(f"Silent token refresh failed for user {currentUser.id}: {str(e)}") + # Continue with original connections even if refresh fails + # Enhance each connection with token status information enhanced_connections = [] for connection in connections: diff --git a/modules/routes/routeDataFiles.py b/modules/routes/routeDataFiles.py index f0feef25..3f11342b 100644 --- a/modules/routes/routeDataFiles.py +++ b/modules/routes/routeDataFiles.py @@ -17,8 +17,7 @@ from modules.security.auth import limiter, getCurrentUser import modules.interfaces.interfaceComponentObjects as interfaceComponentObjects from modules.interfaces.interfaceComponentModel import FileItem, FilePreview from modules.shared.attributeUtils import getModelAttributeDefinitions, AttributeResponse, AttributeDefinition -from modules.interfaces.interfaceAppModel import User, DataNeutraliserConfig, DataNeutralizerAttributes -from modules.features.featureNeutralizePlayground import NeutralizationService +from modules.interfaces.interfaceAppModel import User # Configure logger logger = logging.getLogger(__name__) @@ -365,253 +364,4 @@ async def preview_file( detail=f"Error previewing file: {str(e)}" ) -# Data Neutralization endpoints - -@router.get("/neutralization/config", response_model=DataNeutraliserConfig) -@limiter.limit("30/minute") -async def get_neutralization_config( - request: Request, - currentUser: User = Depends(getCurrentUser) -) -> DataNeutraliserConfig: - """Get data neutralization configuration""" - try: - service = NeutralizationService(currentUser) - config = service.get_config() - - if not config: - # Return default config instead of 404 - return DataNeutraliserConfig( - mandateId=currentUser.mandateId, - userId=currentUser.id, - enabled=True, - namesToParse="", - sharepointSourcePath="", - sharepointTargetPath="" - ) - - return config - - except HTTPException: - raise - except Exception as e: - logger.error(f"Error getting neutralization config: {str(e)}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"Error getting neutralization config: {str(e)}" - ) - -@router.post("/neutralization/config", response_model=DataNeutraliserConfig) -@limiter.limit("10/minute") -async def save_neutralization_config( - request: Request, - config_data: Dict[str, Any] = Body(...), - currentUser: User = 
Depends(getCurrentUser) -) -> DataNeutraliserConfig: - """Save or update data neutralization configuration""" - try: - service = NeutralizationService(currentUser) - config = service.save_config(config_data) - - return config - - except Exception as e: - logger.error(f"Error saving neutralization config: {str(e)}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"Error saving neutralization config: {str(e)}" - ) - -@router.post("/neutralization/neutralize-text", response_model=Dict[str, Any]) -@limiter.limit("20/minute") -async def neutralize_text( - request: Request, - text_data: Dict[str, Any] = Body(...), - currentUser: User = Depends(getCurrentUser) -) -> Dict[str, Any]: - """Neutralize text content""" - try: - text = text_data.get("text", "") - file_id = text_data.get("fileId") - - if not text: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail="Text content is required" - ) - - service = NeutralizationService(currentUser) - result = service.neutralize_text(text, file_id) - - return result - - except HTTPException: - raise - except Exception as e: - logger.error(f"Error neutralizing text: {str(e)}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"Error neutralizing text: {str(e)}" - ) - -@router.post("/neutralization/resolve-text", response_model=Dict[str, str]) -@limiter.limit("20/minute") -async def resolve_text( - request: Request, - text_data: Dict[str, str] = Body(...), - currentUser: User = Depends(getCurrentUser) -) -> Dict[str, str]: - """Resolve UIDs in neutralized text back to original text""" - try: - text = text_data.get("text", "") - - if not text: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail="Text content is required" - ) - - service = NeutralizationService(currentUser) - resolved_text = service.resolve_text(text) - - return {"resolved_text": resolved_text} - - except HTTPException: - raise - except Exception as e: - logger.error(f"Error resolving text: {str(e)}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"Error resolving text: {str(e)}" - ) - -@router.get("/neutralization/attributes", response_model=List[DataNeutralizerAttributes]) -@limiter.limit("30/minute") -async def get_neutralization_attributes( - request: Request, - fileId: Optional[str] = Query(None, description="Filter by file ID"), - currentUser: User = Depends(getCurrentUser) -) -> List[DataNeutralizerAttributes]: - """Get neutralization attributes, optionally filtered by file ID""" - try: - service = NeutralizationService(currentUser) - attributes = service.get_attributes(fileId) - - return attributes - - except Exception as e: - logger.error(f"Error getting neutralization attributes: {str(e)}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"Error getting neutralization attributes: {str(e)}" - ) - -@router.post("/neutralization/process-sharepoint", response_model=Dict[str, Any]) -@limiter.limit("5/minute") -async def process_sharepoint_files( - request: Request, - paths_data: Dict[str, str] = Body(...), - currentUser: User = Depends(getCurrentUser) -) -> Dict[str, Any]: - """Process files from SharePoint source path and store neutralized files in target path""" - try: - source_path = paths_data.get("sourcePath", "") - target_path = paths_data.get("targetPath", "") - - if not source_path or not target_path: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail="Both source and 
target paths are required" - ) - - service = NeutralizationService(currentUser) - result = await service.process_sharepoint_files(source_path, target_path) - - return result - - except HTTPException: - raise - except Exception as e: - logger.error(f"Error processing SharePoint files: {str(e)}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"Error processing SharePoint files: {str(e)}" - ) - -@router.post("/neutralization/batch-process", response_model=Dict[str, Any]) -@limiter.limit("10/minute") -async def batch_process_files( - request: Request, - files_data: List[Dict[str, Any]] = Body(...), - currentUser: User = Depends(getCurrentUser) -) -> Dict[str, Any]: - """Process multiple files for neutralization""" - try: - if not files_data: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail="Files data is required" - ) - - service = NeutralizationService(currentUser) - result = service.batch_neutralize_files(files_data) - - return result - - except HTTPException: - raise - except Exception as e: - logger.error(f"Error batch processing files: {str(e)}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"Error batch processing files: {str(e)}" - ) - -@router.get("/neutralization/stats", response_model=Dict[str, Any]) -@limiter.limit("30/minute") -async def get_neutralization_stats( - request: Request, - currentUser: User = Depends(getCurrentUser) -) -> Dict[str, Any]: - """Get neutralization processing statistics""" - try: - service = NeutralizationService(currentUser) - stats = service.get_processing_stats() - - return stats - - except Exception as e: - logger.error(f"Error getting neutralization stats: {str(e)}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"Error getting neutralization stats: {str(e)}" - ) - -@router.delete("/neutralization/attributes/{fileId}", response_model=Dict[str, str]) -@limiter.limit("10/minute") -async def cleanup_file_attributes( - request: Request, - fileId: str = Path(..., description="File ID to cleanup attributes for"), - currentUser: User = Depends(getCurrentUser) -) -> Dict[str, str]: - """Clean up neutralization attributes for a specific file""" - try: - service = NeutralizationService(currentUser) - success = service.cleanup_file_attributes(fileId) - - if success: - return {"message": f"Successfully cleaned up attributes for file {fileId}"} - else: - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to cleanup file attributes" - ) - - except HTTPException: - raise - except Exception as e: - logger.error(f"Error cleaning up file attributes: {str(e)}") - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"Error cleaning up file attributes: {str(e)}" - ) diff --git a/modules/routes/routeDataNeutralization.py b/modules/routes/routeDataNeutralization.py new file mode 100644 index 00000000..697c6f1c --- /dev/null +++ b/modules/routes/routeDataNeutralization.py @@ -0,0 +1,274 @@ +from fastapi import APIRouter, HTTPException, Depends, Path, Request, status, Query, Body +from typing import List, Dict, Any, Optional +import logging + +# Import auth module +from modules.security.auth import limiter, getCurrentUser + +# Import interfaces +from modules.interfaces.interfaceAppModel import User, DataNeutraliserConfig, DataNeutralizerAttributes +from modules.features.featureNeutralizePlayground import NeutralizationService + +# Configure logger +logger = 
logging.getLogger(__name__) + +# Create router for neutralization endpoints +router = APIRouter( + prefix="/api/neutralization", + tags=["Data Neutralisation"], + responses={ + 404: {"description": "Not found"}, + 400: {"description": "Bad request"}, + 401: {"description": "Unauthorized"}, + 403: {"description": "Forbidden"}, + 500: {"description": "Internal server error"} + } +) + +@router.get("/config", response_model=DataNeutraliserConfig) +@limiter.limit("30/minute") +async def get_neutralization_config( + request: Request, + currentUser: User = Depends(getCurrentUser) +) -> DataNeutraliserConfig: + """Get data neutralization configuration""" + try: + service = NeutralizationService(currentUser) + config = service.get_config() + + if not config: + # Return default config instead of 404 + return DataNeutraliserConfig( + mandateId=currentUser.mandateId, + userId=currentUser.id, + enabled=True, + namesToParse="", + sharepointSourcePath="", + sharepointTargetPath="" + ) + + return config + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error getting neutralization config: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Error getting neutralization config: {str(e)}" + ) + +@router.post("/config", response_model=DataNeutraliserConfig) +@limiter.limit("10/minute") +async def save_neutralization_config( + request: Request, + config_data: Dict[str, Any] = Body(...), + currentUser: User = Depends(getCurrentUser) +) -> DataNeutraliserConfig: + """Save or update data neutralization configuration""" + try: + service = NeutralizationService(currentUser) + config = service.save_config(config_data) + + return config + + except Exception as e: + logger.error(f"Error saving neutralization config: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Error saving neutralization config: {str(e)}" + ) + +@router.post("/neutralize-text", response_model=Dict[str, Any]) +@limiter.limit("20/minute") +async def neutralize_text( + request: Request, + text_data: Dict[str, Any] = Body(...), + currentUser: User = Depends(getCurrentUser) +) -> Dict[str, Any]: + """Neutralize text content""" + try: + text = text_data.get("text", "") + file_id = text_data.get("fileId") + + if not text: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Text content is required" + ) + + service = NeutralizationService(currentUser) + result = service.neutralize_text(text, file_id) + + return result + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error neutralizing text: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Error neutralizing text: {str(e)}" + ) + +@router.post("/resolve-text", response_model=Dict[str, str]) +@limiter.limit("20/minute") +async def resolve_text( + request: Request, + text_data: Dict[str, str] = Body(...), + currentUser: User = Depends(getCurrentUser) +) -> Dict[str, str]: + """Resolve UIDs in neutralized text back to original text""" + try: + text = text_data.get("text", "") + + if not text: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Text content is required" + ) + + service = NeutralizationService(currentUser) + resolved_text = service.resolve_text(text) + + return {"resolved_text": resolved_text} + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error resolving text: {str(e)}") + raise HTTPException( + 
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Error resolving text: {str(e)}" + ) + +@router.get("/attributes", response_model=List[DataNeutralizerAttributes]) +@limiter.limit("30/minute") +async def get_neutralization_attributes( + request: Request, + fileId: Optional[str] = Query(None, description="Filter by file ID"), + currentUser: User = Depends(getCurrentUser) +) -> List[DataNeutralizerAttributes]: + """Get neutralization attributes, optionally filtered by file ID""" + try: + service = NeutralizationService(currentUser) + attributes = service.get_attributes(fileId) + + return attributes + + except Exception as e: + logger.error(f"Error getting neutralization attributes: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Error getting neutralization attributes: {str(e)}" + ) + +@router.post("/process-sharepoint", response_model=Dict[str, Any]) +@limiter.limit("5/minute") +async def process_sharepoint_files( + request: Request, + paths_data: Dict[str, str] = Body(...), + currentUser: User = Depends(getCurrentUser) +) -> Dict[str, Any]: + """Process files from SharePoint source path and store neutralized files in target path""" + try: + source_path = paths_data.get("sourcePath", "") + target_path = paths_data.get("targetPath", "") + + if not source_path or not target_path: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Both source and target paths are required" + ) + + service = NeutralizationService(currentUser) + result = await service.process_sharepoint_files(source_path, target_path) + + return result + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error processing SharePoint files: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Error processing SharePoint files: {str(e)}" + ) + +@router.post("/batch-process", response_model=Dict[str, Any]) +@limiter.limit("10/minute") +async def batch_process_files( + request: Request, + files_data: List[Dict[str, Any]] = Body(...), + currentUser: User = Depends(getCurrentUser) +) -> Dict[str, Any]: + """Process multiple files for neutralization""" + try: + if not files_data: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Files data is required" + ) + + service = NeutralizationService(currentUser) + result = service.batch_neutralize_files(files_data) + + return result + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error batch processing files: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Error batch processing files: {str(e)}" + ) + +@router.get("/stats", response_model=Dict[str, Any]) +@limiter.limit("30/minute") +async def get_neutralization_stats( + request: Request, + currentUser: User = Depends(getCurrentUser) +) -> Dict[str, Any]: + """Get neutralization processing statistics""" + try: + service = NeutralizationService(currentUser) + stats = service.get_processing_stats() + + return stats + + except Exception as e: + logger.error(f"Error getting neutralization stats: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Error getting neutralization stats: {str(e)}" + ) + +@router.delete("/attributes/{fileId}", response_model=Dict[str, str]) +@limiter.limit("10/minute") +async def cleanup_file_attributes( + request: Request, + fileId: str = Path(..., description="File ID to cleanup attributes for"), + currentUser: User = 
Depends(getCurrentUser) +) -> Dict[str, str]: + """Clean up neutralization attributes for a specific file""" + try: + service = NeutralizationService(currentUser) + success = service.cleanup_file_attributes(fileId) + + if success: + return {"message": f"Successfully cleaned up attributes for file {fileId}"} + else: + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Failed to cleanup file attributes" + ) + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error cleaning up file attributes: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Error cleaning up file attributes: {str(e)}" + ) diff --git a/modules/routes/routeDataUsers.py b/modules/routes/routeDataUsers.py index 9d8be813..01fb41f1 100644 --- a/modules/routes/routeDataUsers.py +++ b/modules/routes/routeDataUsers.py @@ -132,6 +132,165 @@ async def update_user( return updatedUser +@router.post("/{userId}/reset-password") +@limiter.limit("5/minute") +async def reset_user_password( + request: Request, + userId: str = Path(..., description="ID of the user to reset password for"), + newPassword: str = Body(..., embed=True), + currentUser: User = Depends(getCurrentUser) +) -> Dict[str, Any]: + """Reset user password (Admin only)""" + try: + # Check if current user is admin + if currentUser.privilege != UserPrivilege.ADMIN: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Only administrators can reset passwords" + ) + + # Get user interface + appInterface = getInterface(currentUser) + + # Get target user + target_user = appInterface.getUserById(userId) + if not target_user: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="User not found" + ) + + # Validate password strength + if len(newPassword) < 8: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Password must be at least 8 characters long" + ) + + # Reset password + success = appInterface.resetUserPassword(userId, newPassword) + if not success: + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Failed to reset password" + ) + + # SECURITY: Automatically revoke all tokens for the user after password reset + try: + from modules.interfaces.interfaceAppModel import AuthAuthority + revoked_count = appInterface.revokeTokensByUser( + userId=userId, + authority=None, # Revoke all authorities + mandateId=None, # Revoke across all mandates + revokedBy=currentUser.id, + reason="password_reset" + ) + logger.info(f"Revoked {revoked_count} tokens for user {userId} after password reset") + except Exception as e: + logger.error(f"Failed to revoke tokens after password reset for user {userId}: {str(e)}") + # Don't fail the password reset if token revocation fails + + # Log password reset + try: + from modules.shared.auditLogger import audit_logger + audit_logger.log_security_event( + user_id=str(currentUser.id), + mandate_id=str(currentUser.mandateId), + action="password_reset", + details=f"Reset password for user {userId}" + ) + except Exception: + pass + + return { + "message": "Password reset successfully", + "user_id": userId + } + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error resetting password: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Password reset failed: {str(e)}" + ) + +@router.post("/change-password") +@limiter.limit("5/minute") +async def change_password( + request: Request, + 
currentPassword: str = Body(..., embed=True), + newPassword: str = Body(..., embed=True), + currentUser: User = Depends(getCurrentUser) +) -> Dict[str, Any]: + """Change current user's password""" + try: + # Get user interface + appInterface = getInterface(currentUser) + + # Verify current password + if not appInterface.verifyPassword(currentPassword, currentUser.passwordHash): + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Current password is incorrect" + ) + + # Validate new password strength + if len(newPassword) < 8: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="New password must be at least 8 characters long" + ) + + # Change password + success = appInterface.resetUserPassword(str(currentUser.id), newPassword) + if not success: + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Failed to change password" + ) + + # SECURITY: Automatically revoke all tokens for the user after password change + try: + from modules.interfaces.interfaceAppModel import AuthAuthority + revoked_count = appInterface.revokeTokensByUser( + userId=str(currentUser.id), + authority=None, # Revoke all authorities + mandateId=None, # Revoke across all mandates + revokedBy=currentUser.id, + reason="password_change" + ) + logger.info(f"Revoked {revoked_count} tokens for user {currentUser.id} after password change") + except Exception as e: + logger.error(f"Failed to revoke tokens after password change for user {currentUser.id}: {str(e)}") + # Don't fail the password change if token revocation fails + + # Log password change + try: + from modules.shared.auditLogger import audit_logger + audit_logger.log_security_event( + user_id=str(currentUser.id), + mandate_id=str(currentUser.mandateId), + action="password_change", + details="User changed their own password" + ) + except Exception: + pass + + return { + "message": "Password changed successfully. Please log in again with your new password." 
+ } + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error changing password: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Password change failed: {str(e)}" + ) + @router.delete("/{userId}", response_model=Dict[str, Any]) @limiter.limit("10/minute") async def delete_user( diff --git a/modules/routes/routeSecurityLocal.py b/modules/routes/routeSecurityLocal.py index 15f998f9..7b396f77 100644 --- a/modules/routes/routeSecurityLocal.py +++ b/modules/routes/routeSecurityLocal.py @@ -13,7 +13,7 @@ from jose import jwt from pydantic import BaseModel # Import auth modules -from modules.security.auth import createAccessToken, getCurrentUser, limiter, SECRET_KEY, ALGORITHM +from modules.security.auth import createAccessToken, createAccessTokenWithCookie, setRefreshTokenCookie, getCurrentUser, limiter, SECRET_KEY, ALGORITHM from modules.interfaces.interfaceAppObjects import getInterface, getRootInterface from modules.interfaces.interfaceAppModel import User, UserInDB, AuthAuthority, UserPrivilege, Token from modules.shared.attributeUtils import ModelMixin @@ -38,6 +38,7 @@ router = APIRouter( @limiter.limit("30/minute") async def login( request: Request, + response: Response, formData: OAuth2PasswordRequestForm = Depends(), ) -> Dict[str, Any]: """Get access token for local user authentication""" @@ -90,24 +91,25 @@ async def login( session_id = str(uuid.uuid4()) token_data["sid"] = session_id - # Create access token - access_token, expires_at = createAccessToken(token_data) - if not access_token: - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to create access token" - ) + # Create access token with httpOnly cookie + access_token = createAccessTokenWithCookie(token_data, response) + + # Create refresh token with httpOnly cookie + refresh_token = setRefreshTokenCookie(token_data, response) + + # Get expiration time for response + try: + payload = jwt.decode(access_token, SECRET_KEY, algorithms=[ALGORITHM]) + expires_at = datetime.fromtimestamp(payload.get("exp")) + except Exception as e: + logger.error(f"Failed to decode access token: {str(e)}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to finalize token") # Get user-specific interface for token operations userInterface = getInterface(user) - # Decode JWT to get jti for DB persistence - try: - payload = jwt.decode(access_token, SECRET_KEY, algorithms=[ALGORITHM]) - jti = payload.get("jti") - except Exception as e: - logger.error(f"Failed to decode created JWT: {str(e)}") - raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to finalize token") + # Get jti from already decoded payload + jti = payload.get("jti") # Create token token = Token( @@ -137,12 +139,12 @@ async def login( # Don't fail if audit logging fails pass - # Create response data + # Create response data (tokens are now in httpOnly cookies) response_data = { "type": "local_auth_success", - "access_token": access_token, - "token_data": token.dict(), - "authenticationAuthority": "local" + "message": "Login successful - tokens set in httpOnly cookies", + "authenticationAuthority": "local", + "expires_at": expires_at.isoformat() } return response_data @@ -253,20 +255,85 @@ async def read_user_me( detail=f"Failed to get current user: {str(e)}" ) +@router.post("/refresh") +@limiter.limit("60/minute") +async def refresh_token( + request: Request, + response: Response, + currentUser: User = 
Depends(getCurrentUser)
+) -> Dict[str, Any]:
+    """Refresh access token using refresh token from cookie"""
+    try:
+        # Get refresh token from cookie
+        refresh_token = request.cookies.get('refresh_token')
+        if not refresh_token:
+            raise HTTPException(status_code=401, detail="No refresh token found")
+
+        # Validate refresh token
+        try:
+            payload = jwt.decode(refresh_token, SECRET_KEY, algorithms=[ALGORITHM])
+            if payload.get("type") != "refresh":
+                raise HTTPException(status_code=401, detail="Invalid refresh token type")
+        except jwt.ExpiredSignatureError:
+            raise HTTPException(status_code=401, detail="Refresh token expired")
+        except jwt.JWTError:
+            raise HTTPException(status_code=401, detail="Invalid refresh token")
+
+        # Create new token data
+        token_data = {
+            "sub": currentUser.username,
+            "mandateId": str(currentUser.mandateId),
+            "userId": str(currentUser.id),
+            "authenticationAuthority": currentUser.authenticationAuthority
+        }
+
+        # Create new access token with cookie
+        access_token = createAccessTokenWithCookie(token_data, response)
+
+        # Get expiration time
+        try:
+            payload = jwt.decode(access_token, SECRET_KEY, algorithms=[ALGORITHM])
+            expires_at = datetime.fromtimestamp(payload.get("exp"))
+        except Exception as e:
+            logger.error(f"Failed to decode new access token: {str(e)}")
+            raise HTTPException(status_code=500, detail="Failed to create new token")
+
+        return {
+            "type": "token_refresh_success",
+            "message": "Token refreshed successfully",
+            "expires_at": expires_at.isoformat()
+        }
+
+    except HTTPException:
+        # Re-raise HTTP errors unchanged, including the 503 raised when the token table is missing
+        raise
+    except Exception as e:
+        logger.error(f"Token refresh error: {str(e)}")
+        raise HTTPException(status_code=500, detail="Token refresh failed")
+
 @router.post("/logout")
 @limiter.limit("30/minute")
-async def logout(request: Request, currentUser: User = Depends(getCurrentUser)) -> JSONResponse:
+async def logout(request: Request, response: Response, currentUser: User = Depends(getCurrentUser)) -> JSONResponse:
     """Logout from local authentication"""
     try:
         # Get user interface with current user context
         appInterface = getInterface(currentUser)
-        # Read bearer token from Authorization header to obtain session id / jti
-        auth_header = request.headers.get("Authorization")
-        if not auth_header or not auth_header.lower().startswith("bearer "):
-            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Missing Authorization header")
-        raw_token = auth_header.split(" ", 1)[1].strip()
+
+        # Get token from cookie or Authorization header
+        token = request.cookies.get('auth_token')
+        if not token:
+            auth_header = request.headers.get("Authorization")
+            if auth_header and auth_header.lower().startswith("bearer "):
+                token = auth_header.split(" ", 1)[1].strip()
+
+        if not token:
+            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="No token found")
+
         try:
-            payload = jwt.decode(raw_token, SECRET_KEY, algorithms=[ALGORITHM])
+            payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
             session_id = payload.get("sid") or payload.get("sessionId")
             jti = payload.get("jti")
         except Exception as e:
@@ -293,8 +360,12 @@ async def logout(request: Request, currentUser: User = Depends(getCurrentUser))
             # Don't fail if audit logging fails
             pass
 
+        # Clear httpOnly cookies
+        response.delete_cookie(key="auth_token", httponly=True, samesite="strict")
+        response.delete_cookie(key="refresh_token",
httponly=True, samesite="strict") + return JSONResponse({ - "message": "Successfully logged out", + "message": "Successfully logged out - cookies cleared", "revokedTokens": revoked }) diff --git a/modules/security/auth.py b/modules/security/auth.py index 4ada086c..2ccaaa74 100644 --- a/modules/security/auth.py +++ b/modules/security/auth.py @@ -6,8 +6,8 @@ Handles JWT-based authentication, token generation, and user context. from datetime import datetime, timedelta, timezone import uuid from typing import Optional, Dict, Any, Tuple -from fastapi import Depends, HTTPException, status, Request -from fastapi.security import OAuth2PasswordBearer +from fastapi import Depends, HTTPException, status, Request, Response +from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials from jose import JWTError, jwt import logging from slowapi import Limiter @@ -24,8 +24,29 @@ ALGORITHM = APP_CONFIG.get("Auth_ALGORITHM") ACCESS_TOKEN_EXPIRE_MINUTES = int(APP_CONFIG.get("APP_TOKEN_EXPIRY")) REFRESH_TOKEN_EXPIRE_DAYS = int(APP_CONFIG.get("APP_REFRESH_TOKEN_EXPIRY", "7")) -# OAuth2 Setup -oauth2Scheme = OAuth2PasswordBearer(tokenUrl="token") +# Cookie-based Authentication Setup +class CookieAuth(HTTPBearer): + """Cookie-based authentication that checks httpOnly cookies first, then Authorization header""" + def __init__(self, auto_error: bool = True): + super().__init__(auto_error=auto_error) + + async def __call__(self, request: Request) -> Optional[str]: + # 1. Check httpOnly cookie first (preferred method) + token = request.cookies.get('auth_token') + if token: + return token + + # 2. Fallback to Authorization header for API calls + authorization = request.headers.get("Authorization") + if authorization and authorization.startswith("Bearer "): + return authorization.split(" ")[1] + + if self.auto_error: + raise HTTPException(status_code=401, detail="Not authenticated") + return None + +# Initialize cookie-based auth +cookieAuth = CookieAuth(auto_error=False) # Rate Limiter limiter = Limiter(key_func=get_remote_address) @@ -59,7 +80,82 @@ def createAccessToken(data: dict, expiresDelta: Optional[timedelta] = None) -> T return encodedJwt, expire -def _getUserBase(token: str = Depends(oauth2Scheme)) -> User: +def createAccessTokenWithCookie(data: dict, response: Response, expiresDelta: Optional[timedelta] = None) -> str: + """ + Creates a JWT Access Token and sets it as an httpOnly cookie. + + Args: + data: Data to encode (usually user ID or username) + response: FastAPI Response object to set cookie + expiresDelta: Validity duration of the token (optional) + + Returns: + JWT Token as string + """ + access_token, expires_at = createAccessToken(data, expiresDelta) + + # Set httpOnly cookie + response.set_cookie( + key="auth_token", + value=access_token, + httponly=True, + secure=True, # HTTPS only in production + samesite="strict", + max_age=int(expiresDelta.total_seconds()) if expiresDelta else ACCESS_TOKEN_EXPIRE_MINUTES * 60 + ) + + return access_token + +def createRefreshToken(data: dict) -> Tuple[str, datetime]: + """ + Creates a JWT Refresh Token with longer expiration. 
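+
+    Example (illustrative payload; real callers pass the login token_data):
+
+        refresh_jwt, expires = createRefreshToken({"sub": "jdoe", "userId": "42"})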
+ + Args: + data: Data to encode (usually user ID or username) + + Returns: + Tuple of (JWT Refresh Token as string, expiration datetime) + """ + toEncode = data.copy() + # Ensure a token id (jti) exists for revocation tracking + if "jti" not in toEncode or not toEncode.get("jti"): + toEncode["jti"] = str(uuid.uuid4()) + + # Add refresh token type + toEncode["type"] = "refresh" + + expire = get_utc_now() + timedelta(days=REFRESH_TOKEN_EXPIRE_DAYS) + toEncode.update({"exp": expire}) + encodedJwt = jwt.encode(toEncode, SECRET_KEY, algorithm=ALGORITHM) + + return encodedJwt, expire + +def setRefreshTokenCookie(data: dict, response: Response) -> str: + """ + Creates a JWT Refresh Token and sets it as an httpOnly cookie. + + Args: + data: Data to encode (usually user ID or username) + response: FastAPI Response object to set cookie + + Returns: + JWT Refresh Token as string + """ + refresh_token, expires_at = createRefreshToken(data) + + # Set httpOnly cookie for refresh token + response.set_cookie( + key="refresh_token", + value=refresh_token, + httponly=True, + secure=True, # HTTPS only in production + samesite="strict", + max_age=REFRESH_TOKEN_EXPIRE_DAYS * 24 * 60 * 60 # Days to seconds + ) + + return refresh_token + +def _getUserBase(token: str = Depends(cookieAuth)) -> User: """ Extracts and validates the current user from the JWT token. @@ -138,7 +234,14 @@ def _getUserBase(token: str = Depends(oauth2Scheme)) -> User: db_tokens = appInterface.db.getRecordset( Token, recordFilter={"id": tokenId} ) - except Exception: + except Exception as e: + # Check if this is a table not found error (token table was deleted) + if "does not exist" in str(e).lower() or "relation" in str(e).lower(): + logger.error("Token table does not exist - database may have been reset") + raise HTTPException( + status_code=status.HTTP_503_SERVICE_UNAVAILABLE, + detail="Authentication service temporarily unavailable. Please contact administrator." + ) db_tokens = [] if db_tokens: diff --git a/modules/security/csrf.py b/modules/security/csrf.py new file mode 100644 index 00000000..030ab665 --- /dev/null +++ b/modules/security/csrf.py @@ -0,0 +1,96 @@ +""" +CSRF Protection Middleware for PowerOn Gateway + +This module provides CSRF protection for state-changing operations. +""" + +import logging +from fastapi import Request, HTTPException, status +from starlette.middleware.base import BaseHTTPMiddleware +from typing import Set + +logger = logging.getLogger(__name__) + +class CSRFMiddleware(BaseHTTPMiddleware): + """ + CSRF protection middleware that validates CSRF tokens for state-changing operations. + """ + + def __init__(self, app, exempt_paths: Set[str] = None): + super().__init__(app) + # Paths that are exempt from CSRF protection + self.exempt_paths = exempt_paths or { + "/api/local/login", + "/api/local/register", + "/api/msft/login", + "/api/google/login", + "/api/msft/callback", + "/api/google/callback" + } + + # State-changing HTTP methods that require CSRF protection + self.protected_methods = {"POST", "PUT", "DELETE", "PATCH"} + + async def dispatch(self, request: Request, call_next): + """ + Check CSRF token for state-changing operations. 
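+
+        Client-side sketch (illustrative; token issuance is handled elsewhere):
+
+            requests.post(url, headers={"X-CSRF-Token": token}, json=payload)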
+ """ + # Skip CSRF check for exempt paths + if request.url.path in self.exempt_paths: + return await call_next(request) + + # Skip CSRF check for non-state-changing methods + if request.method not in self.protected_methods: + return await call_next(request) + + # Skip CSRF check for OPTIONS requests (CORS preflight) + if request.method == "OPTIONS": + return await call_next(request) + + # Get CSRF token from header + csrf_token = request.headers.get("X-CSRF-Token") + if not csrf_token: + logger.warning(f"CSRF token missing for {request.method} {request.url.path}") + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="CSRF token missing" + ) + + # Validate CSRF token format (basic validation) + if not self._is_valid_csrf_token(csrf_token): + logger.warning(f"Invalid CSRF token format for {request.method} {request.url.path}") + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Invalid CSRF token format" + ) + + # Additional CSRF validation could be added here: + # - Check token against session + # - Validate token expiration + # - Verify token origin + + return await call_next(request) + + def _is_valid_csrf_token(self, token: str) -> bool: + """ + Basic validation of CSRF token format. + + Args: + token: The CSRF token to validate + + Returns: + bool: True if token format is valid + """ + if not token or not isinstance(token, str): + return False + + # Basic format validation (hex string, reasonable length) + if len(token) < 16 or len(token) > 64: + return False + + # Check if token contains only valid hex characters + try: + int(token, 16) + return True + except ValueError: + return False diff --git a/modules/security/tokenRefreshMiddleware.py b/modules/security/tokenRefreshMiddleware.py new file mode 100644 index 00000000..89f21858 --- /dev/null +++ b/modules/security/tokenRefreshMiddleware.py @@ -0,0 +1,191 @@ +""" +Token Refresh Middleware for PowerOn Gateway + +This middleware automatically refreshes expired OAuth tokens +when API endpoints are accessed, providing seamless user experience. +""" + +import logging +from fastapi import Request, Response +from starlette.middleware.base import BaseHTTPMiddleware +from starlette.responses import Response as StarletteResponse +from typing import Callable +import asyncio +from modules.security.tokenRefreshService import token_refresh_service + +logger = logging.getLogger(__name__) + +class TokenRefreshMiddleware(BaseHTTPMiddleware): + """ + Middleware that automatically refreshes expired OAuth tokens + when API endpoints are accessed. 
+ """ + + def __init__(self, app, enabled: bool = True): + super().__init__(app) + self.enabled = enabled + self.refresh_endpoints = { + '/api/connections', + '/api/files', + '/api/chat', + '/api/msft', + '/api/google' + } + + async def dispatch(self, request: Request, call_next: Callable) -> Response: + """ + Process request and refresh tokens if needed + """ + if not self.enabled: + return await call_next(request) + + # Check if this is an endpoint that might need token refresh + if not self._should_check_tokens(request): + return await call_next(request) + + # Extract user ID from request (if available) + user_id = self._extract_user_id(request) + if not user_id: + return await call_next(request) + + try: + # Perform silent token refresh in background + # Don't wait for completion to avoid slowing down the request + asyncio.create_task(self._silent_refresh_tokens(user_id)) + + except Exception as e: + logger.warning(f"Error scheduling token refresh: {str(e)}") + # Continue with request even if refresh scheduling fails + + # Process the original request + response = await call_next(request) + return response + + def _should_check_tokens(self, request: Request) -> bool: + """ + Check if this request should trigger token refresh + """ + path = request.url.path + + # Only check specific API endpoints + for endpoint in self.refresh_endpoints: + if path.startswith(endpoint): + return True + + return False + + def _extract_user_id(self, request: Request) -> str: + """ + Extract user ID from request context + """ + try: + # Try to get user from request state (set by auth middleware) + if hasattr(request.state, 'user_id'): + return request.state.user_id + + # Try to get from JWT token in cookies or headers + # This is a fallback if user state is not available + return None + + except Exception as e: + logger.debug(f"Could not extract user ID: {str(e)}") + return None + + async def _silent_refresh_tokens(self, user_id: str) -> None: + """ + Perform silent token refresh for the user + """ + try: + logger.debug(f"Starting silent token refresh for user {user_id}") + + # Refresh expired tokens + result = await token_refresh_service.refresh_expired_tokens(user_id) + + if result.get("refreshed", 0) > 0: + logger.info(f"Silently refreshed {result['refreshed']} tokens for user {user_id}") + + except Exception as e: + logger.error(f"Error in silent token refresh for user {user_id}: {str(e)}") + +class ProactiveTokenRefreshMiddleware(BaseHTTPMiddleware): + """ + Middleware that proactively refreshes tokens before they expire + """ + + def __init__(self, app, enabled: bool = True, check_interval_minutes: int = 5): + super().__init__(app) + self.enabled = enabled + self.check_interval_minutes = check_interval_minutes + self.last_check = {} + + async def dispatch(self, request: Request, call_next: Callable) -> Response: + """ + Process request and check for proactive refresh needs + """ + if not self.enabled: + return await call_next(request) + + # Extract user ID from request + user_id = self._extract_user_id(request) + if not user_id: + return await call_next(request) + + # Check if we need to do proactive refresh + if self._should_check_proactive_refresh(user_id): + try: + # Perform proactive refresh in background + asyncio.create_task(self._proactive_refresh_tokens(user_id)) + self.last_check[user_id] = get_utc_timestamp() + + except Exception as e: + logger.warning(f"Error scheduling proactive refresh: {str(e)}") + + # Process the original request + response = await call_next(request) + return response + 
+    def _extract_user_id(self, request: Request) -> Optional[str]:
+        """
+        Extract user ID from request context
+        """
+        try:
+            if hasattr(request.state, 'user_id'):
+                return request.state.user_id
+            return None
+        except Exception:
+            return None
+
+    def _should_check_proactive_refresh(self, user_id: str) -> bool:
+        """
+        Check if we should perform proactive refresh for this user
+        """
+        try:
+            from modules.shared.timezoneUtils import get_utc_timestamp
+            current_time = get_utc_timestamp()
+            last_check = self.last_check.get(user_id, 0)
+
+            # Only check again once the configured interval has elapsed
+            return (current_time - last_check) > (self.check_interval_minutes * 60)
+
+        except Exception:
+            return False
+
+    async def _proactive_refresh_tokens(self, user_id: str) -> None:
+        """
+        Perform proactive token refresh for the user
+        """
+        try:
+            logger.debug(f"Starting proactive token refresh for user {user_id}")
+
+            result = await token_refresh_service.proactive_refresh(user_id)
+
+            if result.get("refreshed", 0) > 0:
+                logger.info(f"Proactively refreshed {result['refreshed']} tokens for user {user_id}")
+
+        except Exception as e:
+            logger.error(f"Error in proactive token refresh for user {user_id}: {str(e)}")
+
+def get_utc_timestamp():
+    """Get current UTC timestamp"""
+    from modules.shared.timezoneUtils import get_utc_timestamp as _get_utc_timestamp
+    return _get_utc_timestamp()
diff --git a/modules/security/tokenRefreshService.py b/modules/security/tokenRefreshService.py
new file mode 100644
index 00000000..649960bc
--- /dev/null
+++ b/modules/security/tokenRefreshService.py
@@ -0,0 +1,291 @@
+"""
+Token Refresh Service for PowerOn Gateway
+
+This service handles automatic token refresh for OAuth connections
+when they are accessed via API calls. It runs silently in the background
+to ensure users don't experience token expiration issues.
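+
+Usage sketch (illustrative; typically driven by the middleware or a background job):
+
+    result = await token_refresh_service.refresh_expired_tokens(user_id)
+    # -> e.g. {"refreshed": 1, "failed": 0, "rate_limited": 0}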
+""" + +import logging +from typing import Optional, Dict, Any, List +from datetime import datetime, timedelta +from modules.interfaces.interfaceAppObjects import getInterface +from modules.interfaces.interfaceAppModel import User, UserConnection, AuthAuthority, Token +from modules.shared.timezoneUtils import get_utc_timestamp +from modules.shared.auditLogger import audit_logger + +logger = logging.getLogger(__name__) + +class TokenRefreshService: + """Service for automatic token refresh operations""" + + def __init__(self): + self.rate_limit_map = {} # Track refresh attempts per connection + self.max_attempts_per_hour = 3 + self.refresh_window_minutes = 60 + + def _is_rate_limited(self, connection_id: str) -> bool: + """Check if connection is rate limited for refresh attempts""" + now = get_utc_timestamp() + if connection_id not in self.rate_limit_map: + return False + + # Remove attempts older than 1 hour + recent_attempts = [ + attempt_time for attempt_time in self.rate_limit_map[connection_id] + if now - attempt_time < (self.refresh_window_minutes * 60) + ] + self.rate_limit_map[connection_id] = recent_attempts + + return len(recent_attempts) >= self.max_attempts_per_hour + + def _record_refresh_attempt(self, connection_id: str) -> None: + """Record a refresh attempt for rate limiting""" + now = get_utc_timestamp() + if connection_id not in self.rate_limit_map: + self.rate_limit_map[connection_id] = [] + self.rate_limit_map[connection_id].append(now) + + async def _refresh_google_token(self, interface, connection: UserConnection) -> bool: + """Refresh Google OAuth token""" + try: + logger.debug(f"Refreshing Google token for connection {connection.id}") + + # Get current token + current_token = interface.getConnectionToken(connection.id, auto_refresh=False) + if not current_token: + logger.warning(f"No Google token found for connection {connection.id}") + return False + + # Import Google token refresh logic + from modules.security.tokenManager import TokenManager + token_manager = TokenManager() + + # Attempt to refresh the token + refreshed_token = token_manager.refresh_token(current_token) + if refreshed_token: + # Save the refreshed token + interface.saveConnectionToken(refreshed_token) + + # Update connection status + interface.db.recordModify(UserConnection, connection.id, { + "lastChecked": get_utc_timestamp(), + "expiresAt": refreshed_token.expiresAt + }) + + logger.info(f"Successfully refreshed Google token for connection {connection.id}") + + # Log audit event + try: + audit_logger.log_security_event( + user_id=str(connection.userId), + mandate_id="system", + action="token_refresh", + details=f"Google token refreshed for connection {connection.id}" + ) + except Exception: + pass + + return True + else: + logger.warning(f"Failed to refresh Google token for connection {connection.id}") + return False + + except Exception as e: + logger.error(f"Error refreshing Google token for connection {connection.id}: {str(e)}") + return False + + async def _refresh_microsoft_token(self, interface, connection: UserConnection) -> bool: + """Refresh Microsoft OAuth token""" + try: + logger.debug(f"Refreshing Microsoft token for connection {connection.id}") + + # Get current token + current_token = interface.getConnectionToken(connection.id, auto_refresh=False) + if not current_token: + logger.warning(f"No Microsoft token found for connection {connection.id}") + return False + + # Import Microsoft token refresh logic + from modules.security.tokenManager import TokenManager + token_manager = 
TokenManager() + + # Attempt to refresh the token + refreshed_token = token_manager.refresh_token(current_token) + if refreshed_token: + # Save the refreshed token + interface.saveConnectionToken(refreshed_token) + + # Update connection status + interface.db.recordModify(UserConnection, connection.id, { + "lastChecked": get_utc_timestamp(), + "expiresAt": refreshed_token.expiresAt + }) + + logger.info(f"Successfully refreshed Microsoft token for connection {connection.id}") + + # Log audit event + try: + audit_logger.log_security_event( + user_id=str(connection.userId), + mandate_id="system", + action="token_refresh", + details=f"Microsoft token refreshed for connection {connection.id}" + ) + except Exception: + pass + + return True + else: + logger.warning(f"Failed to refresh Microsoft token for connection {connection.id}") + return False + + except Exception as e: + logger.error(f"Error refreshing Microsoft token for connection {connection.id}: {str(e)}") + return False + + async def refresh_expired_tokens(self, user_id: str) -> Dict[str, Any]: + """ + Refresh expired OAuth tokens for a user + + Args: + user_id: User ID to refresh tokens for + + Returns: + Dict with refresh results + """ + try: + logger.debug(f"Starting silent token refresh for user {user_id}") + + # Get user interface + from modules.interfaces.interfaceAppObjects import getRootInterface + root_interface = getRootInterface() + + # Get user connections + connections = root_interface.getUserConnections(user_id) + if not connections: + logger.debug(f"No connections found for user {user_id}") + return {"refreshed": 0, "failed": 0, "rate_limited": 0} + + refreshed_count = 0 + failed_count = 0 + rate_limited_count = 0 + + # Process each connection + for connection in connections: + # Only refresh expired OAuth connections + if (connection.tokenStatus == 'expired' and + connection.authority in [AuthAuthority.GOOGLE, AuthAuthority.MSFT]): + + # Check rate limiting + if self._is_rate_limited(connection.id): + logger.warning(f"Rate limited for connection {connection.id}") + rate_limited_count += 1 + continue + + # Record attempt + self._record_refresh_attempt(connection.id) + + # Refresh based on authority + success = False + if connection.authority == AuthAuthority.GOOGLE: + success = await self._refresh_google_token(root_interface, connection) + elif connection.authority == AuthAuthority.MSFT: + success = await self._refresh_microsoft_token(root_interface, connection) + + if success: + refreshed_count += 1 + else: + failed_count += 1 + + result = { + "refreshed": refreshed_count, + "failed": failed_count, + "rate_limited": rate_limited_count + } + + logger.info(f"Silent token refresh completed for user {user_id}: {result}") + return result + + except Exception as e: + logger.error(f"Error during silent token refresh for user {user_id}: {str(e)}") + return {"refreshed": 0, "failed": 0, "rate_limited": 0, "error": str(e)} + + async def proactive_refresh(self, user_id: str) -> Dict[str, Any]: + """ + Proactively refresh tokens that expire within 5 minutes + + Args: + user_id: User ID to check tokens for + + Returns: + Dict with refresh results + """ + try: + logger.debug(f"Starting proactive token refresh for user {user_id}") + + # Get user interface + from modules.interfaces.interfaceAppObjects import getRootInterface + root_interface = getRootInterface() + + # Get user connections + connections = root_interface.getUserConnections(user_id) + if not connections: + return {"refreshed": 0, "failed": 0, "rate_limited": 0} + + 
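+            # Counters for the summary returned to the caller; only tokens that
+            # expire within the five-minute window below are refreshed early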
refreshed_count = 0 + failed_count = 0 + rate_limited_count = 0 + current_time = get_utc_timestamp() + five_minutes = 5 * 60 # 5 minutes in seconds + + # Process each connection + for connection in connections: + # Only refresh active tokens that expire soon + if (connection.tokenStatus == 'active' and + connection.tokenExpiresAt and + connection.authority in [AuthAuthority.GOOGLE, AuthAuthority.MSFT]): + + # Check if token expires within 5 minutes + time_until_expiry = connection.tokenExpiresAt - current_time + if 0 < time_until_expiry <= five_minutes: + + # Check rate limiting + if self._is_rate_limited(connection.id): + logger.warning(f"Rate limited for proactive refresh of connection {connection.id}") + rate_limited_count += 1 + continue + + # Record attempt + self._record_refresh_attempt(connection.id) + + # Refresh based on authority + success = False + if connection.authority == AuthAuthority.GOOGLE: + success = await self._refresh_google_token(root_interface, connection) + elif connection.authority == AuthAuthority.MSFT: + success = await self._refresh_microsoft_token(root_interface, connection) + + if success: + refreshed_count += 1 + logger.info(f"Proactively refreshed {connection.authority} token for connection {connection.id}") + else: + failed_count += 1 + + result = { + "refreshed": refreshed_count, + "failed": failed_count, + "rate_limited": rate_limited_count + } + + if refreshed_count > 0: + logger.info(f"Proactive token refresh completed for user {user_id}: {result}") + + return result + + except Exception as e: + logger.error(f"Error during proactive token refresh for user {user_id}: {str(e)}") + return {"refreshed": 0, "failed": 0, "rate_limited": 0, "error": str(e)} + +# Global service instance +token_refresh_service = TokenRefreshService() From 9ba45952e451ae0fc11eece352aa88ad6e24536d Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Mon, 22 Sep 2025 15:44:30 +0200 Subject: [PATCH 003/169] closed workflow action plan --- modules/chat/handling/promptFactory.py | 54 +++---- modules/connectors/connectorWebTavily.py | 81 ++++++++++- modules/interfaces/interfaceWebModel.py | 28 +++- modules/methods/methodAi.py | 39 ++++- modules/methods/methodDocument.py | 90 ++++++------ modules/methods/methodOutlook.py | 4 + modules/methods/methodSharepoint.py | 11 +- modules/methods/methodWeb.py | 176 ++++++++++++++++++----- modules/security/auth.py | 13 ++ modules/security/tokenManager.py | 17 +++ 10 files changed, 390 insertions(+), 123 deletions(-) diff --git a/modules/chat/handling/promptFactory.py b/modules/chat/handling/promptFactory.py index 2890e7bf..c15979e2 100644 --- a/modules/chat/handling/promptFactory.py +++ b/modules/chat/handling/promptFactory.py @@ -158,7 +158,7 @@ def createTaskPlanningPrompt(context: TaskContext, service) -> str: # Get previous workflow round context for better understanding of follow-up prompts previous_round_context = _getPreviousRoundContext(service, context.workflow) - return f"""You are a task planning AI that analyzes user requests and creates structured task plans with user-friendly feedback messages. + return f"""You are a task planning AI that analyzes user requests and creates structured, self-contained task plans with user-friendly feedback messages. USER REQUEST: {user_request} @@ -173,8 +173,8 @@ INSTRUCTIONS: use the PREVIOUS WORKFLOW ROUNDS CONTEXT to understand what the user wants to retry or continue 3. Group related topics and sequential steps into single, comprehensive tasks 4. Focus on business outcomes, not technical operations -5. 
Each task should produce meaningful, usable outputs -6. Ensure proper handover between tasks using result labels +5. Make each task self-contained: clearly state what to do and what outputs are expected +6. Ensure proper handover between tasks (later actions will use your task outputs) 7. Detect the language of the user request and include it in languageUserDetected 8. Generate user-friendly messages for each task in the user's request language 9. Return a JSON object with the exact structure shown below @@ -201,12 +201,14 @@ SPLIT INTO MULTIPLE TASKS: TASK PLANNING PRINCIPLES: - Break down complex requests into logical, sequential steps - Focus on business value and outcomes -- Keep tasks at a meaningful level of abstraction +- Keep tasks at a meaningful level of abstraction (not implementation details) - Each task should produce results that can be used by subsequent tasks - Ensure clear dependencies and handovers between tasks - Provide clear, actionable user messages in the user's request language - Group related activities to minimize task fragmentation - Only create multiple tasks when dealing with truly different, independent objectives +- Make task objectives action-oriented and specific (include scope, data sources to consider, and output intent at high level) +- Write success_criteria as measurable acceptance criteria focusing on outputs (what artifacts or insights will exist and how they are validated) FOLLOW-UP PROMPT HANDLING: - If the user request is a follow-up (e.g., "try again", "versuche es nochmals", "retry", "continue", "proceed"), @@ -246,6 +248,12 @@ EXAMPLES OF GOOD TASK OBJECTIVES (COMBINING RELATED ACTIVITIES): - "Execute business communication using specified channels and document outcomes" - "Develop comprehensive business strategy with implementation roadmap and success metrics" +EXAMPLES OF WELL-FORMED SUCCESS CRITERIA (OUTPUT-FOCUSED): +- "Deliver a prioritized list of 10–20 candidates with justification" +- "Provide a structured JSON with fields: company, ticker, rationale, metrics" +- "Produce a presentation outline with 5 sections and bullet points per section" +- "Include data sources and date stamped references for traceability" + EXAMPLES OF GOOD SUCCESS CRITERIA: - "Key insights extracted and ready for business use" - "Professional communication created with clear business value" @@ -417,7 +425,7 @@ USAGE GUIDE: CRITICAL DOCUMENT REFERENCE RULES: - ONLY use the exact labels listed in AVAILABLE DOCUMENTS below, or result labels from previous actions - When generating multiple actions, you may only use as input documents those that are already present in AVAILABLE DOCUMENTS or produced by actions that come earlier in the list. Do NOT use as input any document label that will be produced by a later action. -- If AVAILABLE DOCUMENTS shows "NO DOCUMENTS AVAILABLE", you CANNOT create document extraction actions. Instead, create actions that generate new content or inform the user that documents are needed, if you miss something. +- If there are no documents available, you CANNOT create document extraction actions. Instead, prefer using web actions (web.search, web.scrape, web.crawl) when external information can satisfy the request; only generate a status/information report if the task truly requires user-provided documents. 
CURRENT WORKFLOW CONTEXT: - Current Round: {current_round} @@ -470,18 +478,30 @@ PREVIOUS TASK HANDOVER CONTEXT: ACTION GENERATION PRINCIPLES: - Create meaningful actions per task step -- Use comprehensive AI prompts for document processing - Focus on business outcomes, not technical operations - Combine related operations into single actions when possible -- Use the task's AI prompt if provided, or create a comprehensive one -- Each action should produce meaningful, usable outputs +- Select the method that best fulfills the objective based on context (do not default to any specific method). +- Each action must be self-contained and executable with the provided parameters - For document extraction, ensure prompts are specific and detailed -- Include validation steps in extraction prompts +- Include validation steps in extraction prompts where relevant - If this is a retry, learn from previous failures and improve the approach - Address specific issues mentioned in previous review feedback - When specifying expectedDocumentFormats, ensure AI prompts explicitly request pure data without markdown formatting - Generate user-friendly messages for each action in the user's language ({user_language}) +PARAMETER COMPLETENESS REQUIREMENTS: +- Every parameter must contain all information needed to execute without implicit context +- Use explicit, concrete values (units, languages, formats, limits, date ranges, IDs) when applicable +- For search-like parameters (if any method requires a query), derive the query from the task objective AND ALL success criteria dimensions. Include: + - Key entities and domain terms from the objective + - All distinct facets from success_criteria (e.g., valuation AND AI potential AND know-how needs) + - Geography/localization (e.g., Schweiz/Suisse/Switzerland; use multilingual synonyms when helpful) + - Time horizon or recency if relevant + - Boolean operators and synonyms to increase precision (use AND/OR, quotes, parentheses) + - Avoid single-topic or generic queries focused only on one facet (e.g., pure valuation metrics) + - When facets are truly distinct, create 1–3 focused actions with precise queries rather than one vague catch-all +- Document list parameters must reference only existing labels or prior action outputs; do not reference future outputs + USER LANGUAGE: {user_language} - All user messages must be generated in this language. DOCUMENT ROUTING GUIDANCE: @@ -494,7 +514,7 @@ DOCUMENT ROUTING GUIDANCE: INSTRUCTIONS: - Generate actions to accomplish this task step using available documents, connections, and previous results - Use docItem for single documents and docList for groups of documents as shown in AVAILABLE DOCUMENTS -- If AVAILABLE DOCUMENTS shows "NO DOCUMENTS AVAILABLE", you cannot create document extraction actions. Instead, create actions that generate new content or inform the user that documents are needed. +- If there are no documents available, do not create document extraction actions. Select methods strictly based on the task objective; choose web actions when external information is required. Otherwise, generate a status/information report requesting needed inputs. 
- Always pass documentList as a LIST of references (docItem and/or docList) - this list CANNOT be empty for document extraction actions - For referencing documents from previous actions, use the format "round{{round_number}}_task{{task_number}}_action{{action_number}}_{{context}}" - For resultLabel, use the format: "round{current_round}_task{{task_id}}_action{{action_number}}_{{short_label}}" where: @@ -639,23 +659,9 @@ EXAMPLES OF GOOD ACTIONS: ] }} -6. When no documents are available (NO DOCUMENTS AVAILABLE scenario): -{{ - "method": "document", - "action": "generateReport", - "parameters": {{ - "documentList": [], - "title": "Workflow Status Report" - }}, - "resultLabel": "round{current_round}_task{current_task}_action1_status_report", - "description": "Generate a status report informing the user that no documents are available for processing and requesting document upload or alternative input.", - "userMessage": "Ich erstelle einen Statusbericht, der Sie darüber informiert, dass keine Dokumente zur Verarbeitung verfügbar sind und um Dokumente oder alternative Eingaben bittet." -}} - IMPORTANT NOTES: - Respond with ONLY the JSON object. Do not include any explanatory text. - Before creating any document extraction action, verify that AVAILABLE DOCUMENTS contains actual document references. -- If AVAILABLE DOCUMENTS shows "NO DOCUMENTS AVAILABLE", use example 6 above to create a status report action instead of document extraction. - Always include a user-friendly userMessage for each action in the user's language ({user_language}). - The examples above show German user messages as reference - adapt the language to match the USER LANGUAGE specified above.""" diff --git a/modules/connectors/connectorWebTavily.py b/modules/connectors/connectorWebTavily.py index 97410493..59eb1396 100644 --- a/modules/connectors/connectorWebTavily.py +++ b/modules/connectors/connectorWebTavily.py @@ -81,7 +81,18 @@ class ConnectorTavily(WebSearchBase, WebCrawlBase, WebScrapeBase): """ # Step 1: Search try: - search_results = await self._search(request.query, request.max_results) + search_results = await self._search( + query=request.query, + max_results=request.max_results, + search_depth=request.search_depth, + time_range=request.time_range, + topic=request.topic, + include_domains=request.include_domains, + exclude_domains=request.exclude_domains, + language=request.language, + include_answer=request.include_answer, + include_raw_content=request.include_raw_content, + ) except Exception as e: return WebSearchActionResult(success=False, error=str(e)) @@ -113,14 +124,29 @@ class ConnectorTavily(WebSearchBase, WebCrawlBase, WebScrapeBase): """Turns a query in a list of urls with extracted content.""" # Step 1: Search try: - search_results = await self._search(request.query, request.max_results) + search_results = await self._search( + query=request.query, + max_results=request.max_results, + search_depth=request.search_depth, + time_range=request.time_range, + topic=request.topic, + include_domains=request.include_domains, + exclude_domains=request.exclude_domains, + language=request.language, + include_answer=request.include_answer, + include_raw_content=request.include_raw_content, + ) except Exception as e: return WebScrapeActionResult(success=False, error=str(e)) # Step 2: Crawl try: urls = [result.url for result in search_results] - crawl_results = await self._crawl(urls) + crawl_results = await self._crawl( + urls, + extract_depth=request.extract_depth, + format=request.format, + ) except Exception as 
e: return WebScrapeActionResult(success=False, error=str(e)) @@ -132,7 +158,19 @@ class ConnectorTavily(WebSearchBase, WebCrawlBase, WebScrapeBase): return result - async def _search(self, query: str, max_results: int) -> list[TavilySearchResult]: + async def _search( + self, + query: str, + max_results: int, + search_depth: str | None = None, + time_range: str | None = None, + topic: str | None = None, + include_domains: list[str] | None = None, + exclude_domains: list[str] | None = None, + language: str | None = None, + include_answer: bool | None = None, + include_raw_content: bool | None = None, + ) -> list[TavilySearchResult]: """Calls the Tavily API to perform a web search.""" # Make sure max_results is within the allowed range min_results = get_web_search_min_results() @@ -141,7 +179,26 @@ class ConnectorTavily(WebSearchBase, WebCrawlBase, WebScrapeBase): raise ValueError(f"max_results must be between {min_results} and {max_allowed_results}") # Perform actual API call - response = await self.client.search(query=query, max_results=max_results) + # Build kwargs only for provided options to avoid API rejections + kwargs: dict = {"query": query, "max_results": max_results} + if search_depth is not None: + kwargs["search_depth"] = search_depth + if time_range is not None: + kwargs["time_range"] = time_range + if topic is not None: + kwargs["topic"] = topic + if include_domains is not None: + kwargs["include_domains"] = include_domains + if exclude_domains is not None: + kwargs["exclude_domains"] = exclude_domains + if language is not None: + kwargs["language"] = language + if include_answer is not None: + kwargs["include_answer"] = include_answer + if include_raw_content is not None: + kwargs["include_raw_content"] = include_raw_content + + response = await self.client.search(**kwargs) return [ TavilySearchResult(title=result["title"], url=result["url"]) @@ -174,7 +231,12 @@ class ConnectorTavily(WebSearchBase, WebCrawlBase, WebScrapeBase): success=True, documents=[document], resultLabel="web_search_results" ) - async def _crawl(self, urls: list) -> list[TavilyCrawlResult]: + async def _crawl( + self, + urls: list, + extract_depth: str | None = None, + format: str | None = None, + ) -> list[TavilyCrawlResult]: """Calls the Tavily API to extract text content from URLs with retry logic.""" import asyncio @@ -185,8 +247,13 @@ class ConnectorTavily(WebSearchBase, WebCrawlBase, WebScrapeBase): for attempt in range(max_retries + 1): try: # Use asyncio.wait_for for timeout + # Build kwargs for extract + kwargs_extract: dict = {"urls": urls} + kwargs_extract["extract_depth"] = extract_depth or "advanced" + kwargs_extract["format"] = format or "text" + response = await asyncio.wait_for( - self.client.extract(urls=urls, extract_depth="advanced", format="text"), + self.client.extract(**kwargs_extract), timeout=timeout ) diff --git a/modules/interfaces/interfaceWebModel.py b/modules/interfaces/interfaceWebModel.py index 26a16560..4f030e4e 100644 --- a/modules/interfaces/interfaceWebModel.py +++ b/modules/interfaces/interfaceWebModel.py @@ -3,7 +3,7 @@ from abc import ABC, abstractmethod from modules.interfaces.interfaceChatModel import ActionDocument, ActionResult from pydantic import BaseModel, Field, HttpUrl -from typing import List +from typing import List, Optional, Literal from modules.shared.configuration import APP_CONFIG @@ -31,6 +31,17 @@ def get_web_search_min_results() -> int: class WebSearchRequest(BaseModel): query: str = Field(min_length=1, max_length=get_web_search_max_query_length()) 
max_results: int = Field(ge=get_web_search_min_results(), le=get_web_search_max_results()) + # Tavily tuning options + search_depth: Optional[Literal["basic", "advanced"]] = Field(default=None) + time_range: Optional[Literal["d", "w", "m", "y"]] = Field( + default=None, description="Limit results to last day/week/month/year" + ) + topic: Optional[Literal["general", "news", "academic"]] = Field(default=None) + include_domains: Optional[List[str]] = Field(default=None) + exclude_domains: Optional[List[str]] = Field(default=None) + language: Optional[str] = Field(default=None, description="ISO language code like 'en', 'de'") + include_answer: Optional[bool] = Field(default=None) + include_raw_content: Optional[bool] = Field(default=None) class WebSearchResultItem(BaseModel): @@ -68,6 +79,9 @@ class WebSearchBase(ABC): class WebCrawlRequest(BaseModel): urls: List[HttpUrl] + # Tavily extract options + extract_depth: Optional[Literal["basic", "advanced"]] = Field(default=None) + format: Optional[Literal["text", "markdown"]] = Field(default=None) class WebCrawlResultItem(BaseModel): @@ -108,6 +122,18 @@ class WebCrawlBase(ABC): class WebScrapeRequest(BaseModel): query: str = Field(min_length=1, max_length=get_web_search_max_query_length()) max_results: int = Field(ge=get_web_search_min_results(), le=get_web_search_max_results()) + # Pass-through search options + search_depth: Optional[Literal["basic", "advanced"]] = Field(default=None) + time_range: Optional[Literal["d", "w", "m", "y"]] = Field(default=None) + topic: Optional[Literal["general", "news", "academic"]] = Field(default=None) + include_domains: Optional[List[str]] = Field(default=None) + exclude_domains: Optional[List[str]] = Field(default=None) + language: Optional[str] = Field(default=None) + include_answer: Optional[bool] = Field(default=None) + include_raw_content: Optional[bool] = Field(default=None) + # Extract options + extract_depth: Optional[Literal["basic", "advanced"]] = Field(default=None) + format: Optional[Literal["text", "markdown"]] = Field(default=None) class WebScrapeResultItem(BaseModel): diff --git a/modules/methods/methodAi.py b/modules/methods/methodAi.py index eda36f69..f947db83 100644 --- a/modules/methods/methodAi.py +++ b/modules/methods/methodAi.py @@ -41,6 +41,8 @@ class MethodAi(MethodBase): try: aiPrompt = parameters.get("aiPrompt") documentList = parameters.get("documentList", []) + if isinstance(documentList, str): + documentList = [documentList] expectedDocumentFormats = parameters.get("expectedDocumentFormats", []) processingMode = parameters.get("processingMode", "basic") includeMetadata = parameters.get("includeMetadata", True) @@ -171,10 +173,43 @@ class MethodAi(MethodBase): if context: logger.info(f"Including context from {len(documentList)} documents") + # Encourage longer, structured outputs with a min-length hint + min_tokens_hint = "\n\nPlease ensure the response is substantial and complete." 
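+            # The JSON guardrail further below retries once with stricter
+            # formatting rules when the first response is not parseable JSON
+            # or looks underfilled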
+ call_prompt = enhanced_prompt + min_tokens_hint + if processingMode in ["advanced", "detailed"]: - result = await self.service.callAiTextAdvanced(enhanced_prompt, context) + result = await self.service.callAiTextAdvanced(call_prompt, context) else: - result = await self.service.callAiTextBasic(enhanced_prompt, context) + result = await self.service.callAiTextBasic(call_prompt, context) + + # If expected JSON and too short/not JSON, retry with stricter JSON guardrails + if output_extension == ".json": + import json + cleaned = (result or "").strip() + if cleaned.startswith('```json'): + cleaned = cleaned[7:] + if cleaned.endswith('```'): + cleaned = cleaned[:-3] + cleaned = cleaned.strip() + needs_retry = False + try: + parsed = json.loads(cleaned) + # Heuristic: small dict -> possibly underfilled + if isinstance(parsed, dict) and len(parsed.keys()) <= 2: + needs_retry = True + except Exception: + needs_retry = True + + if needs_retry: + guardrail_prompt = ( + enhanced_prompt + + "\n\nCRITICAL: Return ONLY valid JSON, no markdown, no code fences. " + "Include all requested fields with detailed content." + ) + try: + result = await self.service.callAiTextAdvanced(guardrail_prompt, context) + except Exception: + result = cleaned # fallback to first attempt # Create result document fileName = f"ai_{processingMode}_{self._format_timestamp_for_filename()}{output_extension}" diff --git a/modules/methods/methodDocument.py b/modules/methods/methodDocument.py index 8cd3ac1c..54f45cb9 100644 --- a/modules/methods/methodDocument.py +++ b/modules/methods/methodDocument.py @@ -5,7 +5,6 @@ Handles document operations using the document service. import logging import os -import re from typing import Dict, Any, List, Optional from datetime import datetime, UTC @@ -34,13 +33,15 @@ class MethodDocument(MethodBase): Extract content from any document using AI prompt. Parameters: - documentList (str): Document list reference + documentList (list): Document list reference(s) aiPrompt (str): AI prompt for extraction expectedDocumentFormats (list, optional): Output formats includeMetadata (bool, optional): Include metadata (default: True) """ try: documentList = parameters.get("documentList") + if isinstance(documentList, str): + documentList = [documentList] aiPrompt = parameters.get("aiPrompt") expectedDocumentFormats = parameters.get("expectedDocumentFormats", []) includeMetadata = parameters.get("includeMetadata", True) @@ -188,6 +189,8 @@ class MethodDocument(MethodBase): """ try: document_list = parameters.get("documentList", []) + if isinstance(document_list, str): + document_list = [document_list] expected_document_formats = parameters.get("expectedDocumentFormats", []) original_documents = parameters.get("originalDocuments", []) include_metadata = parameters.get("includeMetadata", True) @@ -606,13 +609,15 @@ class MethodDocument(MethodBase): Generate HTML report from multiple documents using AI. 
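
# Sketch of the JSON guardrail used above: strip Markdown fences, try to
# parse, and flag a retry when the output is not JSON or looks underfilled.
# Standalone illustration; the real code re-prompts the model on retry.

import json

def needs_json_retry(raw: str, min_keys: int = 3) -> bool:
    cleaned = (raw or "").strip()
    if cleaned.startswith("```json"):
        cleaned = cleaned[7:]
    if cleaned.endswith("```"):
        cleaned = cleaned[:-3]
    cleaned = cleaned.strip()
    try:
        parsed = json.loads(cleaned)
    except Exception:
        return True  # not valid JSON at all
    # Heuristic: a tiny top-level dict suggests an underfilled answer
    return isinstance(parsed, dict) and len(parsed) < min_keys

assert needs_json_retry("not json") is True
assert needs_json_retry('```json\n{"a": 1}\n```') is True   # too few keys
assert needs_json_retry('{"a": 1, "b": 2, "c": 3}') is False
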
        Parameters:
-            documentList (str): Document list reference
+            documentList (list): Document list reference(s)
             prompt (str): AI prompt for report generation
             title (str, optional): Report title (default: "Summary Report")
             includeMetadata (bool, optional): Include metadata (default: True)
         """
         try:
             documentList = parameters.get("documentList")
+            if isinstance(documentList, str):
+                documentList = [documentList]
             prompt = parameters.get("prompt")
             title = parameters.get("title", "Summary Report")
             includeMetadata = parameters.get("includeMetadata", True)
@@ -708,13 +713,9 @@ class MethodDocument(MethodBase):
                     logger.info(f"  Skipping document with no readable text content")
 
         if not validDocuments:
-            # If no valid documents, create a simple report
-            html = ["<html><head><title>" + title + "</title></head><body>"]
-            html.append(f"<h1>{title}</h1>")
-            html.append(f"<p>Generated: {int(get_utc_timestamp())}</p>")
-            html.append("<p>No content available in the provided documents.</p>")
-            html.append("</body></html>")
-            return '\n'.join(html)
+            # No readable content; return a minimal valid HTML document
+            timestamp = int(get_utc_timestamp())
+            return f"<html><head><title>{title}</title></head><body><h1>{title}</h1><p>No usable content found.</p><p>Generated: {timestamp}</p></body></html>"
 
         # Create AI prompt for comprehensive report generation using user's prompt
         combinedContent = "\n\n".join(allContent)
@@ -723,25 +724,34 @@
 
 Report Title: {title}
 
-Additional Requirements:
-1. Create a professional, well-formatted HTML report
-2. Include an executive summary at the beginning
-3. Organize information logically with clear sections
-4. Highlight key findings and insights
-5. Include relevant data, statistics, and conclusions
-6. Use proper HTML formatting with headers, lists, and styling
-7. Make it readable and professional
+OUTPUT POLICY:
+- Return ONLY a complete, raw HTML document.
+- Start with: <!DOCTYPE html>
+- Must include: <html>, <head> (with <title> and <style>), and <body>.
+- The response must be valid, self-contained HTML suitable for saving as .html.
 
-Document Content:
----START OF DOCUMENT CONTENT-----------------------------------------------
+Structure:
+- Title and short subtitle
+- Executive summary
+- Sections with clear headings
+- Use tables for structured data when helpful
+- Key findings and recommendations
+- Generation date and number of documents
+
+Quality and design requirements:
+- Use clear, professional, and accessible styling in a <style> block
+- Apply clean layout, spacing, and visual hierarchy for headings
+- Keep HTML and CSS standards-compliant and lightweight
+
+SOURCE DOCUMENT CONTENT:
+---START---
 {combinedContent}
----END OF DOCUMENT CONTENT-----------------------------------------------
 
-Generate a complete HTML report that addresses the user's specific requirements and integrates all the information into a cohesive, professional document.
+---END---
 """
 
         # Call AI to generate the report
         logger.info(f"Generating AI report for {len(validDocuments)} documents")
-        aiReport = await self.service.callAiTextBasic(aiPrompt, combinedContent)
+        aiReport = await self.service.callAiTextAdvanced(aiPrompt, combinedContent)
 
         # If AI call fails, return error - AI is crucial for report generation
         if not aiReport or aiReport.strip() == "":
@@ -751,39 +761,21 @@
         # Clean up the AI response and ensure it's valid HTML
         aiReport = aiReport.strip()
 
-        # Strip fenced code blocks like ```html ... ``` if present
+        # Normalize: strip code fences if present
         if aiReport.startswith("```") and aiReport.endswith("```"):
             lines = aiReport.split('\n')
             if len(lines) >= 2:
-                # remove first and last fence lines (language tag allowed on first)
                 aiReport = '\n'.join(lines[1:-1]).strip()
 
-        # Check if AI response starts with DOCTYPE or html tag (complete HTML document)
-        if aiReport.startswith('<!DOCTYPE') or aiReport.startswith('<html'):
-            # AI returned complete HTML document, use it directly
-            return aiReport
-        else:
-            # AI returned HTML content without document structure, wrap it
-
-            # Check if AI response already contains a title/header
-            has_title = any(title.lower() in aiReport.lower() for title in [title, "outlook", "report", "status"])
-
-            # Wrap the AI content in proper HTML structure
-            html = ["<html><head><meta charset='utf-8'><title>" + title + "</title></head><body>"]
-
-            # Only add the title if the AI response doesn't already have one
-            if not has_title:
-                html.append(f"<h1>{title}</h1>")
-
-            html.append(f"<p>Generated: {int(get_utc_timestamp())}</p>")
-            html.append(f"<p>Total Documents Analyzed: {len(validDocuments)}</p>")
-            html.append("<hr>")
-            html.append(aiReport)
-            html.append("</body></html>")
-            return '\n'.join(html)
+        cleaned = aiReport.strip()
+
+        # Return exactly what we have (no wrapping)
+        return cleaned
 
         except Exception as e:
             logger.error(f"Error generating AI report: {str(e)}")
             # Re-raise the error - AI is crucial for report generation
             raise
-    
\ No newline at end of file
+    
+
+
diff --git a/modules/methods/methodOutlook.py b/modules/methods/methodOutlook.py
index 23b17985..658b3982 100644
--- a/modules/methods/methodOutlook.py
+++ b/modules/methods/methodOutlook.py
@@ -1478,7 +1478,11 @@ class MethodOutlook(MethodBase):
         attachments = parameters.get("attachments", [])
         tone = parameters.get("tone", "professional")
         documentList = parameters.get("documentList", [])
+        if isinstance(documentList, str):
+            documentList = [documentList]
         attachmentDocumentList = parameters.get("attachmentDocumentList", [])
+        if isinstance(attachmentDocumentList, str):
+            attachmentDocumentList = [attachmentDocumentList]
         expectedDocumentFormats = parameters.get("expectedDocumentFormats", [])
 
         if not context:
diff --git a/modules/methods/methodSharepoint.py b/modules/methods/methodSharepoint.py
index ca99c06a..bcb92e0b 100644
--- a/modules/methods/methodSharepoint.py
+++ b/modules/methods/methodSharepoint.py
@@ -829,7 +829,7 @@ class MethodSharepoint(MethodBase):
         Read documents from SharePoint across all accessible sites
 
         Parameters:
-            documentList (str): Reference to the document list to read
+            documentList (list): Reference(s) to the document list to read
             connectionReference (str): Reference to the Microsoft connection
             pathObject (str, optional): Path object to locate documents. This can ONLY be a reference to a result from sharepoint.findDocumentPath action
             pathQuery (str, optional): Path query to locate documents, only if no pathObject is provided (e.g., "/Documents/Project1", "*" for all sites)
@@ -837,6 +837,8 @@
         """
         try:
             documentList = parameters.get("documentList")
+            if isinstance(documentList, str):
+                documentList = [documentList]
             connectionReference = parameters.get("connectionReference")
             pathQuery = parameters.get("pathQuery", "*")
             pathObject = parameters.get("pathObject")
@@ -886,8 +888,7 @@
                     return ActionResult.isFailure(error=f"Error resolving pathObject reference: {str(e)}")
 
             # Get documents from reference - ensure documentList is a list, not a string
-            if isinstance(documentList, str):
-                documentList = [documentList]  # Convert string to list
+            # documentList is already normalized above
             chatDocuments = self.service.getChatDocumentsFromDocumentList(documentList)
 
             if not chatDocuments:
@@ -1107,13 +1108,15 @@
             connectionReference (str): Reference to the Microsoft connection
             pathObject (str, optional): Path object to locate documents.
This can ONLY be a reference to a result from sharepoint.findDocumentPath action
             pathQuery (str, optional): Path query to locate documents, only if no pathObject is provided (e.g., "/Documents/Project1", "*" for all sites)
-            documentList (str): Reference to the document list to upload
+            documentList (list): Reference(s) to the document list to upload
             fileNames (List[str]): List of names for the uploaded files
         """
         try:
             connectionReference = parameters.get("connectionReference")
             pathQuery = parameters.get("pathQuery")
             documentList = parameters.get("documentList")
+            if isinstance(documentList, str):
+                documentList = [documentList]
             fileNames = parameters.get("fileNames")
             pathObject = parameters.get("pathObject")
diff --git a/modules/methods/methodWeb.py b/modules/methods/methodWeb.py
index 014498de..409b7151 100644
--- a/modules/methods/methodWeb.py
+++ b/modules/methods/methodWeb.py
@@ -25,40 +25,53 @@ class MethodWeb(MethodBase):
 
     @action
     async def search(self, parameters: Dict[str, Any]) -> ActionResult:
-        """Perform a web search and outputs a csv file with a list of found URLs
-
-        Each result contains columns "url" and "title".
+        """Perform a web search and output a CSV with the found URLs. Each result row contains columns "url" and "title".
 
         Parameters:
-            query (str): Search query to perform
-            maxResults (int, optional): Maximum number of results (default: 10)
+            query (str, required): Search query.
+            maxResults (int, optional): Max number of results. Default: 10.
+            searchDepth ("basic"|"advanced", optional): Search depth. Default: provider default.
+            timeRange ("d"|"w"|"m"|"y", optional): Limit to last day/week/month/year.
+            topic ("general"|"news"|"academic", optional): Result domain preference.
+            includeDomains (list[str], optional): Only include these domains.
+            excludeDomains (list[str], optional): Exclude these domains.
+            language (str, optional): ISO code like "de", "en" to bias results.
+            includeAnswer (bool, optional): Ask provider to generate a short answer.
+            includeRawContent (bool, optional): Include raw content where possible.
         """
         try:
-            # Prepare request data
+            # Prepare and validate request data
+            raw_query = parameters.get("query")
+            max_results = parameters.get("maxResults", 10)
+
+            if not raw_query or not isinstance(raw_query, str):
+                return ActionResult(success=False, error="Search query is required")
+
             web_search_request = WebSearchRequest(
-                query=parameters.get("query"),
-                max_results=parameters.get("maxResults", 10),
+                query=raw_query.strip(),
+                max_results=max_results,
+                search_depth=parameters.get("searchDepth"),
+                time_range=parameters.get("timeRange"),
+                topic=parameters.get("topic"),
+                include_domains=parameters.get("includeDomains"),
+                exclude_domains=parameters.get("excludeDomains"),
+                language=parameters.get("language"),
+                include_answer=parameters.get("includeAnswer"),
+                include_raw_content=parameters.get("includeRawContent"),
             )
 
             # Perform request
             web_interface = await WebInterface.create()
             web_search_result = await web_interface.search(web_search_request)
 
             # Convert search results to CSV format
             if web_search_result.success and web_search_result.documents:
                 csv_content = web_interface.convert_web_search_result_to_csv(web_search_result)
-
-                # Create CSV document
                 csv_document = web_interface.create_csv_action_document(
-                    csv_content,
-                    f"web_search_results.csv"
-                )
-
-                return ActionResult(
-                    success=True,
-                    documents=[csv_document]
+                    csv_content, "web_search_results.csv"
                 )
+                return ActionResult(success=True, documents=[csv_document])
             else:
                 return web_search_result
 
@@ -105,15 +118,21 @@ class MethodWeb(MethodBase):
 
     @action
     async def crawl(self, parameters: Dict[str, Any]) -> ActionResult:
-        """Crawls a list of URLs and extracts information from them.
+        """Crawl a list of URLs and extract text content.
 
         Parameters:
-            documentList (str): Document list reference containing URL lists from search results
-            expectedDocumentFormats (list, optional): Expected document formats with extension, mimeType, description
+            documentList (list[str]|str, required): Reference(s) to documents containing URLs (e.g., CSV from search). Can be a single reference or a list.
+            expectedDocumentFormats (list, optional): Hint for downstream handling.
+            extractDepth ("basic"|"advanced", optional): Extraction depth. Default: "advanced".
+            format ("text"|"markdown", optional): Output format. Default: "text".
         """
         try:
             document_list = parameters.get("documentList")
+            # Normalize to list if a single string reference is provided
+            if isinstance(document_list, str):
+                document_list = [document_list]
+
             if not document_list:
                 return ActionResult(
                     success=False, error="No document list reference provided."
                 )
@@ -214,24 +233,72 @@ class MethodWeb(MethodBase):
             unique_urls = list(dict.fromkeys(all_urls))
             logger.info(f"Extracted {len(unique_urls)} unique URLs from {len(chat_documents)} documents")
 
-            # Prepare request data
-            web_crawl_request = WebCrawlRequest(urls=unique_urls)
+            # Prepare request data with normalization
+            allowed_extract_depth = {"basic", "advanced"}
+            allowed_formats = {"text", "markdown"}
+            extract_depth = parameters.get("extractDepth")
+            if extract_depth and extract_depth not in allowed_extract_depth:
+                logger.warning(f"Invalid extractDepth '{extract_depth}' provided. Falling back to 'advanced'.")
+                extract_depth = "advanced"
+            fmt = parameters.get("format")
+            if fmt and fmt not in allowed_formats:
+                logger.warning(f"Invalid format '{fmt}' provided. Falling back to 'text'.")
+                fmt = "text"
+
+            web_crawl_request = WebCrawlRequest(
+                urls=unique_urls,
+                extract_depth=extract_depth,
+                format=fmt,
+            )
 
             # Perform request
             web_interface = await WebInterface.create()
             web_crawl_result = await web_interface.crawl(web_crawl_request)
 
-            # Convert to proper JSON format
+            # Convert and enrich with concise summaries per URL for better context
             if web_crawl_result.success:
-                json_content = web_interface.convert_web_result_to_json(web_crawl_result)
+                try:
+                    doc = web_crawl_result.documents[0]
+                    results = getattr(doc.documentData, "results", [])
+                    enriched = []
+                    # Summarize each result briefly using AI for added context
+                    for item in results:
+                        url = str(getattr(item, "url", ""))
+                        content = str(getattr(item, "content", ""))
+                        summary = ""
+                        try:
+                            if content:
+                                prompt = (
+                                    "Summarize the following webpage content in 3-5 concise bullet points. "
+                                    "Focus on key points, figures, named entities (companies/institutions), and location context. "
+                                    "Return only bullet points without any preface."
+                                )
+                                context = content[:4000]
+                                summary = await self.service.callAiTextBasic(prompt, context)
+                                summary = summary.strip()
+                        except Exception:
+                            summary = ""
+                        enriched.append({
+                            "url": url,
+                            "summary": summary,
+                            "snippet": content[:500]
+                        })
+
+                    import json as _json
+                    payload = {
+                        "success": True,
+                        "total_count": len(enriched),
+                        "results": enriched,
+                    }
+                    json_content = _json.dumps(payload, ensure_ascii=False, indent=2)
+                except Exception:
+                    # Fallback to original conversion
+                    json_content = web_interface.convert_web_result_to_json(web_crawl_result)
+
                 json_document = web_interface.create_json_action_document(
-                    json_content,
-                    f"web_crawl_results.json"
+                    json_content, "web_crawl_results.json"
                 )
-                return ActionResult(
-                    success=True,
-                    documents=[json_document]
-                )
+                return ActionResult(success=True, documents=[json_document])
             else:
                 return web_crawl_result
 
@@ -241,17 +308,44 @@ class MethodWeb(MethodBase):
 
     @action
     async def scrape(self, parameters: Dict[str, Any]) -> ActionResult:
-        """Scrapes web content by searching for URLs and then extracting their content.
-
-        Combines search and crawl operations in one step.
+        """Search and then crawl the found URLs in one step. Suitable for market analysis, web research, and general internet searches.
 
         Parameters:
-            query (str): Search query to perform
-            maxResults (int, optional): Maximum number of results (default: 10)
+            query (str, required): Search query.
+            maxResults (int, optional): Max number of results. Default: 10.
+            searchDepth ("basic"|"advanced", optional): Search depth.
+            timeRange ("d"|"w"|"m"|"y", optional): Time window.
+            topic ("general"|"news"|"academic", optional): Result domain preference.
+            includeDomains (list[str], optional): Only include these domains.
+            excludeDomains (list[str], optional): Exclude these domains.
+            language (str, optional): ISO language bias.
+            includeAnswer (bool, optional): Ask provider to include an answer.
+            includeRawContent (bool, optional): Include raw content where possible.
+            extractDepth ("basic"|"advanced", optional): Crawl extraction depth. Default: "advanced".
+            format ("text"|"markdown", optional): Crawl output format. Default: "text".
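
# Two patterns recur throughout the methods in this patch: callers may pass
# documentList as a single string or a list, and free-form enum parameters
# are corrected to a safe default with a warning instead of failing the
# action. A sketch of shared helpers capturing both contracts (illustration
# only; the patch keeps these checks inline):

import logging
from typing import Union

logger = logging.getLogger(__name__)

def ensure_list(value: Union[str, list, None]) -> list:
    """Normalize a string-or-list parameter to a list."""
    if value is None:
        return []
    if isinstance(value, str):
        return [value]
    return list(value)

def normalize_choice(value, allowed: set, default):
    """Return value when allowed; otherwise warn and fall back."""
    if value is not None and value not in allowed:
        logger.warning(f"Invalid value '{value}' provided. Falling back to '{default}'.")
        return default
    return value if value is not None else default

assert ensure_list("doc_1") == ["doc_1"]
assert ensure_list(None) == []
assert normalize_choice("deep", {"basic", "advanced"}, "advanced") == "advanced"
assert normalize_choice(None, {"text", "markdown"}, "text") == "text"
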
""" try: query = parameters.get("query") max_results = parameters.get("maxResults", 10) + # Normalize optional enums to avoid validation errors + allowed_search_depth = {"basic", "advanced"} + allowed_extract_depth = {"basic", "advanced"} + allowed_formats = {"text", "markdown"} + + search_depth = parameters.get("searchDepth") + if search_depth and search_depth not in allowed_search_depth: + logger.warning(f"Invalid searchDepth '{search_depth}' provided. Falling back to None.") + search_depth = None + + extract_depth = parameters.get("extractDepth") + if extract_depth and extract_depth not in allowed_extract_depth: + logger.warning(f"Invalid extractDepth '{extract_depth}' provided. Falling back to 'advanced'.") + extract_depth = "advanced" + + fmt = parameters.get("format") + if fmt and fmt not in allowed_formats: + logger.warning(f"Invalid format '{fmt}' provided. Falling back to 'text'.") + fmt = "text" if not query: return ActionResult(success=False, error="Search query is required") @@ -260,6 +354,16 @@ class MethodWeb(MethodBase): web_scrape_request = WebScrapeRequest( query=query, max_results=max_results, + search_depth=search_depth, + time_range=parameters.get("timeRange"), + topic=parameters.get("topic"), + include_domains=parameters.get("includeDomains"), + exclude_domains=parameters.get("excludeDomains"), + language=parameters.get("language"), + include_answer=parameters.get("includeAnswer"), + include_raw_content=parameters.get("includeRawContent"), + extract_depth=extract_depth, + format=fmt, ) # Perform request diff --git a/modules/security/auth.py b/modules/security/auth.py index 2ccaaa74..5b882203 100644 --- a/modules/security/auth.py +++ b/modules/security/auth.py @@ -174,6 +174,19 @@ def _getUserBase(token: str = Depends(cookieAuth)) -> User: headers={"WWW-Authenticate": "Bearer"}, ) + # Guard: token may be None or malformed when cookie/header is missing or bad + if not token or not isinstance(token, str): + logger.warning("Missing JWT Token (no cookie/header)") + raise credentialsException + # Basic JWT format check (header.payload.signature) + try: + if token.count(".") != 2: + logger.warning("Malformed JWT token format") + raise credentialsException + except Exception: + # If anything odd happens while checking format, treat as invalid creds + raise credentialsException + try: # Decode token payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM]) diff --git a/modules/security/tokenManager.py b/modules/security/tokenManager.py index 58dabd03..c27c9939 100644 --- a/modules/security/tokenManager.py +++ b/modules/security/tokenManager.py @@ -163,6 +163,23 @@ class TokenManager: logger.debug(f"refresh_token: Starting refresh for token {old_token.id}, authority: {old_token.authority}") logger.debug(f"refresh_token: Token details: userId={old_token.userId}, connectionId={old_token.connectionId}, hasRefreshToken={bool(old_token.tokenRefresh)}") + # Cooldown: avoid refreshing too frequently if a workflow triggers refresh repeatedly + # Only allow a new refresh if at least 10 minutes passed since the token was created/refreshed + try: + now_ts = get_utc_timestamp() + created_ts = float(old_token.createdAt) if old_token.createdAt is not None else 0.0 + seconds_since_last_refresh = now_ts - created_ts + if seconds_since_last_refresh < 10 * 60: + logger.info( + f"refresh_token: Skipping refresh for connection {old_token.connectionId} due to cooldown. " + f"Last refresh {int(seconds_since_last_refresh)}s ago (< 600s)." 
+ ) + # Return the existing token to avoid caller errors while preventing provider rate limits + return old_token + except Exception: + # If any issue reading timestamps, proceed with normal refresh to be safe + pass + if not old_token.tokenRefresh: logger.warning(f"No refresh token available for {old_token.authority}") return None From 30d0a8f70c02f767827239bb14f375ae54442ea6 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Mon, 22 Sep 2025 23:34:47 +0200 Subject: [PATCH 004/169] Full refactored workflow and features --- app.py | 6 +- modules/chat/managerChat.py | 115 ---- .../chatPlayground/mainChatPlayground.py | 29 + .../mainNeutralizePlayground.py} | 2 +- .../mainSyncDelta.py} | 0 modules/interfaces/interfaceAiCalls.py | 2 +- modules/interfaces/interfaceAppObjects.py | 2 +- modules/interfaces/interfaceChatModel.py | 92 ++- modules/interfaces/interfaceChatObjects.py | 237 +------ modules/routes/routeAdmin.py | 219 ------- modules/routes/routeChatPlayground.py | 132 ++++ modules/routes/routeDataNeutralization.py | 2 +- modules/routes/routeWorkflows.py | 96 +-- modules/{chat => services}/serviceCenter.py | 8 +- .../serviceDocument}/documentExtraction.py | 4 +- .../serviceDocument}/documentGeneration.py | 2 +- .../serviceDocument}/documentUtility.py | 0 .../serviceNeutralization}/neutralizer.py | 12 +- .../serviceNeutralization}/readme.md | 0 .../serviceNeutralization}/subParseString.py | 2 +- .../serviceNeutralization}/subPatterns.py | 0 .../subProcessBinary.py | 0 .../subProcessCommon.py | 0 .../serviceNeutralization}/subProcessList.py | 8 +- .../serviceNeutralization}/subProcessText.py | 2 +- .../_transfer}/executionState.py | 29 +- .../_transfer}/handlingTasks.py | 168 ++++- .../_transfer}/promptFactory.py | 96 ++- modules/{ => workflows}/methods/methodAi.py | 2 +- modules/{ => workflows}/methods/methodBase.py | 0 .../{ => workflows}/methods/methodDocument.py | 2 +- .../{ => workflows}/methods/methodOutlook.py | 2 +- .../methods/methodSharepoint.py | 2 +- modules/{ => workflows}/methods/methodWeb.py | 2 +- .../workflowManager.py} | 585 +++++++++++------- tests/methods/test_method_web.py | 2 +- tool_stats_durations_from_log.py | 2 +- 37 files changed, 924 insertions(+), 940 deletions(-) delete mode 100644 modules/chat/managerChat.py create mode 100644 modules/features/chatPlayground/mainChatPlayground.py rename modules/features/{featureNeutralizePlayground.py => neutralizePlayground/mainNeutralizePlayground.py} (99%) rename modules/features/{featureSyncDelta.py => syncDelta/mainSyncDelta.py} (100%) create mode 100644 modules/routes/routeChatPlayground.py rename modules/{chat => services}/serviceCenter.py (99%) rename modules/{chat/documents => services/serviceDocument}/documentExtraction.py (99%) rename modules/{chat/documents => services/serviceDocument}/documentGeneration.py (99%) rename modules/{chat/documents => services/serviceDocument}/documentUtility.py (100%) rename modules/{neutralizer => services/serviceNeutralization}/neutralizer.py (87%) rename modules/{neutralizer => services/serviceNeutralization}/readme.md (100%) rename modules/{neutralizer => services/serviceNeutralization}/subParseString.py (98%) rename modules/{neutralizer => services/serviceNeutralization}/subPatterns.py (100%) rename modules/{neutralizer => services/serviceNeutralization}/subProcessBinary.py (100%) rename modules/{neutralizer => services/serviceNeutralization}/subProcessCommon.py (100%) rename modules/{neutralizer => services/serviceNeutralization}/subProcessList.py (96%) rename modules/{neutralizer 
=> services/serviceNeutralization}/subProcessText.py (97%) rename modules/{chat/handling => workflows/_transfer}/executionState.py (64%) rename modules/{chat/handling => workflows/_transfer}/handlingTasks.py (89%) rename modules/{chat/handling => workflows/_transfer}/promptFactory.py (91%) rename modules/{ => workflows}/methods/methodAi.py (99%) rename modules/{ => workflows}/methods/methodBase.py (100%) rename modules/{ => workflows}/methods/methodDocument.py (99%) rename modules/{ => workflows}/methods/methodOutlook.py (99%) rename modules/{ => workflows}/methods/methodSharepoint.py (99%) rename modules/{ => workflows}/methods/methodWeb.py (99%) rename modules/{features/featureChatPlayground.py => workflows/workflowManager.py} (55%) diff --git a/app.py b/app.py index 02e05076..5caa64f8 100644 --- a/app.py +++ b/app.py @@ -205,14 +205,10 @@ async def lifespan(app: FastAPI): # Startup logic logger.info("Application is starting up") - # Initialize root interface to ensure database is properly set up - from modules.interfaces.interfaceAppObjects import getRootInterface - getRootInterface() - # Setup APScheduler for JIRA sync scheduler = AsyncIOScheduler(timezone=ZoneInfo("Europe/Zurich")) try: - from modules.features.featureSyncDelta import perform_sync_jira_delta_group + from modules.features.syncDelta.mainSyncDelta import perform_sync_jira_delta_group # Schedule sync every 20 minutes (at minutes 00, 20, 40) scheduler.add_job( perform_sync_jira_delta_group, diff --git a/modules/chat/managerChat.py b/modules/chat/managerChat.py deleted file mode 100644 index 882d46e3..00000000 --- a/modules/chat/managerChat.py +++ /dev/null @@ -1,115 +0,0 @@ -import logging -from typing import Dict, Any, List -from modules.interfaces.interfaceAppModel import User -from modules.interfaces.interfaceChatModel import ChatWorkflow, UserInputRequest, TaskStep, TaskAction, ActionResult, ReviewResult, TaskPlan, WorkflowResult, TaskContext -from modules.interfaces.interfaceChatObjects import ChatObjects -from modules.chat.handling.handlingTasks import HandlingTasks, WorkflowStoppedException - -logger = logging.getLogger(__name__) - -# ===== STATE MANAGEMENT AND VALIDATION CLASSES ===== - -class ChatManager: - """Chat manager with improved AI integration and method handling""" - - def __init__(self, currentUser: User, chatInterface: ChatObjects): - self.currentUser = currentUser - self.chatInterface = chatInterface - self.workflow: ChatWorkflow = None - self.handlingTasks: HandlingTasks = None - - async def initialize(self, workflow: ChatWorkflow) -> None: - """Initialize chat manager with workflow""" - self.workflow = workflow - self.handlingTasks = HandlingTasks(self.chatInterface, self.currentUser, self.workflow) - - - async def executeUnifiedWorkflow(self, userInput: UserInputRequest, workflow: ChatWorkflow) -> WorkflowResult: - """Unified Workflow Execution""" - try: - logger.info(f"Starting unified workflow execution for workflow {workflow.id}") - - # Phase 1: High-Level Task Planning - logger.info("Phase 1: Generating task plan") - task_plan = await self.handlingTasks.generateTaskPlan(userInput.prompt, workflow) - if not task_plan or not task_plan.tasks: - raise Exception("No tasks generated in task plan.") - - # Phase 2-5: For each task, execute and get results - total_tasks = len(task_plan.tasks) - logger.info(f"Phase 2: Executing {total_tasks} tasks") - all_task_results = [] - previous_results = [] - for idx, task_step in enumerate(task_plan.tasks): - # Pass task index to executeTask method - 
current_task_index = idx + 1 - - logger.info(f"Task {idx+1}/{total_tasks}: {task_step.objective}") - - # Create proper context object for this task - task_context = TaskContext( - task_step=task_step, - workflow=workflow, - workflow_id=workflow.id, - available_documents=None, - available_connections=None, - previous_results=previous_results, - previous_handover=None, - improvements=[], - retry_count=0, - previous_action_results=[], - previous_review_result=None, - is_regeneration=False, - failure_patterns=[], - failed_actions=[], - successful_actions=[], - criteria_progress={ - 'met_criteria': set(), - 'unmet_criteria': set(), - 'attempt_history': [] - } - ) - - # Execute task (this handles action generation, execution, and review internally) - task_result = await self.handlingTasks.executeTask(task_step, workflow, task_context, current_task_index, total_tasks) - # Handover - handover_data = await self.handlingTasks.prepareTaskHandover(task_step, [], task_result, workflow) - # Collect results - all_task_results.append({ - 'task_step': task_step, - 'task_result': task_result, - 'handover_data': handover_data - }) - # Update previous results for next task - if task_result.success and task_result.feedback: - previous_results.append(task_result.feedback) - - # Final workflow result - workflow_result = WorkflowResult( - status="completed", - completed_tasks=len(all_task_results), - total_tasks=len(task_plan.tasks), - execution_time=0.0, # TODO: Calculate actual execution time - final_results_count=len(all_task_results) - ) - logger.info(f"Unified workflow execution completed successfully for workflow {workflow.id}") - return workflow_result - except WorkflowStoppedException: - logger.info(f"Workflow {workflow.id} was stopped by user") - return WorkflowResult( - status="stopped", - completed_tasks=0, - total_tasks=0, - execution_time=0.0, - final_results_count=0 - ) - except Exception as e: - logger.error(f"Error in executeUnifiedWorkflow: {str(e)}") - return WorkflowResult( - status="failed", - completed_tasks=0, - total_tasks=0, - execution_time=0.0, - final_results_count=0, - error=str(e) - ) diff --git a/modules/features/chatPlayground/mainChatPlayground.py b/modules/features/chatPlayground/mainChatPlayground.py new file mode 100644 index 00000000..13eba835 --- /dev/null +++ b/modules/features/chatPlayground/mainChatPlayground.py @@ -0,0 +1,29 @@ +import logging +import asyncio +from typing import Optional + +from modules.interfaces.interfaceAppModel import User +from modules.interfaces.interfaceChatModel import ChatWorkflow, UserInputRequest +from modules.shared.timezoneUtils import get_utc_timestamp + +logger = logging.getLogger(__name__) + +async def chatStart(interfaceChat, currentUser: User, userInput: UserInputRequest, workflowId: Optional[str] = None) -> ChatWorkflow: + """Starts a new chat or continues an existing one, then launches processing asynchronously.""" + try: + from modules.workflows.workflowManager import WorkflowManager + workflowManager = WorkflowManager(interfaceChat, currentUser) + return await workflowManager.workflowStart(userInput, workflowId) + except Exception as e: + logger.error(f"Error starting chat: {str(e)}") + raise + +async def chatStop(interfaceChat, currentUser: User, workflowId: str) -> ChatWorkflow: + """Stops a running chat.""" + try: + from modules.workflows.workflowManager import WorkflowManager + workflowManager = WorkflowManager(interfaceChat, currentUser) + return await workflowManager.workflowStop(workflowId) + except Exception as e: + 
logger.error(f"Error stopping chat: {str(e)}") + raise diff --git a/modules/features/featureNeutralizePlayground.py b/modules/features/neutralizePlayground/mainNeutralizePlayground.py similarity index 99% rename from modules/features/featureNeutralizePlayground.py rename to modules/features/neutralizePlayground/mainNeutralizePlayground.py index e5c75a37..877ca8aa 100644 --- a/modules/features/featureNeutralizePlayground.py +++ b/modules/features/neutralizePlayground/mainNeutralizePlayground.py @@ -13,7 +13,7 @@ import mimetypes from modules.interfaces.interfaceAppObjects import getInterface from modules.interfaces.interfaceAppModel import User, DataNeutraliserConfig, DataNeutralizerAttributes -from modules.neutralizer.neutralizer import DataAnonymizer +from modules.services.serviceNeutralization.neutralizer import DataAnonymizer from modules.shared.timezoneUtils import get_utc_timestamp logger = logging.getLogger(__name__) diff --git a/modules/features/featureSyncDelta.py b/modules/features/syncDelta/mainSyncDelta.py similarity index 100% rename from modules/features/featureSyncDelta.py rename to modules/features/syncDelta/mainSyncDelta.py diff --git a/modules/interfaces/interfaceAiCalls.py b/modules/interfaces/interfaceAiCalls.py index f0bb67b4..6f0de9c9 100644 --- a/modules/interfaces/interfaceAiCalls.py +++ b/modules/interfaces/interfaceAiCalls.py @@ -2,7 +2,7 @@ import logging from typing import Dict, Any, List, Union, Optional from modules.connectors.connectorAiOpenai import AiOpenai, ContextLengthExceededException from modules.connectors.connectorAiAnthropic import AiAnthropic -from modules.chat.documents.documentExtraction import DocumentExtraction +from modules.services.serviceDocument.documentExtraction import DocumentExtraction from modules.interfaces.interfaceChatModel import ChatDocument logger = logging.getLogger(__name__) diff --git a/modules/interfaces/interfaceAppObjects.py b/modules/interfaces/interfaceAppObjects.py index ed8fdca1..ccd471f1 100644 --- a/modules/interfaces/interfaceAppObjects.py +++ b/modules/interfaces/interfaceAppObjects.py @@ -1141,7 +1141,7 @@ class AppObjects: def neutralizeText(self, text: str, file_id: Optional[str] = None) -> Dict[str, Any]: """Neutralize text content and store attribute mappings""" try: - from modules.neutralizer.neutralizer import DataAnonymizer + from modules.services.serviceNeutralization.neutralizer import DataAnonymizer # Get neutralization configuration to extract namesToParse config = self.getNeutralizationConfig() diff --git a/modules/interfaces/interfaceChatModel.py b/modules/interfaces/interfaceChatModel.py index ed71963a..9ead0fb2 100644 --- a/modules/interfaces/interfaceChatModel.py +++ b/modules/interfaces/interfaceChatModel.py @@ -80,6 +80,70 @@ register_model_labels( } ) +# ===== Minimal ReAct-style Workflow Models ===== + +class ActionSelection(BaseModel, ModelMixin): + """Model for selecting exactly one action in a step""" + method: str = Field(description="Method to execute (e.g., web, document, ai)") + name: str = Field(description="Action name within the method (e.g., search, extract)") + +register_model_labels( + "ActionSelection", + {"en": "Action Selection", "fr": "Sélection d'action"}, + { + "method": {"en": "Method", "fr": "Méthode"}, + "name": {"en": "Action Name", "fr": "Nom de l'action"} + } +) + +class ActionParameters(BaseModel, ModelMixin): + """Model for specifying only the parameters for the selected action""" + parameters: Dict[str, Any] = Field(default_factory=dict, description="Parameters to 
execute the selected action") + +register_model_labels( + "ActionParameters", + {"en": "Action Parameters", "fr": "Paramètres d'action"}, + { + "parameters": {"en": "Parameters", "fr": "Paramètres"} + } +) + +class ObservationPreview(BaseModel, ModelMixin): + """Compact preview item for observations""" + name: str = Field(description="Document name or URL label") + mime: str = Field(description="MIME type or kind") + snippet: str = Field(description="Short snippet or summary") + +register_model_labels( + "ObservationPreview", + {"en": "Observation Preview", "fr": "Aperçu d'observation"}, + { + "name": {"en": "Name", "fr": "Nom"}, + "mime": {"en": "MIME", "fr": "MIME"}, + "snippet": {"en": "Snippet", "fr": "Extrait"} + } +) + +class Observation(BaseModel, ModelMixin): + """Compact observation returned to the model after each action""" + success: bool = Field(description="Action execution success flag") + resultLabel: str = Field(description="Deterministic label for produced documents") + documentsCount: int = Field(description="Number of produced documents") + previews: List[ObservationPreview] = Field(default_factory=list, description="Compact previews of outputs") + notes: List[str] = Field(default_factory=list, description="Short notes or key facts") + +register_model_labels( + "Observation", + {"en": "Observation", "fr": "Observation"}, + { + "success": {"en": "Success", "fr": "Succès"}, + "resultLabel": {"en": "Result Label", "fr": "Étiquette du résultat"}, + "documentsCount": {"en": "Documents Count", "fr": "Nombre de documents"}, + "previews": {"en": "Previews", "fr": "Aperçus"}, + "notes": {"en": "Notes", "fr": "Notes"} + } +) + # ===== Base Enums and Simple Models ===== class TaskStatus(str, Enum): @@ -630,6 +694,25 @@ class ChatWorkflow(BaseModel, ModelMixin): frontend_readonly=True, frontend_required=False ) + # Workflow mode selection (e.g., Actionplan, React) + workflowMode: str = Field( + default="Actionplan", + description="Workflow mode selector", + frontend_type="select", + frontend_readonly=False, + frontend_required=False, + frontend_options=[ + {"value": "Actionplan", "label": {"en": "Action Plan", "fr": "Plan d'actions"}}, + {"value": "React", "label": {"en": "React", "fr": "Réactif"}} + ] + ) + maxSteps: int = Field( + default=5, + description="Maximum number of iterations in react mode", + frontend_type="integer", + frontend_readonly=False, + frontend_required=False + ) # Register labels for ChatWorkflow register_model_labels( @@ -650,11 +733,13 @@ register_model_labels( "logs": {"en": "Logs", "fr": "Journaux"}, "messages": {"en": "Messages", "fr": "Messages"}, "stats": {"en": "Statistics", "fr": "Statistiques"}, - "tasks": {"en": "Tasks", "fr": "Tâches"} + "tasks": {"en": "Tasks", "fr": "Tâches"}, + "workflowMode": {"en": "Workflow Mode", "fr": "Mode de workflow"}, + "maxSteps": {"en": "Max Steps", "fr": "Étapes max"} } ) -# ====== WORKFLOW SUPPORT MODELS (for managerChat.py compatibility) ====== +# ====== WORKFLOW SUPPORT MODELS ====== class TaskStep(BaseModel, ModelMixin): id: str @@ -763,6 +848,9 @@ class TaskContext(BaseModel, ModelMixin): # Criteria progress tracking for retries criteria_progress: Optional[dict] = None + # Iterative loop controls (moved to ChatWorkflow.workflowMode and ChatWorkflow.maxSteps) + # reactMode and maxSteps are now controlled at the workflow level + def getDocumentReferences(self) -> List[str]: """Get all available document references from previous handover""" docs = [] diff --git a/modules/interfaces/interfaceChatObjects.py 
b/modules/interfaces/interfaceChatObjects.py index 7b6806da..1b0a2af5 100644 --- a/modules/interfaces/interfaceChatObjects.py +++ b/modules/interfaces/interfaceChatObjects.py @@ -748,10 +748,9 @@ class ChatObjects: except Exception as e: logger.error(f"Error removing file {fileId} from message {messageId}: {str(e)}") return False - # Document methods - + def getDocuments(self, messageId: str) -> List[ChatDocument]: """Returns documents for a message from normalized table.""" try: @@ -910,7 +909,7 @@ class ChatObjects: msg_timestamp = msg.get("publishedAt", get_utc_timestamp()) if afterTimestamp is not None and msg_timestamp <= afterTimestamp: continue - + # Load documents for each message documents = self.getDocuments(msg["id"]) @@ -952,7 +951,7 @@ class ChatObjects: log_timestamp = log.get("timestamp", get_utc_timestamp()) if afterTimestamp is not None and log_timestamp <= afterTimestamp: continue - + chat_log = ChatLog(**log) items.append({ "type": "log", @@ -967,7 +966,7 @@ class ChatObjects: stat_timestamp = stat.get("_createdAt", get_utc_timestamp()) if afterTimestamp is not None and stat_timestamp <= afterTimestamp: continue - + chat_stat = ChatStat(**stat) items.append({ "type": "stat", @@ -980,234 +979,6 @@ class ChatObjects: return {"items": items} - def updateWorkflowStats(self, workflowId: str, bytesSent: int = 0, bytesReceived: int = 0) -> bool: - """Updates workflow statistics during execution with incremental values.""" - try: - # Get current workflow - workflow = self.getWorkflow(workflowId) - if not workflow: - logger.error(f"Workflow {workflowId} not found for stats update") - return False - - if not self._canModify(ChatWorkflow, workflowId): - logger.error(f"No permission to update workflow {workflowId} stats") - return False - - # Get current stats from normalized table - currentStats = self.getWorkflowStats(workflowId) - if currentStats: - current_bytes_sent = currentStats.bytesSent or 0 - current_bytes_received = currentStats.bytesReceived or 0 - current_processing_time = currentStats.processingTime or 0 - else: - current_bytes_sent = 0 - current_bytes_received = 0 - current_processing_time = 0 - - # Calculate processing time as duration since workflow start - if workflow and workflow.startedAt: - try: - start_time = int(float(workflow.startedAt)) - current_time = int(get_utc_timestamp()) - processing_time = current_time - start_time - - # Ensure processing time is reasonable - if processing_time < 0: - processing_time = 0 - elif processing_time > 86400 * 365: # More than 1 year - processing_time = 0 - except Exception as e: - logger.warning(f"Error calculating processing time: {str(e)}") - processing_time = current_processing_time - else: - processing_time = current_processing_time - - # Update stats with incremental values - new_bytes_sent = current_bytes_sent + bytesSent - new_bytes_received = current_bytes_received + bytesReceived - new_token_count = new_bytes_sent + new_bytes_received - - # Create or update stats record in normalized table - stats_record = { - "workflowId": workflowId, - "processingTime": processing_time, - "tokenCount": new_token_count, - "bytesSent": new_bytes_sent, - "bytesReceived": new_bytes_received, - "successRate": None, - "errorCount": None - } - - # Create new stats record - self.db.recordCreate(ChatStat, stats_record) - - - return True - - except Exception as e: - logger.error(f"Error updating workflow stats: {str(e)}") - return False - - - # Workflow Actions - - async def workflowStart(self, currentUser: User, userInput: 
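
# The ReAct-style models introduced in interfaceChatModel.py above are plain
# Pydantic models, so an action's outcome can be folded into a compact
# Observation like this (sketch; field values are illustrative):

from modules.interfaces.interfaceChatModel import Observation, ObservationPreview

obs = Observation(
    success=True,
    resultLabel="web_search_results",
    documentsCount=1,
    previews=[
        ObservationPreview(
            name="web_search_results.csv",
            mime="text/csv",
            snippet="url,title\nhttps://example.com,Example",
        )
    ],
    notes=["1 unique URL found"],
)
print(obs.model_dump())  # assumes Pydantic v2's model_dump()
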
UserInputRequest, workflowId: Optional[str] = None) -> ChatWorkflow: - """ - Starts a new workflow or continues an existing one. - - Args: - userInput: The user input request containing workflow initialization data - workflowId: Optional ID of an existing workflow to continue - - Returns: - ChatWorkflow object representing the started/continued workflow - """ - try: - # Get current timestamp - currentTime = get_utc_timestamp() - - if workflowId: - # Continue existing workflow - load complete state including messages - workflow = self.getWorkflow(workflowId) - if not workflow: - raise ValueError(f"Workflow {workflowId} not found") - - # Check if workflow is currently running and stop it first - if workflow.status == "running": - logger.info(f"Stopping running workflow {workflowId} before processing new prompt") - - # Stop the running workflow - workflow.status = "stopped" - workflow.lastActivity = currentTime - self.updateWorkflow(workflowId, { - "status": "stopped", - "lastActivity": currentTime - }) - - # Add log entry for workflow stop - self.createLog({ - "workflowId": workflowId, - "message": "Workflow stopped for new prompt", - "type": "info", - "status": "stopped", - "progress": 100 - }) - - # Wait a moment for any running processes to detect the stop - await asyncio.sleep(0.1) - - # Update workflow - increment round for existing workflows - newRound = workflow.currentRound + 1 - self.updateWorkflow(workflowId, { - "status": "running", # Set status back to running for resumed workflows - "lastActivity": currentTime, - "currentRound": newRound - }) - - # Reload workflow object to get updated currentRound from database - workflow = self.getWorkflow(workflowId) - if not workflow: - raise ValueError(f"Failed to reload workflow {workflowId} after update") - - # Add log entry for workflow resumption - self.createLog({ - "workflowId": workflowId, - "message": f"Workflow resumed (round {workflow.currentRound})", - "type": "info", - "status": "running", - "progress": 0 - }) - - else: - # Create new workflow - workflowData = { - "name": "New Workflow", # Default name since UserInputRequest doesn't have a name field - "status": "running", - "startedAt": currentTime, - "lastActivity": currentTime, - "currentRound": 0, # Default value, will be set to 1 in workflowStart() - "currentTask": 0, - "currentAction": 0, - "totalTasks": 0, - "totalActions": 0, - "mandateId": self.mandateId, - "messageIds": [], - "stats": { - "processingTime": None, - "tokenCount": None, - "bytesSent": None, - "bytesReceived": None, - "successRate": None, - "errorCount": None - } - } - - # Create workflow - workflow = self.createWorkflow(workflowData) - - # Set currentRound to 1 for new workflows - workflow.currentRound = 1 - self.updateWorkflow(workflow.id, {"currentRound": 1}) - - # Initialize stats for the new workflow - self.updateWorkflowStats(workflow.id, bytesSent=0, bytesReceived=0) - - # Remove the 'Workflow started' log entry - - # Start workflow processing - from modules.features.featureChatPlayground import WorkflowManager - workflowManager = WorkflowManager(self, currentUser) - - # Start the workflow processing asynchronously - # The workflow will be updated with progress data during execution - asyncio.create_task(workflowManager.workflowProcess(userInput, workflow)) - - return workflow - - except Exception as e: - logger.error(f"Error starting workflow: {str(e)}") - raise - - async def workflowStop(self, workflowId: str) -> ChatWorkflow: - """ - Stops a running workflow (State 8: Workflow Stopped). 
- - Args: - workflowId: ID of the workflow to stop - - Returns: - Updated ChatWorkflow object - """ - try: - # Load workflow state - workflow = self.getWorkflow(workflowId) - if not workflow: - raise ValueError(f"Workflow {workflowId} not found") - - # Update workflow status - workflow.status = "stopped" - workflow.lastActivity = get_utc_timestamp() - - # Update in database - self.updateWorkflow(workflowId, { - "status": "stopped", - "lastActivity": workflow.lastActivity - }) - - # Add log entry - self.createLog({ - "workflowId": workflowId, - "message": "Workflow stopped", - "type": "warning", - "status": "stopped", - "progress": 100 - }) - - return workflow - - except Exception as e: - logger.error(f"Error stopping workflow: {str(e)}") - raise def getInterface(currentUser: Optional[User] = None) -> 'ChatObjects': """ diff --git a/modules/routes/routeAdmin.py b/modules/routes/routeAdmin.py index 60663598..4ddfcf84 100644 --- a/modules/routes/routeAdmin.py +++ b/modules/routes/routeAdmin.py @@ -12,8 +12,6 @@ from modules.shared.configuration import APP_CONFIG from modules.security.auth import limiter, getCurrentUser from modules.interfaces.interfaceAppModel import User from modules.interfaces.interfaceAppObjects import getRootInterface -from modules.interfaces.interfaceChatObjects import getInterface as getChatInterface -from modules.interfaces.interfaceComponentObjects import getInterface as getComponentInterface # Static folder setup - using absolute path from app root baseDir = FilePath(__file__).parent.parent.parent # Go up to gateway root @@ -31,43 +29,6 @@ router = APIRouter( # Mount static files router.mount("/static", StaticFiles(directory=str(staticFolder), html=True), name="static") -def get_interface_for_database(database_name: str, currentUser: User): - """ - Get the appropriate interface based on database name. - - Args: - database_name: Name of the database - currentUser: Current user for interface initialization - - Returns: - Interface object for the specified database - - Raises: - HTTPException: If database name is unknown or interface cannot be created - """ - # Get database names from configuration - appDbName = APP_CONFIG.get("DB_APP_DATABASE") - chatDbName = APP_CONFIG.get("DB_CHAT_DATABASE") - managementDbName = APP_CONFIG.get("DB_MANAGEMENT_DATABASE") - - if not appDbName: - raise HTTPException(status_code=500, detail="DB_APP_DATABASE configuration is required") - - # Map database names to their corresponding interfaces - if database_name == appDbName: - return getRootInterface() - elif chatDbName and database_name == chatDbName: - return getChatInterface(currentUser) - elif managementDbName and database_name == managementDbName: - return getComponentInterface(currentUser) - else: - available_dbs = [appDbName] - if chatDbName: - available_dbs.append(chatDbName) - if managementDbName: - available_dbs.append(managementDbName) - raise HTTPException(status_code=400, detail=f"Unknown database. 
Available: {', '.join(available_dbs)}") - @router.get("/") @limiter.limit("30/minute") async def root(request: Request) -> Dict[str, str]: @@ -117,183 +78,3 @@ async def options_route(request: Request, fullPath: str) -> Response: async def favicon(request: Request) -> FileResponse: return FileResponse(str(staticFolder / "favicon.ico"), media_type="image/x-icon") -# ---------------------- -# Log Management -# ---------------------- - -@router.get("/api/logs/app") -@limiter.limit("10/minute") -async def download_app_log(request: Request, currentUser: User = Depends(getCurrentUser)) -> FileResponse: - """Download the current day's application log file""" - # Check if user has admin privileges - if not hasattr(currentUser, 'privilege') or currentUser.privilege not in ('admin', 'sysadmin'): - raise HTTPException(status_code=403, detail="Admin privileges required") - - # Get log directory from config - logDir = APP_CONFIG.get("APP_LOGGING_LOG_DIR") - if not logDir: - raise HTTPException(status_code=500, detail="APP_LOGGING_LOG_DIR configuration is required") - - if not os.path.isabs(logDir): - # If relative path, make it relative to the gateway directory - gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - logDir = os.path.join(gatewayDir, logDir) - - # Get current date for log file - today = datetime.now().strftime("%Y%m%d") - logFile = os.path.join(logDir, f"log_app_{today}.log") - - if not os.path.exists(logFile): - raise HTTPException(status_code=404, detail=f"Application log file for today not found: {logFile}") - - return FileResponse( - path=logFile, - filename=f"log_app_{today}.log", - media_type="text/plain" - ) - -@router.get("/api/logs/audit") -@limiter.limit("10/minute") -async def download_audit_log(request: Request, currentUser: User = Depends(getCurrentUser)) -> FileResponse: - """Download the current day's audit log file""" - # Check if user has admin privileges - if not hasattr(currentUser, 'privilege') or currentUser.privilege not in ('admin', 'sysadmin'): - raise HTTPException(status_code=403, detail="Admin privileges required") - - # Get log directory from config - logDir = APP_CONFIG.get("APP_LOGGING_LOG_DIR") - if not logDir: - raise HTTPException(status_code=500, detail="APP_LOGGING_LOG_DIR configuration is required") - - if not os.path.isabs(logDir): - # If relative path, make it relative to the gateway directory - gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - logDir = os.path.join(gatewayDir, logDir) - - # Get current date for log file - today = datetime.now().strftime("%Y%m%d") - logFile = os.path.join(logDir, f"log_audit_{today}.log") - - if not os.path.exists(logFile): - raise HTTPException(status_code=404, detail=f"Audit log file for today not found: {logFile}") - - return FileResponse( - path=logFile, - filename=f"log_audit_{today}.log", - media_type="text/plain" - ) - -# ---------------------- -# Database Management -# ---------------------- - -@router.get("/api/databases") -@limiter.limit("10/minute") -async def list_databases(request: Request, currentUser: User = Depends(getCurrentUser)) -> Dict[str, Any]: - """List available databases""" - # Check if user has admin privileges - if not hasattr(currentUser, 'privilege') or currentUser.privilege not in ('admin', 'sysadmin'): - raise HTTPException(status_code=403, detail="Admin privileges required") - - try: - # Get configured database names from configuration - databases = [] - - # App database - required configuration - appDb = 
APP_CONFIG.get("DB_APP_DATABASE") - if not appDb: - raise HTTPException(status_code=500, detail="DB_APP_DATABASE configuration is required") - databases.append(appDb) - - # Chat database - optional configuration - chatDb = APP_CONFIG.get("DB_CHAT_DATABASE") - if chatDb and chatDb not in databases: - databases.append(chatDb) - - # Management database - optional configuration - managementDb = APP_CONFIG.get("DB_MANAGEMENT_DATABASE") - if managementDb and managementDb not in databases: - databases.append(managementDb) - - return {"databases": databases} - except HTTPException: - raise - except Exception as e: - logger.error(f"Error listing databases: {e}") - raise HTTPException(status_code=500, detail="Failed to list databases") - -@router.get("/api/databases/{database_name}/tables") -@limiter.limit("10/minute") -async def list_tables( - request: Request, - database_name: str, - currentUser: User = Depends(getCurrentUser) -) -> Dict[str, Any]: - """List tables in a specific database""" - # Check if user has admin privileges - if not hasattr(currentUser, 'privilege') or currentUser.privilege not in ('admin', 'sysadmin'): - raise HTTPException(status_code=403, detail="Admin privileges required") - - try: - # Get the appropriate interface based on database name - interface = get_interface_for_database(database_name, currentUser) - - # Check if interface and database connection exist - if not interface or not interface.db: - raise HTTPException(status_code=500, detail="Database interface not available") - - # Get tables from database - tables = interface.db.getTables() - - return {"database": database_name, "tables": tables} - except HTTPException: - raise - except Exception as e: - logger.error(f"Error listing tables for database {database_name}: {e}") - raise HTTPException(status_code=500, detail=f"Failed to list tables for database {database_name}") - -@router.post("/api/databases/{database_name}/tables/drop") -@limiter.limit("5/minute") -async def drop_table( - request: Request, - database_name: str, - currentUser: User = Depends(getCurrentUser), - payload: Dict[str, Any] = Body(...) 
-) -> Dict[str, Any]: - """Drop a specific table from a database""" - # Check if user has admin privileges - if not hasattr(currentUser, 'privilege') or currentUser.privilege not in ('admin', 'sysadmin'): - raise HTTPException(status_code=403, detail="Admin privileges required") - - table_name = payload.get("table") - if not table_name: - raise HTTPException(status_code=400, detail="Table name is required") - - try: - # Get the appropriate interface based on database name - interface = get_interface_for_database(database_name, currentUser) - - # Check if interface and database connection exist - if not interface or not interface.db: - raise HTTPException(status_code=500, detail="Database interface not available") - - # Check if table exists - tables = interface.db.getTables() - if table_name not in tables: - raise HTTPException(status_code=404, detail=f"Table '{table_name}' not found in database '{database_name}'") - - # Drop the table - with interface.db.connection.cursor() as cursor: - cursor.execute(f'DROP TABLE IF EXISTS "{table_name}" CASCADE') - interface.db.connection.commit() - - logger.warning(f"Admin drop_table executed by {currentUser.id}: dropped table '{table_name}' from database '{database_name}'") - return {"message": f"Table '{table_name}' dropped successfully from database '{database_name}'"} - - except HTTPException: - raise - except Exception as e: - logger.error(f"Error dropping table {table_name} from database {database_name}: {e}") - if 'interface' in locals() and interface.db.connection: - interface.db.connection.rollback() - raise HTTPException(status_code=500, detail=f"Failed to drop table '{table_name}' from database '{database_name}'") diff --git a/modules/routes/routeChatPlayground.py b/modules/routes/routeChatPlayground.py new file mode 100644 index 00000000..24bc91a3 --- /dev/null +++ b/modules/routes/routeChatPlayground.py @@ -0,0 +1,132 @@ +""" +Chat Playground routes for the backend API. +Implements the endpoints for chat playground workflow management. +""" + +import logging +from typing import Optional, Dict, Any +from fastapi import APIRouter, HTTPException, Depends, Body, Path, Query, Request +from datetime import datetime + +# Import auth modules +from modules.security.auth import limiter, getCurrentUser + +# Import interfaces +import modules.interfaces.interfaceChatObjects as interfaceChatObjects +from modules.interfaces.interfaceChatObjects import getInterface + +# Import models +from modules.interfaces.interfaceChatModel import ( + ChatWorkflow, + UserInputRequest +) +from modules.interfaces.interfaceAppModel import User + +# Import workflow control functions +from modules.features.chatPlayground.mainChatPlayground import chatStart, chatStop + +# Configure logger +logger = logging.getLogger(__name__) + +# Create router for chat playground endpoints +router = APIRouter( + prefix="/api/chat/playground", + tags=["Chat Playground"], + responses={404: {"description": "Not found"}} +) + +def getServiceChat(currentUser: User): + return interfaceChatObjects.getInterface(currentUser) + +# Workflow start endpoint +@router.post("/start", response_model=ChatWorkflow) +@limiter.limit("120/minute") +async def start_workflow( + request: Request, + workflowId: Optional[str] = Query(None, description="Optional ID of the workflow to continue"), + userInput: UserInputRequest = Body(...), + currentUser: User = Depends(getCurrentUser) +) -> ChatWorkflow: + """ + Starts a new workflow or continues an existing one. 
diff --git a/modules/routes/routeDataNeutralization.py b/modules/routes/routeDataNeutralization.py
index 697c6f1c..939c4422 100644
--- a/modules/routes/routeDataNeutralization.py
+++ b/modules/routes/routeDataNeutralization.py
@@ -7,7 +7,7 @@ from modules.security.auth import limiter, getCurrentUser
 
 # Import interfaces
 from modules.interfaces.interfaceAppModel import User, DataNeutraliserConfig, DataNeutralizerAttributes
-from modules.features.featureNeutralizePlayground import NeutralizationService
+from modules.features.neutralizePlayground.mainNeutralizePlayground import NeutralizationService
 
 # Configure logger
 logger = logging.getLogger(__name__)
diff --git a/modules/routes/routeWorkflows.py b/modules/routes/routeWorkflows.py
index fe70e347..7b9dd8f9 100644
--- a/modules/routes/routeWorkflows.py
+++ b/modules/routes/routeWorkflows.py
@@ -24,13 +24,13 @@ from modules.interfaces.interfaceChatModel import (
     ChatMessage,
     ChatLog,
     ChatStat,
-    ChatDocument,
-    UserInputRequest
+    ChatDocument
 )
 from modules.shared.attributeUtils import getModelAttributeDefinitions, AttributeResponse
 from modules.interfaces.interfaceAppModel import User
 from modules.shared.timezoneUtils import get_utc_timestamp
+
 # Configure logger
 logger = logging.getLogger(__name__)
 
@@ -276,59 +276,6 @@ async def get_workflow_messages(
         detail=f"Error getting workflow messages: {str(e)}"
     )
 
-# State 1: Workflow Initialization endpoint
-@router.post("/start", response_model=ChatWorkflow)
-@limiter.limit("120/minute")
-async def start_workflow(
-    request: Request,
-    workflowId: Optional[str] = Query(None, description="Optional ID of the workflow to continue"),
-    userInput: UserInputRequest = Body(...),
-    currentUser: User = Depends(getCurrentUser)
-) -> ChatWorkflow:
-    """
-    Starts a new workflow or continues an existing one.
-    Corresponds to State 1 in the state machine documentation.
-    """
-    try:
-        # Get service center
-        interfaceChat = getServiceChat(currentUser)
-
-        # Start or continue workflow using ChatObjects
-        workflow = await interfaceChat.workflowStart(currentUser, userInput, workflowId)
-
-        return workflow
-
-    except Exception as e:
-        logger.error(f"Error in start_workflow: {str(e)}")
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=str(e)
-        )
-
-# State 8: Workflow Stopped endpoint
-@router.post("/{workflowId}/stop", response_model=ChatWorkflow)
-@limiter.limit("120/minute")
-async def stop_workflow(
-    request: Request,
-    workflowId: str = Path(..., description="ID of the workflow to stop"),
-    currentUser: User = Depends(getCurrentUser)
-) -> ChatWorkflow:
-    """Stops a running workflow."""
-    try:
-        # Get service center
-        interfaceChat = getServiceChat(currentUser)
-
-        # Stop workflow using ChatObjects
-        workflow = await interfaceChat.workflowStop(workflowId)
-
-        return workflow
-
-    except Exception as e:
-        logger.error(f"Error in stop_workflow: {str(e)}")
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=str(e)
-        )
 
 # State 11: Workflow Reset/Deletion endpoint
 @router.delete("/{workflowId}", response_model=Dict[str, Any])
@@ -383,45 +330,6 @@ async def delete_workflow(
     )
 
 
-# Unified Chat Data Endpoint for Polling
-@router.get("/{workflowId}/chatData")
-@limiter.limit("120/minute")
-async def get_workflow_chat_data(
-    request: Request,
-    workflowId: str = Path(..., description="ID of the workflow"),
-    afterTimestamp: Optional[float] = Query(None, description="Unix timestamp to get data after"),
-    currentUser: User = Depends(getCurrentUser)
-) -> Dict[str, Any]:
-    """
-    Get unified chat data (messages, logs, stats) for a workflow with timestamp-based selective data transfer.
-    Returns all data types in chronological order based on _createdAt timestamp.
-    """
-    try:
-        # Get service center
-        interfaceChat = getServiceChat(currentUser)
-
-        # Verify workflow exists
-        workflow = interfaceChat.getWorkflow(workflowId)
-        if not workflow:
-            raise HTTPException(
-                status_code=status.HTTP_404_NOT_FOUND,
-                detail=f"Workflow with ID {workflowId} not found"
-            )
-
-        # Get unified chat data using the new method
-        chatData = interfaceChat.getUnifiedChatData(workflowId, afterTimestamp)
-
-        return chatData
-
-    except HTTPException:
-        raise
-    except Exception as e:
-        logger.error(f"Error getting unified chat data: {str(e)}", exc_info=True)
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"Error getting unified chat data: {str(e)}"
-        )
-
 # Document Management Endpoints
 @router.delete("/{workflowId}/messages/{messageId}", response_model=Dict[str, Any])
diff --git a/modules/chat/serviceCenter.py b/modules/services/serviceCenter.py
similarity index 99%
rename from modules/chat/serviceCenter.py
rename to modules/services/serviceCenter.py
index 55648ead..85b04ed4 100644
--- a/modules/chat/serviceCenter.py
+++ b/modules/services/serviceCenter.py
@@ -13,9 +13,9 @@ from modules.interfaces.interfaceChatObjects import getInterface as getChatObjec
 from modules.interfaces.interfaceChatModel import ActionResult
 from modules.interfaces.interfaceComponentObjects import getInterface as getComponentObjects
 from modules.interfaces.interfaceAppObjects import getInterface as getAppObjects
-from modules.chat.documents.documentExtraction import DocumentExtraction
-from modules.chat.documents.documentUtility import getFileExtension, getMimeTypeFromExtension, detectContentTypeFromData
-from modules.methods.methodBase import MethodBase
+from modules.services.serviceDocument.documentExtraction import DocumentExtraction
+from modules.services.serviceDocument.documentUtility import getFileExtension, getMimeTypeFromExtension, detectContentTypeFromData
+from modules.workflows.methods.methodBase import MethodBase
 from modules.shared.timezoneUtils import get_utc_timestamp
 import uuid
 
@@ -57,7 +57,7 @@ class ServiceCenter:
             if not isPkg and name.startswith('method'):
                 try:
                     # Import the module
-                    module = importlib.import_module(f'modules.methods.{name}')
+                    module = importlib.import_module(f'modules.workflows.methods.{name}')
 
                     # Find all classes in the module that inherit from MethodBase
                     for itemName, item in inspect.getmembers(module):
diff --git a/modules/chat/documents/documentExtraction.py b/modules/services/serviceDocument/documentExtraction.py
similarity index 99%
rename from modules/chat/documents/documentExtraction.py
rename to modules/services/serviceDocument/documentExtraction.py
index b6165b9e..0a73e46a 100644
--- a/modules/chat/documents/documentExtraction.py
+++ b/modules/services/serviceDocument/documentExtraction.py
@@ -9,7 +9,7 @@ from pathlib import Path
 import xml.etree.ElementTree as ET
 from bs4 import BeautifulSoup
 import uuid
-from modules.chat.documents.documentUtility import (
+from modules.services.serviceDocument.documentUtility import (
     getFileExtension,
     getMimeTypeFromExtension,
     detectMimeTypeFromContent,
@@ -22,7 +22,7 @@ from modules.interfaces.interfaceChatModel import (
     ContentItem,
     ContentMetadata
 )
-from modules.neutralizer.neutralizer import DataAnonymizer
+from modules.services.serviceNeutralization.neutralizer import DataAnonymizer
 from modules.shared.configuration import APP_CONFIG
 
 logger = logging.getLogger(__name__)
diff --git a/modules/chat/documents/documentGeneration.py b/modules/services/serviceDocument/documentGeneration.py
similarity index 99%
rename from modules/chat/documents/documentGeneration.py
rename to modules/services/serviceDocument/documentGeneration.py
index 2d844ed3..16ab7b16 100644
--- a/modules/chat/documents/documentGeneration.py
+++ b/modules/services/serviceDocument/documentGeneration.py
@@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional
 from datetime import datetime, UTC
 import re
 from modules.shared.timezoneUtils import get_utc_timestamp
-from modules.chat.documents.documentUtility import (
+from modules.services.serviceDocument.documentUtility import (
     getFileExtension,
     getMimeTypeFromExtension,
     detectMimeTypeFromContent,
diff --git a/modules/chat/documents/documentUtility.py b/modules/services/serviceDocument/documentUtility.py
similarity index 100%
rename from modules/chat/documents/documentUtility.py
rename to modules/services/serviceDocument/documentUtility.py
diff --git a/modules/neutralizer/neutralizer.py b/modules/services/serviceNeutralization/neutralizer.py
similarity index 87%
rename from modules/neutralizer/neutralizer.py
rename to modules/services/serviceNeutralization/neutralizer.py
index f8677465..e284ae00 100644
--- a/modules/neutralizer/neutralizer.py
+++ b/modules/services/serviceNeutralization/neutralizer.py
@@ -8,12 +8,12 @@ import logging
 from typing import Dict, List, Any
 
 # Import all necessary classes and functions
-from modules.neutralizer.subProcessCommon import ProcessResult, CommonUtils
-from modules.neutralizer.subProcessText import TextProcessor, PlainText
-from modules.neutralizer.subProcessList import ListProcessor, TableData
-from modules.neutralizer.subProcessBinary import BinaryProcessor, BinaryData
-from modules.neutralizer.subParseString import StringParser
-from modules.neutralizer.subPatterns import Pattern, HeaderPatterns, DataPatterns, TextTablePatterns
+from modules.services.serviceNeutralization.subProcessCommon import ProcessResult, CommonUtils
+from modules.services.serviceNeutralization.subProcessText import TextProcessor, PlainText
+from modules.services.serviceNeutralization.subProcessList import ListProcessor, TableData
+from modules.services.serviceNeutralization.subProcessBinary import BinaryProcessor, BinaryData
+from modules.services.serviceNeutralization.subParseString import StringParser
+from modules.services.serviceNeutralization.subPatterns import Pattern, HeaderPatterns, DataPatterns, TextTablePatterns
 
 # Configure logging
 logger = logging.getLogger(__name__)
diff --git a/modules/neutralizer/readme.md b/modules/services/serviceNeutralization/readme.md
similarity index 100%
rename from modules/neutralizer/readme.md
rename to modules/services/serviceNeutralization/readme.md
diff --git a/modules/neutralizer/subParseString.py b/modules/services/serviceNeutralization/subParseString.py
similarity index 98%
rename from modules/neutralizer/subParseString.py
rename to modules/services/serviceNeutralization/subParseString.py
index a2b39333..fd9f54cc 100644
--- a/modules/neutralizer/subParseString.py
+++ b/modules/services/serviceNeutralization/subParseString.py
@@ -6,7 +6,7 @@ Handles pattern matching and replacement for emails, phones, addresses, IDs and
 import re
 import uuid
 from typing import Dict, List, Tuple, Any
-from modules.neutralizer.subPatterns import DataPatterns, find_patterns_in_text
+from modules.services.serviceNeutralization.subPatterns import DataPatterns, find_patterns_in_text
 
 class StringParser:
     """Handles string parsing and replacement operations"""
diff --git a/modules/neutralizer/subPatterns.py b/modules/services/serviceNeutralization/subPatterns.py
similarity index 100%
rename from modules/neutralizer/subPatterns.py
rename to modules/services/serviceNeutralization/subPatterns.py
diff --git a/modules/neutralizer/subProcessBinary.py b/modules/services/serviceNeutralization/subProcessBinary.py
similarity index 100%
rename from modules/neutralizer/subProcessBinary.py
rename to modules/services/serviceNeutralization/subProcessBinary.py
diff --git a/modules/neutralizer/subProcessCommon.py b/modules/services/serviceNeutralization/subProcessCommon.py
similarity index 100%
rename from modules/neutralizer/subProcessCommon.py
rename to modules/services/serviceNeutralization/subProcessCommon.py
diff --git a/modules/neutralizer/subProcessList.py b/modules/services/serviceNeutralization/subProcessList.py
similarity index 96%
rename from modules/neutralizer/subProcessList.py
rename to modules/services/serviceNeutralization/subProcessList.py
index 58981333..e4ac91f7 100644
--- a/modules/neutralizer/subProcessList.py
+++ b/modules/services/serviceNeutralization/subProcessList.py
@@ -9,8 +9,8 @@ import xml.etree.ElementTree as ET
 from typing import Dict, List, Any, Union
 from dataclasses import dataclass
 from io import StringIO
-from modules.neutralizer.subParseString import StringParser
-from modules.neutralizer.subPatterns import get_pattern_for_header, HeaderPatterns
+from modules.services.serviceNeutralization.subParseString import StringParser
+from modules.services.serviceNeutralization.subPatterns import get_pattern_for_header, HeaderPatterns
 
 @dataclass
 class TableData:
@@ -156,7 +156,7 @@ class ListProcessor:
                             processed_attrs[attr_name] = self.string_parser.mapping[attr_value]
                         else:
                             # Check if attribute value matches any data patterns
-                            from modules.neutralizer.subPatterns import find_patterns_in_text, DataPatterns
+                            from modules.services.serviceNeutralization.subPatterns import find_patterns_in_text, DataPatterns
                             matches = find_patterns_in_text(attr_value, DataPatterns.patterns)
                             if matches:
                                 pattern_name = matches[0][0]
@@ -191,7 +191,7 @@ class ListProcessor:
                 # Skip if already a placeholder
                 if not self.string_parser.is_placeholder(text):
                     # Check if text matches any patterns
-                    from modules.neutralizer.subPatterns import find_patterns_in_text, DataPatterns
+                    from modules.services.serviceNeutralization.subPatterns import find_patterns_in_text, DataPatterns
                     pattern_matches = find_patterns_in_text(text, DataPatterns.patterns)
 
                     if pattern_matches:
diff --git a/modules/neutralizer/subProcessText.py b/modules/services/serviceNeutralization/subProcessText.py
similarity index 97%
rename from modules/neutralizer/subProcessText.py
rename to modules/services/serviceNeutralization/subProcessText.py
index c9ad872f..20dfe291 100644
--- a/modules/neutralizer/subProcessText.py
+++ b/modules/services/serviceNeutralization/subProcessText.py
@@ -5,7 +5,7 @@ Handles plain text processing without header information
 
 from typing import Dict, List, Any
 from dataclasses import dataclass
-from modules.neutralizer.subParseString import StringParser
+from modules.services.serviceNeutralization.subParseString import StringParser
 
 @dataclass
 class PlainText:
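These renames change every import path under modules.neutralizer at once; a hypothetical re-export shim at the old location (not in this patch) is one way to keep stale imports working during such a move:

# modules/neutralizer/neutralizer.py -- hypothetical compatibility shim
# Re-export the public class from its new home so old imports still resolve.
from modules.services.serviceNeutralization.neutralizer import DataAnonymizer  # noqa: F401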
diff --git a/modules/chat/handling/executionState.py b/modules/workflows/_transfer/executionState.py
similarity index 64%
rename from modules/chat/handling/executionState.py
rename to modules/workflows/_transfer/executionState.py
index 1f806745..1d9b1963 100644
--- a/modules/chat/handling/executionState.py
+++ b/modules/workflows/_transfer/executionState.py
@@ -1,5 +1,5 @@
 # executionState.py
-# Contains all execution state management logic extracted from managerChat.py
+# Contains all execution state management logic
 
 import logging
 from typing import List
@@ -18,6 +18,9 @@ class TaskExecutionState:
         self.current_action_index = 0
         self.retry_count = 0
         self.max_retries = 3
+        # Iterative loop (react mode)
+        self.current_step = 0
+        self.max_steps = 5
 
     def addSuccessfulAction(self, action_result: ActionResult):
         """Add a successful action to the state"""
@@ -52,4 +55,26 @@ class TaskExecutionState:
             patterns.append("format_issues")
         elif "permission" in error or "access denied" in error:
             patterns.append("permission_issues")
-        return list(set(patterns))
\ No newline at end of file
+        return list(set(patterns))
+
+def should_continue(observation, review=None, current_step: int = 0, max_steps: int = 5) -> bool:
+    """Helper to decide if the iterative loop should continue.
+    - Stop if max steps reached
+    - Stop if review indicates 'stop' or success criteria are met
+    - Otherwise continue; a failed step (observation with success False and no
+      documents) is still allowed another attempt, bounded by max_steps
+    """
+    try:
+        if current_step >= max_steps:
+            return False
+        if review and isinstance(review, dict):
+            decision = review.get('decision') or review.get('status')
+            if decision in ('stop', 'success'):
+                return False
+        # The observation is deliberately not a stop condition here: a hard
+        # failure gets the next step, and the max_steps cap bounds the loop.
+        return True
+    except Exception:
+        # Fail closed on malformed review data
+        return False
\ No newline at end of file
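A small standalone driver showing how should_continue gates the iteration; the observation and review dicts here are stand-ins for the observe_build and refine_decide output defined further below.

from modules.workflows._transfer.executionState import should_continue

step, max_steps = 1, 5
while True:
    observation = {"success": True, "documentsCount": 1}      # stand-in for observe_build(...)
    review = {"decision": "continue", "reason": "need more"}  # stand-in for refine_decide(...)
    if not should_continue(observation, review, step, max_steps):
        break   # exits once step reaches max_steps, or earlier on a 'stop'/'success' review
    step += 1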
Returns {"action": {method, name}}""" + prompt = createActionSelectionPrompt(context, self.service) + self.service.writeTraceLog("React Plan Selection Prompt", prompt) + response = await self.service.callAiTextAdvanced(prompt) + self.service.writeTraceLog("React Plan Selection Response", response) + json_start = response.find('{') if response else -1 + json_end = response.rfind('}') + 1 if response else 0 + if json_start == -1 or json_end == 0: + raise ValueError("No JSON in selection response") + selection = json.loads(response[json_start:json_end]) + if 'action' not in selection or not isinstance(selection['action'], dict): + raise ValueError("Selection missing 'action'") + return selection + + async def act_execute(self, context: TaskContext, selection: Dict[str, Any], task_step: TaskStep, workflow, step_index: int) -> ActionResult: + """Act: request minimal parameters then execute selected action.""" + action = selection.get('action', {}) + params_prompt = createActionParameterPrompt(context, action, self.service) + self.service.writeTraceLog("React Parameters Prompt", params_prompt) + params_resp = await self.service.callAiTextAdvanced(params_prompt) + self.service.writeTraceLog("React Parameters Response", params_resp) + js = params_resp[params_resp.find('{'):params_resp.rfind('}')+1] if params_resp else '{}' + try: + param_obj = json.loads(js) + except Exception: + param_obj = {"parameters": {}} + parameters = param_obj.get('parameters', {}) if isinstance(param_obj, dict) else {} + + # Apply minimal defaults in-code (language) + if 'language' not in parameters and hasattr(self.service, 'user') and getattr(self.service.user, 'language', None): + parameters['language'] = self.service.user.language + + # Build a synthetic TaskAction for execution routing and labels + current_round = getattr(self.workflow, 'currentRound', 0) + current_task = getattr(self.workflow, 'currentTask', 0) + result_label = f"round{current_round}_task{current_task}_action{step_index}_results" + task_action = self.createTaskAction({ + "execMethod": action.get('method', ''), + "execAction": action.get('name', ''), + "execParameters": parameters, + "execResultLabel": result_label, + "status": TaskStatus.PENDING + }) + # Execute using existing single action flow + return await self.executeSingleAction(task_action, workflow, task_step, current_task, step_index, 1) + + def observe_build(self, action_result: ActionResult) -> Dict[str, Any]: + """Observe: build compact observation object from ActionResult""" + previews = [] + if action_result and action_result.documents: + for doc in action_result.documents[:5]: + name = getattr(doc, 'documentName', '') + mime = getattr(doc, 'mimeType', '') + snippet = '' + data = getattr(doc, 'documentData', None) + if isinstance(data, str): + snippet = data[:200] + elif isinstance(data, dict): + snippet = str(data)[:200] + previews.append({"name": name, "mime": mime, "snippet": snippet}) + observation = { + "success": bool(action_result.success), + "resultLabel": action_result.resultLabel or "", + "documentsCount": len(action_result.documents) if action_result.documents else 0, + "previews": previews, + "notes": [] + } + return observation + + async def refine_decide(self, context: TaskContext, observation: Dict[str, Any]) -> Dict[str, Any]: + """Refine: decide continue or stop, with reason""" + prompt = createRefinementPrompt(context, observation) + self.service.writeTraceLog("React Refinement Prompt", prompt) + resp = await self.service.callAiTextAdvanced(prompt) + 
self.service.writeTraceLog("React Refinement Response", resp) + js = resp[resp.find('{'):resp.rfind('}')+1] if resp else '{}' + try: + decision = json.loads(js) + except Exception: + decision = {"decision": "continue", "reason": "default"} + return decision + async def executeTask(self, task_step, workflow, context, task_index=None, total_tasks=None) -> TaskResult: - """Execute all actions for a task step, with state management and retries.""" + """Execute all actions for a task step, with state management and retries. + When workflow.workflowMode is 'React', run compact plan–act–observe–refine loop. + """ logger.info(f"=== STARTING TASK {task_index or '?'}: {task_step.objective} ===") # PHASE 4: Update workflow object before executing task @@ -476,6 +566,70 @@ class HandlingTasks: logger.info(f"Task start message created for task {task_index}") state = TaskExecutionState(task_step) + # React mode path - check workflow mode instead of context + if isinstance(context, TaskContext) and hasattr(context, 'workflow') and context.workflow and getattr(context.workflow, 'workflowMode', 'Actionplan') == 'React': + state.max_steps = max(1, int(getattr(context.workflow, 'maxSteps', 5))) + step = 1 + last_review_dict = None + while step <= state.max_steps: + self._checkWorkflowStopped() + # Update workflow[currentAction] for UI + self.updateWorkflowBeforeExecutingAction(step) + self.service.setWorkflowContext(action_number=step) + try: + t0 = time.time() + selection = await self.plan_select(context) + result = await self.act_execute(context, selection, task_step, workflow, step) + observation = self.observe_build(result) + # Attach deterministic label for clarity + observation['resultLabel'] = result.resultLabel + decision = await self.refine_decide(context, observation) + # Telemetry: simple duration per step + duration = time.time() - t0 + self.chatInterface.createLog({ + "workflowId": workflow.id, + "message": f"react_step_duration_sec={duration:.3f}", + "type": "info" + }) + last_review_dict = decision + # Simple messaging per iteration + msg = { + "workflowId": workflow.id, + "role": "assistant", + "message": f"🔁 Step {step}/{state.max_steps}: {selection.get('action',{}).get('method','')}.{selection.get('action',{}).get('name','')} → {'✅' if result.success else '❌'}", + "status": "step", + "sequenceNr": len(workflow.messages) + 1, + "publishedAt": get_utc_timestamp(), + "documentsLabel": observation.get('resultLabel'), + "documents": [], + "roundNumber": workflow.currentRound, + "taskNumber": task_index, + "actionNumber": step, + "actionProgress": "success" if result.success else "fail" + } + self.chatInterface.createMessage(msg) + except Exception as e: + logger.error(f"React step {step} error: {e}") + break + + from modules.workflows._transfer.executionState import should_continue + if not should_continue(observation, last_review_dict, step, state.max_steps): + break + step += 1 + + # Summarize task result for react mode + status = TaskStatus.COMPLETED + success = True + feedback = last_review_dict.get('reason') if isinstance(last_review_dict, dict) else 'Completed' + if isinstance(last_review_dict, dict) and last_review_dict.get('decision') == 'stop': + success = True + return TaskResult( + taskId=task_step.id, + status=status, + success=success, + feedback=feedback, + error=None if success else feedback + ) retry_context = context max_retries = state.max_retries for attempt in range(max_retries): @@ -1511,4 +1665,4 @@ class HandlingTasks: logger.info("Workflow reset for new session - all 
values set to initial state and updated in database") except Exception as e: - logger.error(f"Error resetting workflow for new session: {str(e)}") \ No newline at end of file + logger.error(f"Error resetting workflow for new session: {str(e)}") diff --git a/modules/chat/handling/promptFactory.py b/modules/workflows/_transfer/promptFactory.py similarity index 91% rename from modules/chat/handling/promptFactory.py rename to modules/workflows/_transfer/promptFactory.py index c15979e2..3cf3f5b5 100644 --- a/modules/chat/handling/promptFactory.py +++ b/modules/workflows/_transfer/promptFactory.py @@ -1,16 +1,16 @@ # promptFactory.py -# Contains all prompt creation functions extracted from managerChat.py +# Contains all prompt creation functions import json import logging from typing import Any, Dict, List from modules.interfaces.interfaceChatModel import TaskContext, ReviewContext -from modules.chat.documents.documentUtility import getFileExtension +from modules.services.serviceDocument.documentUtility import getFileExtension # Set up logger logger = logging.getLogger(__name__) -# Prompt creation helpers extracted from managerChat.py +# Prompt creation helpers def _getAvailableDocuments(workflow) -> str: """ @@ -831,3 +831,93 @@ USER LANGUAGE: {user_language} - All user messages must be generated in this lan NOTE: Respond with ONLY the JSON object. Do not include any explanatory text.""" return prompt + +# ===== New compact prompts for React-style workflow ===== + +def _build_tiny_catalog(service) -> str: + """Return minimal tool catalog: method -> { action -> [paramNames] }""" + try: + method_signatures = service.getMethodsList() + except Exception: + method_signatures = [] + catalog: Dict[str, Dict[str, List[str]]] = {} + for sig in method_signatures: + if '.' not in sig or '(' not in sig or ')' not in sig: + continue + method, rest = sig.split('.', 1) + action = rest.split('(')[0] + params_str = rest[rest.find('(')+1:rest.find(')')].strip() + param_names = [] + if params_str: + for p in params_str.split(','): + name = p.strip().split(':')[0].split('=')[0].strip() + if name: + param_names.append(name) + catalog.setdefault(method, {})[action] = param_names + return json.dumps(catalog, separators=(',', ':'), ensure_ascii=False) + +def createActionSelectionPrompt(context: TaskContext, service) -> str: + """Prompt that returns exactly one action selection: {"action":{"method":"..","name":".."}}""" + user_language = service.user.language if service and service.user else 'en' + tiny_catalog = _build_tiny_catalog(service) + objective = context.task_step.objective if context and context.task_step else '' + available_docs = _getAvailableDocuments(context.workflow) if context and context.workflow else "No documents available" + return f"""Select exactly one action to advance the task. + +OBJECTIVE: {objective} +AVAILABLE DOCUMENTS: {available_docs} +USER LANGUAGE: {user_language} + +MINIMAL TOOL CATALOG (method -> action -> [parameterNames]): +{tiny_catalog} + +BUSINESS RULES: +- Pick exactly one action per step. +- Derive choice from objective and success criteria. +- Prefer user language. +- Keep it minimal; avoid provider specifics. 
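plan_select, act_execute and refine_decide each repeat the find('{')/rfind('}') idiom for digging JSON out of a model reply; a consolidated helper along these lines (a sketch, not part of the patch) would behave the same way:

import json
from typing import Any, Optional

def extract_json_object(text: Optional[str], default: Any = None) -> Any:
    # Best-effort: parse the outermost {...} span of an LLM reply, else default.
    if not text:
        return default
    start, end = text.find('{'), text.rfind('}') + 1
    if start == -1 or end <= start:
        return default
    try:
        return json.loads(text[start:end])
    except json.JSONDecodeError:
        return default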
diff --git a/modules/chat/handling/promptFactory.py b/modules/workflows/_transfer/promptFactory.py
similarity index 91%
rename from modules/chat/handling/promptFactory.py
rename to modules/workflows/_transfer/promptFactory.py
index c15979e2..3cf3f5b5 100644
--- a/modules/chat/handling/promptFactory.py
+++ b/modules/workflows/_transfer/promptFactory.py
@@ -1,16 +1,16 @@
 # promptFactory.py
-# Contains all prompt creation functions extracted from managerChat.py
+# Contains all prompt creation functions
 
 import json
 import logging
 from typing import Any, Dict, List
 
 from modules.interfaces.interfaceChatModel import TaskContext, ReviewContext
-from modules.chat.documents.documentUtility import getFileExtension
+from modules.services.serviceDocument.documentUtility import getFileExtension
 
 # Set up logger
 logger = logging.getLogger(__name__)
 
-# Prompt creation helpers extracted from managerChat.py
+# Prompt creation helpers
 
 def _getAvailableDocuments(workflow) -> str:
     """
@@ -831,3 +831,93 @@ USER LANGUAGE: {user_language} - All user messages must be generated in this lan
 
 NOTE: Respond with ONLY the JSON object. Do not include any explanatory text."""
 
     return prompt
+
+# ===== New compact prompts for React-style workflow =====
+
+def _build_tiny_catalog(service) -> str:
+    """Return minimal tool catalog: method -> { action -> [paramNames] }"""
+    try:
+        method_signatures = service.getMethodsList()
+    except Exception:
+        method_signatures = []
+    catalog: Dict[str, Dict[str, List[str]]] = {}
+    for sig in method_signatures:
+        if '.' not in sig or '(' not in sig or ')' not in sig:
+            continue
+        method, rest = sig.split('.', 1)
+        action = rest.split('(')[0]
+        params_str = rest[rest.find('(')+1:rest.find(')')].strip()
+        param_names = []
+        if params_str:
+            for p in params_str.split(','):
+                name = p.strip().split(':')[0].split('=')[0].strip()
+                if name:
+                    param_names.append(name)
+        catalog.setdefault(method, {})[action] = param_names
+    return json.dumps(catalog, separators=(',', ':'), ensure_ascii=False)
+
+def createActionSelectionPrompt(context: TaskContext, service) -> str:
+    """Prompt that returns exactly one action selection: {"action":{"method":"..","name":".."}}"""
+    user_language = service.user.language if service and service.user else 'en'
+    tiny_catalog = _build_tiny_catalog(service)
+    objective = context.task_step.objective if context and context.task_step else ''
+    available_docs = _getAvailableDocuments(context.workflow) if context and context.workflow else "No documents available"
+    return f"""Select exactly one action to advance the task.
+
+OBJECTIVE: {objective}
+AVAILABLE DOCUMENTS: {available_docs}
+USER LANGUAGE: {user_language}
+
+MINIMAL TOOL CATALOG (method -> action -> [parameterNames]):
+{tiny_catalog}
+
+BUSINESS RULES:
+- Pick exactly one action per step.
+- Derive choice from objective and success criteria.
+- Prefer user language.
+- Keep it minimal; avoid provider specifics.
+
+RESPONSE FORMAT (JSON only):
+{{"action":{{"method":"web","name":"search"}}}}
+"""
+
+def createActionParameterPrompt(context: TaskContext, selected_action: Dict[str, str], service=None) -> str:
+    """Prompt that returns only parameters for the selected action: {"parameters":{...}}"""
+    user_language = service.user.language if service and service.user else 'en'
+    method = selected_action.get('method', '') if selected_action else ''
+    name = selected_action.get('name', '') if selected_action else ''
+    available_docs = _getAvailableDocuments(context.workflow) if context and context.workflow else "No documents available"
+    return f"""Provide only the required parameters for this action.
+
+SELECTED ACTION: {method}.{name}
+OBJECTIVE: {context.task_step.objective if context and context.task_step else ''}
+AVAILABLE DOCUMENTS: {available_docs}
+USER LANGUAGE: {user_language}
+
+RULES:
+- Return only the parameters object.
+- Include user language if relevant.
+- Reference documents only by exact labels available.
+- Avoid unnecessary fields; host applies defaults.
+
+RESPONSE FORMAT (JSON only):
+{{"parameters":{{}}}}
+"""
+
+def createRefinementPrompt(context: TaskContext, observation: Dict[str, Any]) -> str:
+    """Prompt that decides to continue or stop based on observation: {"decision":"continue|stop","reason":".."}"""
+    objective = context.task_step.objective if context and context.task_step else ''
+    return f"""Decide next step based on observation.
+
+OBJECTIVE: {objective}
+OBSERVATION:
+{json.dumps(observation, ensure_ascii=False)}
+
+RULES:
+- If criteria are met or no further action helps, decide stop.
+- Else decide continue.
+
+RESPONSE FORMAT (JSON only):
+{{"decision":"continue","reason":"Need more data"}}
+"""
\ No newline at end of file
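To make the catalog format concrete: assuming getMethodsList() yields signatures like the ones below (example data only, not from the codebase), _build_tiny_catalog's split rules produce this compact JSON:

signatures = [
    "web.search(query: str, maxResults: int = 5)",
    "document.create(name, content)",
]
# method.action(params) -> {"method": {"action": [paramNames]}}:
# {"web":{"search":["query","maxResults"]},"document":{"create":["name","content"]}}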
diff --git a/modules/methods/methodAi.py b/modules/workflows/methods/methodAi.py
similarity index 99%
rename from modules/methods/methodAi.py
rename to modules/workflows/methods/methodAi.py
index f947db83..ffd9d58e 100644
--- a/modules/methods/methodAi.py
+++ b/modules/workflows/methods/methodAi.py
@@ -7,7 +7,7 @@ import logging
 from typing import Dict, Any, List, Optional
 from datetime import datetime, UTC
 
-from modules.methods.methodBase import MethodBase, action
+from modules.workflows.methods.methodBase import MethodBase, action
 from modules.interfaces.interfaceChatModel import ActionResult
 from modules.shared.timezoneUtils import get_utc_timestamp
 
diff --git a/modules/methods/methodBase.py b/modules/workflows/methods/methodBase.py
similarity index 100%
rename from modules/methods/methodBase.py
rename to modules/workflows/methods/methodBase.py
diff --git a/modules/methods/methodDocument.py b/modules/workflows/methods/methodDocument.py
similarity index 99%
rename from modules/methods/methodDocument.py
rename to modules/workflows/methods/methodDocument.py
index 54f45cb9..23fc8b10 100644
--- a/modules/methods/methodDocument.py
+++ b/modules/workflows/methods/methodDocument.py
@@ -8,7 +8,7 @@ import os
 from typing import Dict, Any, List, Optional
 from datetime import datetime, UTC
 
-from modules.methods.methodBase import MethodBase, action
+from modules.workflows.methods.methodBase import MethodBase, action
 from modules.interfaces.interfaceChatModel import ActionResult
 from modules.shared.timezoneUtils import get_utc_timestamp
 
diff --git a/modules/methods/methodOutlook.py b/modules/workflows/methods/methodOutlook.py
similarity index 99%
rename from modules/methods/methodOutlook.py
rename to modules/workflows/methods/methodOutlook.py
index 658b3982..6e2c6440 100644
--- a/modules/methods/methodOutlook.py
+++ b/modules/workflows/methods/methodOutlook.py
@@ -81,7 +81,7 @@ from datetime import datetime, UTC
 import json
 import uuid
 
-from modules.methods.methodBase import MethodBase, action
+from modules.workflows.methods.methodBase import MethodBase, action
 from modules.interfaces.interfaceChatModel import ActionResult
 from modules.interfaces.interfaceAppModel import ConnectionStatus
 from modules.shared.timezoneUtils import get_utc_timestamp
 
diff --git a/modules/methods/methodSharepoint.py b/modules/workflows/methods/methodSharepoint.py
similarity index 99%
rename from modules/methods/methodSharepoint.py
rename to modules/workflows/methods/methodSharepoint.py
index bcb92e0b..d474992e 100644
--- a/modules/methods/methodSharepoint.py
+++ b/modules/workflows/methods/methodSharepoint.py
@@ -13,7 +13,7 @@ from urllib.parse import urlparse
 import aiohttp
 import asyncio
 
-from modules.methods.methodBase import MethodBase, action
+from modules.workflows.methods.methodBase import MethodBase, action
 from modules.interfaces.interfaceChatModel import ActionResult
 from modules.shared.timezoneUtils import get_utc_timestamp
 
diff --git a/modules/methods/methodWeb.py b/modules/workflows/methods/methodWeb.py
similarity index 99%
rename from modules/methods/methodWeb.py
rename to modules/workflows/methods/methodWeb.py
index 409b7151..0de6d26c 100644
--- a/modules/methods/methodWeb.py
+++ b/modules/workflows/methods/methodWeb.py
@@ -2,7 +2,7 @@ import logging
 import csv
 import io
 from typing import Any, Dict
-from modules.methods.methodBase import MethodBase, action
+from modules.workflows.methods.methodBase import MethodBase, action
 from modules.interfaces.interfaceChatModel import ActionResult, ActionDocument
 from modules.interfaces.interfaceWebObjects import WebInterface
 from modules.interfaces.interfaceWebModel import (
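In the workflowManager diff below, workflowStart fires _workflowProcess with asyncio.create_task, so failures surface only inside the background task; a common guard (a sketch, not in this patch) attaches a done-callback so they are at least logged:

import asyncio
import logging

logger = logging.getLogger(__name__)

def logBackgroundFailure(task: asyncio.Task) -> None:
    # Surface exceptions from a fire-and-forget task instead of losing them.
    if not task.cancelled() and task.exception():
        logger.error("Background workflow task failed: %s", task.exception())

async def main() -> None:
    async def process() -> None:   # stand-in for _workflowProcess(userInput, workflow)
        await asyncio.sleep(0)

    task = asyncio.create_task(process())
    task.add_done_callback(logBackgroundFailure)
    await task

asyncio.run(main())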
diff --git a/modules/features/featureChatPlayground.py b/modules/workflows/workflowManager.py
similarity index 55%
rename from modules/features/featureChatPlayground.py
rename to modules/workflows/workflowManager.py
index 85d8c2d4..0d34b5b9 100644
--- a/modules/features/featureChatPlayground.py
+++ b/modules/workflows/workflowManager.py
@@ -8,8 +8,7 @@ from modules.interfaces.interfaceAppObjects import User
 from modules.interfaces.interfaceChatModel import (UserInputRequest, ChatMessage, ChatWorkflow, TaskItem, TaskStatus)
 from modules.interfaces.interfaceChatObjects import ChatObjects
-from modules.chat.managerChat import ChatManager
-from modules.chat.handling.handlingTasks import WorkflowStoppedException
+from modules.workflows._transfer.handlingTasks import HandlingTasks, WorkflowStoppedException
 from modules.interfaces.interfaceChatModel import WorkflowResult
 from modules.shared.timezoneUtils import get_utc_timestamp
@@ -20,125 +19,135 @@ class WorkflowManager:
 
     def __init__(self, chatInterface: ChatObjects, currentUser: User):
         self.chatInterface = chatInterface
-        self.chatManager = ChatManager(currentUser, chatInterface)
         self.currentUser = currentUser
+        self.handlingTasks = None
 
-    async def workflowProcess(self, userInput: UserInputRequest, workflow: ChatWorkflow) -> None:
-        """Process a workflow with user input using unified workflow phases"""
+    async def workflowStart(self, userInput: UserInputRequest, workflowId: Optional[str] = None) -> ChatWorkflow:
+        """Starts a new workflow or continues an existing one, then launches processing."""
         try:
-            # Initialize chat manager
-            await self.chatManager.initialize(workflow)
-
-            # Set user language
-            self.chatManager.handlingTasks.service.setUserLanguage(userInput.userLanguage)
-
-            # Send first message
-            message = await self._sendFirstMessage(userInput, workflow)
-
-            # Execute unified workflow
-            workflow_result = await self.chatManager.executeUnifiedWorkflow(userInput, workflow)
-
-            # Process workflow results
-            await self._processWorkflowResults(workflow, workflow_result, message)
-
-            # Only send last message for successful workflows
-            # Stopped/failed workflows get their final messages in _processWorkflowResults
-            if workflow_result.status == 'success':
-                await self._sendLastMessage(workflow)
-
-        except WorkflowStoppedException:
-            logger.info("Workflow stopped by user")
-            # Update workflow status to stopped
+            currentTime = get_utc_timestamp()
+
+            if workflowId:
+                workflow = self.chatInterface.getWorkflow(workflowId)
+                if not workflow:
+                    raise ValueError(f"Workflow {workflowId} not found")
+
+                if workflow.status == "running":
+                    logger.info(f"Stopping running workflow {workflowId} before processing new prompt")
+                    workflow.status = "stopped"
+                    workflow.lastActivity = currentTime
+                    self.chatInterface.updateWorkflow(workflowId, {
+                        "status": "stopped",
+                        "lastActivity": currentTime
+                    })
+                    self.chatInterface.createLog({
+                        "workflowId": workflowId,
+                        "message": "Workflow stopped for new prompt",
+                        "type": "info",
+                        "status": "stopped",
+                        "progress": 100
+                    })
+                    await asyncio.sleep(0.1)
+
+                newRound = workflow.currentRound + 1
+                self.chatInterface.updateWorkflow(workflowId, {
+                    "status": "running",
+                    "lastActivity": currentTime,
+                    "currentRound": newRound
+                })
+
+                workflow = self.chatInterface.getWorkflow(workflowId)
+                if not workflow:
+                    raise ValueError(f"Failed to reload workflow {workflowId} after update")
+
+                self.chatInterface.createLog({
+                    "workflowId": workflowId,
+                    "message": f"Workflow resumed (round {workflow.currentRound})",
+                    "type": "info",
+                    "status": "running",
+                    "progress": 0
+                })
+            else:
+                workflowData = {
+                    "name": "New Workflow",
+                    "status": "running",
+                    "startedAt": currentTime,
+                    "lastActivity": currentTime,
+                    "currentRound": 0,
+                    "currentTask": 0,
+                    "currentAction": 0,
+                    "totalTasks": 0,
+                    "totalActions": 0,
+                    "mandateId": self.chatInterface.mandateId,
+                    "messageIds": [],
+                    "stats": {
+                        "processingTime": None,
+                        "tokenCount": None,
+                        "bytesSent": None,
+                        "bytesReceived": None,
+                        "successRate": None,
+                        "errorCount": None
+                    }
+                }
+
+                workflow = self.chatInterface.createWorkflow(workflowData)
+                workflow.currentRound = 1
+                self.chatInterface.updateWorkflow(workflow.id, {"currentRound": 1})
+                self.chatInterface.updateWorkflowStats(workflow.id, bytesSent=0, bytesReceived=0)
+
+            # Start workflow processing asynchronously
+            asyncio.create_task(self._workflowProcess(userInput, workflow))
+
+            return workflow
+        except Exception as e:
+            logger.error(f"Error starting workflow: {str(e)}")
+            raise
+
+    async def workflowStop(self, workflowId: str) -> ChatWorkflow:
+        """Stops a running workflow."""
+        try:
+            workflow = self.chatInterface.getWorkflow(workflowId)
+            if not workflow:
+                raise ValueError(f"Workflow {workflowId} not found")
+
             workflow.status = "stopped"
             workflow.lastActivity = get_utc_timestamp()
-            self.chatInterface.updateWorkflow(workflow.id, {
+            self.chatInterface.updateWorkflow(workflowId, {
                 "status": "stopped",
-                "lastActivity": workflow.lastActivity,
-                "totalTasks": workflow.totalTasks,
-                "totalActions": workflow.totalActions
+                "lastActivity": workflow.lastActivity
             })
-
-            # Create final stopped message
-            stopped_message = {
-                "workflowId": workflow.id,
-                "role": "assistant",
-                "message": "🛑 Workflow stopped by user",
-                "status": "last",
-                "sequenceNr": len(workflow.messages) + 1,
-                "publishedAt": get_utc_timestamp(),
-                "documentsLabel": "workflow_stopped",
-                "documents": [],
-                # Add workflow context fields
-                "roundNumber": workflow.currentRound,
-                "taskNumber": 0,
-                "actionNumber": 0,
-                # Add progress status
-                "taskProgress": "pending",
-                "actionProgress": "pending"
-            }
-            message = self.chatInterface.createMessage(stopped_message)
-            if message:
-                workflow.messages.append(message)
-
-            # Add log entry
             self.chatInterface.createLog({
-                "workflowId": workflow.id,
-                "message": "Workflow stopped by user",
+                "workflowId": workflowId,
+                "message": "Workflow stopped",
                 "type": "warning",
                 "status": "stopped",
                 "progress": 100
             })
+            return workflow
+        except Exception as e:
+            logger.error(f"Error stopping workflow: {str(e)}")
+            raise
+
+    async def _workflowProcess(self, userInput: UserInputRequest, workflow: ChatWorkflow) -> None:
+        """Process a workflow with user input"""
+        try:
+            self.handlingTasks = HandlingTasks(self.chatInterface, self.currentUser, workflow)
+            self.handlingTasks.service.setUserLanguage(userInput.userLanguage)
+            message = await self._sendFirstMessage(userInput, workflow)
+            task_plan = await self._planTasks(userInput, workflow)
+            workflow_result = await self._executeTasks(task_plan, workflow)
+            await self._processWorkflowResults(workflow, workflow_result, message)
+
+        except WorkflowStoppedException:
+            self._handleWorkflowStop(workflow)
         except Exception as e:
-            logger.error(f"Workflow processing error: {str(e)}")
-
-            # Update workflow status to failed
-            workflow.status = "failed"
-            workflow.lastActivity = get_utc_timestamp()
-            self.chatInterface.updateWorkflow(workflow.id, {
-                "status": "failed",
-                "lastActivity": workflow.lastActivity,
-                "totalTasks": workflow.totalTasks,
-                "totalActions": workflow.totalActions
-            })
-
-            # Create error message
-            error_message = {
-                "workflowId": workflow.id,
-                "role": "assistant",
-                "message": f"Workflow processing failed: {str(e)}",
-                "status": "last",
-                "sequenceNr": len(workflow.messages) + 1,
-                "publishedAt": get_utc_timestamp(),
-                "documentsLabel": "workflow_error",
-                "documents": [],
-                # Add workflow context fields
-                "roundNumber": workflow.currentRound,
-                "taskNumber": 0,
-                "actionNumber": 0,
-                # Add progress status
-                "taskProgress": "fail",
-                "actionProgress": "fail"
-            }
-            message = self.chatInterface.createMessage(error_message)
-            if message:
-                workflow.messages.append(message)
-
-            # Add error log entry
-            self.chatInterface.createLog({
-                "workflowId": workflow.id,
-                "message": f"Workflow failed: {str(e)}",
-                "type": "error",
-                "status": "failed",
-                "progress": 100
-            })
-
-            raise
+            self._handleWorkflowError(workflow, e)
 
     async def _sendFirstMessage(self, userInput: UserInputRequest, workflow: ChatWorkflow) -> ChatMessage:
         """Send first message to start workflow"""
         try:
-            self.chatManager.handlingTasks._checkWorkflowStopped()
+            self.handlingTasks._checkWorkflowStopped()
 
             # Create initial message using interface
             # Generate the correct documentsLabel that matches what getDocumentReferenceString will create
@@ -171,12 +180,12 @@ class WorkflowManager:
                 workflow.messages.append(message)
 
             # Clear trace log for new workflow session
-            self.chatManager.handlingTasks.service.clearTraceLog()
+            self.handlingTasks.service.clearTraceLog()
 
             # Add documents if any, now with messageId
             if userInput.listFileId:
                 # Process file IDs and add to message data
-                documents = await self.chatManager.handlingTasks.service.processFileIds(userInput.listFileId, message.id)
+                documents = await self.handlingTasks.service.processFileIds(userInput.listFileId, message.id)
                 message.documents = documents
                 # Update the message with documents in database
                 self.chatInterface.updateMessage(message.id, {"documents": [doc.to_dict() for doc in documents]})
@@ -188,97 +197,76 @@ class WorkflowManager:
         except Exception as e:
             logger.error(f"Error sending first message: {str(e)}")
             raise
-
-    async def _generateWorkflowFeedback(self, workflow: ChatWorkflow) -> str:
-        """Generate feedback message for workflow completion"""
-        try:
-            self.chatManager.handlingTasks._checkWorkflowStopped()
-
-            # Count messages by role
-            user_messages = [msg for msg in workflow.messages if msg.role == 'user']
-            assistant_messages = [msg for msg in workflow.messages if msg.role == 'assistant']
-
-            # Generate summary feedback
-            feedback = f"Workflow completed.\n\n"
-            feedback += f"Processed {len(user_messages)} user inputs and generated {len(assistant_messages)} responses.\n"
-
-            # Add final status
-            if workflow.status == "completed":
-                feedback += "All tasks completed successfully."
-            elif workflow.status == "partial":
-                feedback += "Some tasks completed with partial success."
-            else:
-                feedback += f"Workflow status: {workflow.status}"
-
-            return feedback
-
-        except Exception as e:
-            logger.error(f"Error generating workflow feedback: {str(e)}")
-            return "Workflow processing completed."
 
-    async def _sendLastMessage(self, workflow: ChatWorkflow) -> None:
-        """Send last message to complete workflow (only for successful workflows)"""
-        try:
-            # Safety check: ensure this is only called for successful workflows
-            if workflow.status in ['stopped', 'failed']:
-                logger.warning(f"Attempted to send last message for {workflow.status} workflow {workflow.id}")
-                return
-
-            # Generate feedback
-            feedback = await self._generateWorkflowFeedback(workflow)
-
-            # Create last message using interface
-            messageData = {
-                "workflowId": workflow.id,
-                "role": "assistant",
-                "message": feedback,
-                "status": "last",
-                "sequenceNr": len(workflow.messages) + 1,
-                "publishedAt": get_utc_timestamp(),
-                "documentsLabel": "workflow_feedback",
-                "documents": [],
-                # Add workflow context fields
-                "roundNumber": workflow.currentRound,
-                "taskNumber": 0,
-                "actionNumber": 0,
-                # Add progress status
-                "taskProgress": "success",
-                "actionProgress": "success"
-            }
-
-            # Create message using interface
-            message = self.chatInterface.createMessage(messageData)
-            if message:
-                workflow.messages.append(message)
-
-            # Update workflow status to completed
-            workflow.status = "completed"
-            workflow.lastActivity = get_utc_timestamp()
-
-            # Update workflow in database
-            self.chatInterface.updateWorkflow(workflow.id, {
-                "status": "completed",
-                "lastActivity": workflow.lastActivity
-            })
-
-            # Add completion log entry
-            self.chatInterface.createLog({
-                "workflowId": workflow.id,
-                "message": "Workflow completed",
-                "type": "success",
-                "status": "completed",
-                "progress": 100
-            })
-
-        except Exception as e:
-            logger.error(f"Error sending last message: {str(e)}")
-            raise
+    async def _planTasks(self, userInput: UserInputRequest, workflow: ChatWorkflow):
+        """Generate task plan for workflow execution"""
+        handling = self.handlingTasks
+        # Generate task plan first (shared for both modes)
+        task_plan = await handling.generateTaskPlan(userInput.prompt, workflow)
+        if not task_plan or not task_plan.tasks:
+            raise Exception("No tasks generated in task plan.")
+        logger.info(f"Executing workflow mode={getattr(workflow, 'workflowMode', 'Actionplan')} with {len(task_plan.tasks)} tasks")
+        return task_plan
+
+    async def _executeTasks(self, task_plan, workflow: ChatWorkflow) -> WorkflowResult:
+        """Execute all tasks in the task plan"""
+        handling = self.handlingTasks
+        total_tasks = len(task_plan.tasks)
+        all_task_results: List = []
+        previous_results: List[str] = []
+
+        for idx, task_step in enumerate(task_plan.tasks):
+            current_task_index = idx + 1
+            logger.info(f"Task {current_task_index}/{total_tasks}: {task_step.objective}")
+
+            # Build TaskContext (mode-specific behavior is inside HandlingTasks)
+            from modules.interfaces.interfaceChatModel import TaskContext
+            task_context = TaskContext(
+                task_step=task_step,
+                workflow=workflow,
+                workflow_id=workflow.id,
+                available_documents=None,
+                available_connections=None,
+                previous_results=previous_results,
+                previous_handover=None,
+                improvements=[],
+                retry_count=0,
+                previous_action_results=[],
+                previous_review_result=None,
+                is_regeneration=False,
+                failure_patterns=[],
+                failed_actions=[],
+                successful_actions=[],
+                criteria_progress={
+                    'met_criteria': set(),
+                    'unmet_criteria': set(),
+                    'attempt_history': []
+                }
+            )
+
+            task_result = await handling.executeTask(task_step, workflow, task_context, current_task_index, total_tasks)
+            handover_data = await handling.prepareTaskHandover(task_step, [], task_result, workflow)
+            all_task_results.append({
+                'task_step': task_step,
+                'task_result': task_result,
+                'handover_data': handover_data
+            })
+            if task_result.success and task_result.feedback:
+                previous_results.append(task_result.feedback)
+
+        return WorkflowResult(
+            status="completed",
+            completed_tasks=len(all_task_results),
+            total_tasks=total_tasks,
+            execution_time=0.0,
+            final_results_count=len(all_task_results)
+        )
 
     async def _processWorkflowResults(self, workflow: ChatWorkflow, workflow_result: WorkflowResult, initial_message: ChatMessage) -> None:
         """Process workflow results and create appropriate messages"""
         try:
             try:
-                self.chatManager.handlingTasks._checkWorkflowStopped()
+                self.handlingTasks._checkWorkflowStopped()
             except WorkflowStoppedException:
                 logger.info(f"Workflow {workflow.id} was stopped during result processing")
 
@@ -398,47 +386,8 @@ class WorkflowManager:
                 })
                 return
 
-            # For successful workflows, create a simple completion message
-            summary_message = {
-                "workflowId": workflow.id,
-                "role": "assistant",
-                "message": f"Workflow completed successfully.",
-                "status": "last",
-                "sequenceNr": len(workflow.messages) + 1,
-                "publishedAt": get_utc_timestamp(),
-                "documentsLabel": "workflow_completion",
-                "documents": [],
-                # Add workflow context fields
-                "roundNumber": workflow.currentRound,
-                "taskNumber": 0,
-                "actionNumber": 0,
-                # Add progress status
-                "taskProgress": "success",
-                "actionProgress": "success"
-            }
-
-            message = self.chatInterface.createMessage(summary_message)
-            if message:
-                workflow.messages.append(message)
-
-            # Update workflow status to completed for successful workflows
-            workflow.status = "completed"
-            workflow.lastActivity = get_utc_timestamp()
-            self.chatInterface.updateWorkflow(workflow.id, {
-                "status": "completed",
-                "lastActivity": workflow.lastActivity,
-                "totalTasks": workflow.totalTasks,
-                "totalActions": workflow.totalActions
-            })
-
-            # Add completion log entry
-            self.chatInterface.createLog({
-                "workflowId": workflow.id,
-                "message": "Workflow completed successfully",
-                "type": "success",
-                "status": "completed",
-                "progress": 100
-            })
+            # For successful workflows, send detailed completion message
+            await self._sendLastMessage(workflow)
 
         except Exception as e:
             logger.error(f"Error processing workflow results: {str(e)}")
@@ -474,3 +423,179 @@ class WorkflowManager:
                 "totalActions": workflow.totalActions
             })
 
+    async def _sendLastMessage(self, workflow: ChatWorkflow) -> None:
+        """Send last message to complete workflow (only for successful workflows)"""
+        try:
+            # Safety check: ensure this is only called for successful workflows
+            if workflow.status in ['stopped', 'failed']:
+                logger.warning(f"Attempted to send last message for {workflow.status} workflow {workflow.id}")
+                return
+
+            # Generate feedback
+            feedback = await self._generateWorkflowFeedback(workflow)
+
+            # Create last message using interface
+            messageData = {
+                "workflowId": workflow.id,
+                "role": "assistant",
+                "message": feedback,
+                "status": "last",
+                "sequenceNr": len(workflow.messages) + 1,
+                "publishedAt": get_utc_timestamp(),
+                "documentsLabel": "workflow_feedback",
+                "documents": [],
+                # Add workflow context fields
+                "roundNumber": workflow.currentRound,
+                "taskNumber": 0,
+                "actionNumber": 0,
+                # Add progress status
+                "taskProgress": "success",
+                "actionProgress": "success"
+            }
+
+            # Create message using interface
+            message = self.chatInterface.createMessage(messageData)
+            if message:
+                workflow.messages.append(message)
+
+            # Update workflow status to completed
+            workflow.status = "completed"
+            workflow.lastActivity = get_utc_timestamp()
+
+            # Update workflow in database
+            self.chatInterface.updateWorkflow(workflow.id, {
+                "status": "completed",
+                "lastActivity": workflow.lastActivity
+            })
+
+            # Add completion log entry
+            self.chatInterface.createLog({
+                "workflowId": workflow.id,
+                "message": "Workflow completed",
+                "type": "success",
+                "status": "completed",
+                "progress": 100
+            })
+
+        except Exception as e:
+            logger.error(f"Error sending last message: {str(e)}")
+            raise
+
+    async def _generateWorkflowFeedback(self, workflow: ChatWorkflow) -> str:
+        """Generate feedback message for workflow completion"""
+        try:
+            self.handlingTasks._checkWorkflowStopped()
+
+            # Count messages by role
+            user_messages = [msg for msg in workflow.messages if msg.role == 'user']
+            assistant_messages = [msg for msg in workflow.messages if msg.role == 'assistant']
+
+            # Generate summary feedback
+            feedback = f"Workflow completed.\n\n"
+            feedback += f"Processed {len(user_messages)} user inputs and generated {len(assistant_messages)} responses.\n"
+
+            # Add final status
+            if workflow.status == "completed":
+                feedback += "All tasks completed successfully."
+            elif workflow.status == "partial":
+                feedback += "Some tasks completed with partial success."
+            else:
+                feedback += f"Workflow status: {workflow.status}"
+
+            return feedback
+
+        except Exception as e:
+            logger.error(f"Error generating workflow feedback: {str(e)}")
+            return "Workflow processing completed."
+
+    def _handleWorkflowStop(self, workflow: ChatWorkflow) -> None:
+        """Handle workflow stop exception"""
+        logger.info("Workflow stopped by user")
+
+        # Update workflow status to stopped
+        workflow.status = "stopped"
+        workflow.lastActivity = get_utc_timestamp()
+        self.chatInterface.updateWorkflow(workflow.id, {
+            "status": "stopped",
+            "lastActivity": workflow.lastActivity,
+            "totalTasks": workflow.totalTasks,
+            "totalActions": workflow.totalActions
+        })
+
+        # Create final stopped message
+        stopped_message = {
+            "workflowId": workflow.id,
+            "role": "assistant",
+            "message": "🛑 Workflow stopped by user",
+            "status": "last",
+            "sequenceNr": len(workflow.messages) + 1,
+            "publishedAt": get_utc_timestamp(),
+            "documentsLabel": "workflow_stopped",
+            "documents": [],
+            # Add workflow context fields
+            "roundNumber": workflow.currentRound,
+            "taskNumber": 0,
+            "actionNumber": 0,
+            # Add progress status
+            "taskProgress": "pending",
+            "actionProgress": "pending"
+        }
+        message = self.chatInterface.createMessage(stopped_message)
+        if message:
+            workflow.messages.append(message)
+
+        # Add log entry
+        self.chatInterface.createLog({
+            "workflowId": workflow.id,
+            "message": "Workflow stopped by user",
+            "type": "warning",
+            "status": "stopped",
+            "progress": 100
+        })
+
+    def _handleWorkflowError(self, workflow: ChatWorkflow, error: Exception) -> None:
+        """Handle workflow error exception"""
+        logger.error(f"Workflow processing error: {str(error)}")
+
+        # Update workflow status to failed
+        workflow.status = "failed"
+        workflow.lastActivity = get_utc_timestamp()
+        self.chatInterface.updateWorkflow(workflow.id, {
+            "status": "failed",
+            "lastActivity": workflow.lastActivity,
+            "totalTasks": workflow.totalTasks,
+            "totalActions": workflow.totalActions
+        })
+
+        # Create error message
+        error_message = {
+            "workflowId": workflow.id,
+            "role": "assistant",
+            "message": f"Workflow processing failed: {str(error)}",
+            "status": "last",
+            "sequenceNr": len(workflow.messages) + 1,
+            "publishedAt": get_utc_timestamp(),
+            "documentsLabel": "workflow_error",
+            "documents": [],
+            # Add workflow context fields
+            "roundNumber": workflow.currentRound,
+            "taskNumber": 0,
+            "actionNumber": 0,
+            # Add progress status
+            "taskProgress": "fail",
+            "actionProgress": "fail"
+        }
+        message = self.chatInterface.createMessage(error_message)
+        if message:
+            workflow.messages.append(message)
+
+        # Add error log entry
+        self.chatInterface.createLog({
+            "workflowId": workflow.id,
+            "message": f"Workflow failed: {str(error)}",
+            "type": "error",
+            "status": "failed",
+            "progress": 100
+        })
+
+        raise
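One caveat on the TaskContext built in _executeTasks above: criteria_progress holds set() values, which json.dumps rejects, so any JSON persistence of that context needs a conversion first, for example:

import json

criteria_progress = {"met_criteria": set(), "unmet_criteria": set(), "attempt_history": []}
# json.dumps(criteria_progress) would raise TypeError: sets are not JSON serializable
serializable = {k: sorted(v) if isinstance(v, set) else v for k, v in criteria_progress.items()}
print(json.dumps(serializable))  # {"met_criteria": [], "unmet_criteria": [], "attempt_history": []}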
workflow.currentRound, + "taskNumber": 0, + "actionNumber": 0, + # Add progress status + "taskProgress": "fail", + "actionProgress": "fail" + } + message = self.chatInterface.createMessage(error_message) + if message: + workflow.messages.append(message) + + # Add error log entry + self.chatInterface.createLog({ + "workflowId": workflow.id, + "message": f"Workflow failed: {str(error)}", + "type": "error", + "status": "failed", + "progress": 100 + }) + + raise diff --git a/tests/methods/test_method_web.py b/tests/methods/test_method_web.py index 27344ab3..0d1509e2 100644 --- a/tests/methods/test_method_web.py +++ b/tests/methods/test_method_web.py @@ -5,7 +5,7 @@ import logging import pytest from unittest.mock import patch -from modules.methods.methodWeb import MethodWeb +from modules.workflows.methods.methodWeb import MethodWeb from tests.fixtures.tavily_responses import ( RESPONSE_SEARCH_HOW_OLD_IS_EARTH_NO_ANSWER, RESPONSE_EXTRACT_HOW_OLD_IS_EARTH_NO_ANSWER, diff --git a/tool_stats_durations_from_log.py b/tool_stats_durations_from_log.py index 103cd5be..483af2d2 100644 --- a/tool_stats_durations_from_log.py +++ b/tool_stats_durations_from_log.py @@ -13,7 +13,7 @@ def parse_line(line: str) -> Tuple[Optional[str], Optional[str], Optional[dateti Extract (logger, function, timestamp) from a log line. Expected format examples (single line): - 2025-09-18 16:35:04 - INFO - modules.chat.handling.handlingTasks - Task 1 - Starting action 3/4 - D:\\Athi\\...\\handlingTasks.py:572 - executeTask + 2025-09-18 16:35:04 - INFO - modules.workflows._transfer.handlingTasks - Task 1 - Starting action 3/4 - D:\\Athi\\...\\handlingTasks.py:572 - executeTask Returns (logger, function, timestamp_dt) or (None, None, None) if not matched. """ From 1019cb7a6511240c19148374bb17311f998983e2 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Tue, 23 Sep 2025 00:36:24 +0200 Subject: [PATCH 005/169] CHAT 2.0 - Iterative mode --- app.py | 3 + .../chatPlayground/mainChatPlayground.py | 18 +- modules/interfaces/interfaceChatObjects.py | 56 +++++- modules/routes/routeChatPlayground.py | 6 +- modules/routes/routeSecurityAdmin.py | 186 ++++++++++++++++-- modules/services/serviceCenter.py | 4 +- modules/workflows/_transfer/handlingTasks.py | 10 +- modules/workflows/_transfer/promptFactory.py | 10 + modules/workflows/workflowManager.py | 24 ++- 9 files changed, 292 insertions(+), 25 deletions(-) diff --git a/app.py b/app.py index 5caa64f8..a86f0d37 100644 --- a/app.py +++ b/app.py @@ -309,6 +309,9 @@ app.include_router(connectionsRouter) from modules.routes.routeWorkflows import router as workflowRouter app.include_router(workflowRouter) +from modules.routes.routeChatPlayground import router as chatPlaygroundRouter +app.include_router(chatPlaygroundRouter) + from modules.routes.routeSecurityLocal import router as localRouter app.include_router(localRouter) diff --git a/modules/features/chatPlayground/mainChatPlayground.py b/modules/features/chatPlayground/mainChatPlayground.py index 13eba835..07d43043 100644 --- a/modules/features/chatPlayground/mainChatPlayground.py +++ b/modules/features/chatPlayground/mainChatPlayground.py @@ -8,12 +8,24 @@ from modules.shared.timezoneUtils import get_utc_timestamp logger = logging.getLogger(__name__) -async def chatStart(interfaceChat, currentUser: User, userInput: UserInputRequest, workflowId: Optional[str] = None) -> ChatWorkflow: - """Starts a new chat or continues an existing one, then launches processing asynchronously.""" +async def chatStart(interfaceChat, currentUser: User, userInput: 
UserInputRequest, workflowId: Optional[str] = None, workflowMode: str = "Actionplan") -> ChatWorkflow: + """ + Starts a new chat or continues an existing one, then launches processing asynchronously. + + Args: + interfaceChat: Chat interface instance + currentUser: Current user + userInput: User input request + workflowId: Optional workflow ID to continue existing workflow + workflowMode: "Actionplan" for traditional task planning, "React" for iterative react-style processing + + Example usage for React mode: + workflow = await chatStart(interfaceChat, currentUser, userInput, workflowMode="React") + """ try: from modules.workflows.workflowManager import WorkflowManager workflowManager = WorkflowManager(interfaceChat, currentUser) - return await workflowManager.workflowStart(userInput, workflowId) + return await workflowManager.workflowStart(userInput, workflowId, workflowMode) except Exception as e: logger.error(f"Error starting chat: {str(e)}") raise diff --git a/modules/interfaces/interfaceChatObjects.py b/modules/interfaces/interfaceChatObjects.py index 1b0a2af5..a79b2284 100644 --- a/modules/interfaces/interfaceChatObjects.py +++ b/modules/interfaces/interfaceChatObjects.py @@ -270,7 +270,9 @@ class ChatObjects: logs=[], messages=[], stats=None, - mandateId=created.get("mandateId", self.currentUser.mandateId) + mandateId=created.get("mandateId", self.currentUser.mandateId), + workflowMode=created.get("workflowMode", "Actionplan"), + maxSteps=created.get("maxSteps", 1) ) def updateWorkflow(self, workflowId: str, workflowData: Dict[str, Any]) -> ChatWorkflow: @@ -885,6 +887,58 @@ class ChatObjects: stats.sort(key=lambda x: x.get("created_at", ""), reverse=True) return ChatStat(**stats[0]) + def updateWorkflowStats(self, workflowId: str, bytesSent: int = 0, bytesReceived: int = 0, tokenCount: int = 0) -> None: + """ + Updates workflow statistics in the database. 
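+        Counts are deltas: they are added onto the workflow's existing
+        ChatStat record, or a new record is created if none exists yet.
+        Minimal caller sketch (the byte and token figures are made up):
+
+            interfaceChat.updateWorkflowStats(
+                workflow.id, bytesSent=512, bytesReceived=2048, tokenCount=300
+            )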
+ + Args: + workflowId: ID of the workflow to update + bytesSent: Bytes sent (incremental) + bytesReceived: Bytes received (incremental) + tokenCount: Token count (incremental, default 0) + """ + try: + # Check workflow access first + workflow = self.getWorkflow(workflowId) + if not workflow: + logger.warning(f"No access to workflow {workflowId} for stats update") + return + + if not self._canModify(ChatWorkflow, workflowId): + logger.warning(f"No permission to modify workflow {workflowId} for stats update") + return + + # Get existing stats or create new ones + existing_stats = self.getWorkflowStats(workflowId) + + if existing_stats: + # Update existing stats + updated_stats = { + "bytesSent": (existing_stats.bytesSent or 0) + bytesSent, + "bytesReceived": (existing_stats.bytesReceived or 0) + bytesReceived, + "tokenCount": (existing_stats.tokenCount or 0) + tokenCount, + "lastUpdated": get_utc_timestamp() + } + + # Update the stats record + self.db.recordModify(ChatStat, existing_stats.id, updated_stats) + else: + # Create new stats record + new_stats = { + "workflowId": workflowId, + "bytesSent": bytesSent, + "bytesReceived": bytesReceived, + "tokenCount": tokenCount, + "lastUpdated": get_utc_timestamp() + } + + self.db.recordCreate(ChatStat, new_stats) + + logger.debug(f"Updated workflow stats for {workflowId}: +{bytesSent} sent, +{bytesReceived} received, +{tokenCount} tokens") + + except Exception as e: + logger.error(f"Error updating workflow stats for {workflowId}: {str(e)}") + def getUnifiedChatData(self, workflowId: str, afterTimestamp: Optional[float] = None) -> Dict[str, Any]: """ Returns unified chat data (messages, logs, stats) for a workflow in chronological order. diff --git a/modules/routes/routeChatPlayground.py b/modules/routes/routeChatPlayground.py index 24bc91a3..186e65a8 100644 --- a/modules/routes/routeChatPlayground.py +++ b/modules/routes/routeChatPlayground.py @@ -44,19 +44,23 @@ def getServiceChat(currentUser: User): async def start_workflow( request: Request, workflowId: Optional[str] = Query(None, description="Optional ID of the workflow to continue"), + workflowMode: str = Query("Actionplan", description="Workflow mode: 'Actionplan' or 'React'"), userInput: UserInputRequest = Body(...), currentUser: User = Depends(getCurrentUser) ) -> ChatWorkflow: """ Starts a new workflow or continues an existing one. Corresponds to State 1 in the state machine documentation. 
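+
+    A caller opts into iterative processing per request via the query
+    string; the path below is illustrative only, since the router prefix
+    is configured elsewhere:
+
+        POST /workflows/start?workflowMode=React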
+ + Args: + workflowMode: "Actionplan" for traditional task planning, "React" for iterative react-style processing """ try: # Get service center interfaceChat = getServiceChat(currentUser) # Start or continue workflow using playground controller - workflow = await chatStart(interfaceChat, currentUser, userInput, workflowId) + workflow = await chatStart(interfaceChat, currentUser, userInput, workflowId, workflowMode) return workflow diff --git a/modules/routes/routeSecurityAdmin.py b/modules/routes/routeSecurityAdmin.py index 2b509a0e..a7203965 100644 --- a/modules/routes/routeSecurityAdmin.py +++ b/modules/routes/routeSecurityAdmin.py @@ -13,7 +13,7 @@ logger = logging.getLogger(__name__) router = APIRouter( prefix="/api/admin", - tags=["Admin"], + tags=["Security Administration"], responses={ 404: {"description": "Not found"}, 400: {"description": "Bad request"}, @@ -248,9 +248,145 @@ async def list_databases( currentUser: User = Depends(getCurrentUser) ) -> Dict[str, Any]: _ensure_admin_scope(currentUser) - # For safety, expose only configured database name - db_name = APP_CONFIG.get("DB_DATABASE") or APP_CONFIG.get("DB_NAME") or "poweron" - return {"databases": [db_name]} + + # Get database names from configuration for each interface + databases = [] + + # App database (interfaceAppObjects.py) + app_db = APP_CONFIG.get("DB_APP_DATABASE") + if app_db: + databases.append(app_db) + + # Chat database (interfaceChatObjects.py) + chat_db = APP_CONFIG.get("DB_CHAT_DATABASE") + if chat_db: + databases.append(chat_db) + + # Management database (interfaceComponentObjects.py) + management_db = APP_CONFIG.get("DB_MANAGEMENT_DATABASE") + if management_db: + databases.append(management_db) + + # Fallback to default if no databases configured + if not databases: + databases = ["poweron"] + + return {"databases": databases} + + +@router.get("/databases/{database_name}/tables") +@limiter.limit("30/minute") +async def get_database_tables( + request: Request, + database_name: str, + currentUser: User = Depends(getCurrentUser) +) -> Dict[str, Any]: + _ensure_admin_scope(currentUser) + + # Get all configured database names + configured_dbs = [] + app_db = APP_CONFIG.get("DB_APP_DATABASE") + if app_db: + configured_dbs.append(app_db) + chat_db = APP_CONFIG.get("DB_CHAT_DATABASE") + if chat_db: + configured_dbs.append(chat_db) + management_db = APP_CONFIG.get("DB_MANAGEMENT_DATABASE") + if management_db: + configured_dbs.append(management_db) + + if not configured_dbs: + configured_dbs = ["poweron"] + + if database_name not in configured_dbs: + raise HTTPException(status_code=400, detail=f"Invalid database name. 
Available databases: {configured_dbs}")
+
+    try:
+        # Use the appropriate interface based on database name
+        if database_name == app_db:
+            appInterface = getRootInterface()
+            tables = appInterface.db.getTables()
+        elif database_name == chat_db:
+            from modules.interfaces.interfaceChatObjects import getInterface as getChatInterface
+            chatInterface = getChatInterface(currentUser)
+            tables = chatInterface.db.getTables()
+        elif database_name == management_db:
+            from modules.interfaces.interfaceComponentObjects import getInterface as getComponentInterface
+            componentInterface = getComponentInterface(currentUser)
+            tables = componentInterface.db.getTables()
+        else:
+            raise HTTPException(status_code=400, detail="Database not found")
+
+        return {"tables": tables}
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"Error getting database tables: {str(e)}")
+        raise HTTPException(status_code=500, detail="Failed to get database tables")
+
+
+@router.post("/databases/{database_name}/tables/{table_name}/drop")
+@limiter.limit("10/minute")
+async def drop_table(
+    request: Request,
+    database_name: str,
+    table_name: str,
+    currentUser: User = Depends(getCurrentUser),
+    payload: Dict[str, Any] = Body(...)
+) -> Dict[str, Any]:
+    _ensure_admin_scope(currentUser)
+
+    # Get all configured database names
+    configured_dbs = []
+    app_db = APP_CONFIG.get("DB_APP_DATABASE")
+    if app_db:
+        configured_dbs.append(app_db)
+    chat_db = APP_CONFIG.get("DB_CHAT_DATABASE")
+    if chat_db:
+        configured_dbs.append(chat_db)
+    management_db = APP_CONFIG.get("DB_MANAGEMENT_DATABASE")
+    if management_db:
+        configured_dbs.append(management_db)
+
+    if not configured_dbs:
+        configured_dbs = ["poweron"]
+
+    if database_name not in configured_dbs:
+        raise HTTPException(status_code=400, detail=f"Invalid database name. 
Available databases: {configured_dbs}") + + try: + # Use the appropriate interface based on database name + if database_name == app_db: + interface = getRootInterface() + elif database_name == chat_db: + from modules.interfaces.interfaceChatObjects import getInterface as getChatInterface + interface = getChatInterface(currentUser) + elif database_name == management_db: + from modules.interfaces.interfaceComponentObjects import getInterface as getComponentInterface + interface = getComponentInterface(currentUser) + else: + raise HTTPException(status_code=400, detail="Database not found") + + conn = interface.db.connection + with conn.cursor() as cursor: + # Check if table exists + cursor.execute(""" + SELECT table_name FROM information_schema.tables + WHERE table_schema = 'public' AND table_name = %s + """, (table_name,)) + if not cursor.fetchone(): + raise HTTPException(status_code=404, detail="Table not found") + + # Drop the table + cursor.execute(f'DROP TABLE IF EXISTS "{table_name}" CASCADE') + conn.commit() + logger.warning(f"Admin drop_table executed by {currentUser.id}: dropped table '{table_name}' from database '{database_name}'") + return {"message": f"Table '{table_name}' dropped successfully from database '{database_name}'"} + except HTTPException: + raise + except Exception as e: + logger.error(f"Error dropping table: {str(e)}") + if 'interface' in locals() and interface and interface.db and interface.db.connection: + interface.db.connection.rollback() + raise HTTPException(status_code=500, detail="Failed to drop table") @router.post("/databases/drop") @@ -262,13 +398,39 @@ async def drop_database( ) -> Dict[str, Any]: _ensure_admin_scope(currentUser) db_name = payload.get("database") - configured_db = APP_CONFIG.get("DB_DATABASE") or APP_CONFIG.get("DB_NAME") or "poweron" - if not db_name or db_name != configured_db: - raise HTTPException(status_code=400, detail="Invalid database name") + + # Get all configured database names + configured_dbs = [] + app_db = APP_CONFIG.get("DB_APP_DATABASE") + if app_db: + configured_dbs.append(app_db) + chat_db = APP_CONFIG.get("DB_CHAT_DATABASE") + if chat_db: + configured_dbs.append(chat_db) + management_db = APP_CONFIG.get("DB_MANAGEMENT_DATABASE") + if management_db: + configured_dbs.append(management_db) + + if not configured_dbs: + configured_dbs = ["poweron"] + + if not db_name or db_name not in configured_dbs: + raise HTTPException(status_code=400, detail=f"Invalid database name. 
Available databases: {configured_dbs}")
 
     try:
-        appInterface = getRootInterface()
-        conn = appInterface.db.connection
+        # Use the appropriate interface based on database name
+        if db_name == app_db:
+            interface = getRootInterface()
+        elif db_name == chat_db:
+            from modules.interfaces.interfaceChatObjects import getInterface as getChatInterface
+            interface = getChatInterface(currentUser)
+        elif db_name == management_db:
+            from modules.interfaces.interfaceComponentObjects import getInterface as getComponentInterface
+            interface = getComponentInterface(currentUser)
+        else:
+            raise HTTPException(status_code=400, detail="Database not found")
+
+        conn = interface.db.connection
         with conn.cursor() as cursor:
             # Drop all user tables (public schema) except system table
             cursor.execute("""
@@ -281,12 +443,12 @@ async def drop_database(
                 cursor.execute(f'DROP TABLE IF EXISTS "{tbl}" CASCADE')
                 dropped.append(tbl)
             conn.commit()
-        logger.warning(f"Admin drop_database executed by {currentUser.id}: dropped tables: {dropped}")
+        logger.warning(f"Admin drop_database executed by {currentUser.id}: dropped tables from '{db_name}': {dropped}")
         return {"droppedTables": dropped}
     except Exception as e:
         logger.error(f"Error dropping database tables: {str(e)}")
-        if appInterface and appInterface.db and appInterface.db.connection:
-            appInterface.db.connection.rollback()
+        if 'interface' in locals() and interface and interface.db and interface.db.connection:
+            interface.db.connection.rollback()
         raise HTTPException(status_code=500, detail="Failed to drop database tables")

diff --git a/modules/services/serviceCenter.py b/modules/services/serviceCenter.py
index 85b04ed4..0b999772 100644
--- a/modules/services/serviceCenter.py
+++ b/modules/services/serviceCenter.py
@@ -47,10 +47,10 @@ class ServiceCenter:
         self._discoverMethods()
 
     def _discoverMethods(self):
-        """Dynamically discover all method classes and their actions in modules.methods package"""
+        """Dynamically discover all method classes and their actions in the modules.workflows.methods package"""
         try:
             # Import the methods package
-            methodsPackage = importlib.import_module('modules.methods')
+            methodsPackage = importlib.import_module('modules.workflows.methods')
 
             # Discover all modules in the package
             for _, name, isPkg in pkgutil.iter_modules(methodsPackage.__path__):
diff --git a/modules/workflows/_transfer/handlingTasks.py b/modules/workflows/_transfer/handlingTasks.py
index 5346e2d1..7e64be58 100644
--- a/modules/workflows/_transfer/handlingTasks.py
+++ b/modules/workflows/_transfer/handlingTasks.py
@@ -567,7 +567,10 @@ class HandlingTasks:
         state = TaskExecutionState(task_step)
 
         # React mode path - check workflow mode instead of context
-        if isinstance(context, TaskContext) and hasattr(context, 'workflow') and context.workflow and getattr(context.workflow, 'workflowMode', 'Actionplan') == 'React':
+        workflow_mode = getattr(context.workflow, 'workflowMode', 'Actionplan') if context.workflow else 'Actionplan'
+        logger.info(f"Task execution - workflow mode: {workflow_mode}")
+        if isinstance(context, TaskContext) and hasattr(context, 'workflow') and context.workflow and workflow_mode == 'React':
+            logger.info(f"Using React mode execution with max_steps: {getattr(context.workflow, 'maxSteps', 5)}")
             state.max_steps = max(1, int(getattr(context.workflow, 'maxSteps', 5)))
             step = 1
             last_review_dict = None
@@ -579,6 +582,7 @@
                 try:
                     t0 = time.time()
                     selection = await self.plan_select(context)
+                    logger.info(f"React step {step}: Selected action: {selection}")
                     result = await 
self.act_execute(context, selection, task_step, workflow, step) observation = self.observe_build(result) # Attach deterministic label for clarity @@ -630,6 +634,10 @@ class HandlingTasks: feedback=feedback, error=None if success else feedback ) + else: + # Actionplan mode execution + logger.info(f"Using Actionplan mode execution") + retry_context = context max_retries = state.max_retries for attempt in range(max_retries): diff --git a/modules/workflows/_transfer/promptFactory.py b/modules/workflows/_transfer/promptFactory.py index 3cf3f5b5..38686c57 100644 --- a/modules/workflows/_transfer/promptFactory.py +++ b/modules/workflows/_transfer/promptFactory.py @@ -887,9 +887,17 @@ def createActionParameterPrompt(context: TaskContext, selected_action: Dict[str, method = selected_action.get('method', '') if selected_action else '' name = selected_action.get('name', '') if selected_action else '' available_docs = _getAvailableDocuments(context.workflow) if context and context.workflow else "No documents available" + + # Get action signature from service center + action_signature = "" + if service and hasattr(service, 'methods') and method in service.methods: + method_instance = service.methods[method]['instance'] + action_signature = method_instance.getActionSignature(name) + return f"""Provide only the required parameters for this action. SELECTED ACTION: {method}.{name} +ACTION SIGNATURE: {action_signature} OBJECTIVE: {context.task_step.objective if context and context.task_step else ''} AVAILABLE DOCUMENTS: {available_docs} USER LANGUAGE: {user_language} @@ -899,6 +907,8 @@ RULES: - Include user language if relevant. - Reference documents only by exact labels available. - Avoid unnecessary fields; host applies defaults. +- Use the ACTION SIGNATURE above to understand what parameters are required. +- Convert the objective into appropriate parameter values as needed. 
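+
+EXAMPLE (illustrative only; assumes a hypothetical web.search action whose signature takes a single "query" parameter):
+{{"parameters":{{"query":"open tickets assigned to me"}}}}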
RESPONSE FORMAT (JSON only): {{"parameters":{{}}}} diff --git a/modules/workflows/workflowManager.py b/modules/workflows/workflowManager.py index 0d34b5b9..7caf958a 100644 --- a/modules/workflows/workflowManager.py +++ b/modules/workflows/workflowManager.py @@ -21,10 +21,14 @@ class WorkflowManager: self.chatInterface = chatInterface self.currentUser = currentUser self.handlingTasks = None - - async def workflowStart(self, userInput: UserInputRequest, workflowId: Optional[str] = None) -> ChatWorkflow: + + # Exported functions + + async def workflowStart(self, userInput: UserInputRequest, workflowId: Optional[str] = None, workflowMode: str = "Actionplan") -> ChatWorkflow: """Starts a new workflow or continues an existing one, then launches processing.""" try: + # Debug log to check workflowMode parameter + logger.info(f"WorkflowManager received workflowMode: {workflowMode}") currentTime = get_utc_timestamp() if workflowId: @@ -80,6 +84,8 @@ class WorkflowManager: "totalActions": 0, "mandateId": self.chatInterface.mandateId, "messageIds": [], + "workflowMode": workflowMode, + "maxSteps": 5 if workflowMode == "React" else 1, # Set maxSteps for React mode "stats": { "processingTime": None, "tokenCount": None, @@ -91,6 +97,8 @@ class WorkflowManager: } workflow = self.chatInterface.createWorkflow(workflowData) + logger.info(f"Created workflow with mode: {getattr(workflow, 'workflowMode', 'NOT_SET')}") + logger.info(f"Workflow data passed: {workflowData.get('workflowMode', 'NOT_IN_DATA')}") workflow.currentRound = 1 self.chatInterface.updateWorkflow(workflow.id, {"currentRound": 1}) self.chatInterface.updateWorkflowStats(workflow.id, bytesSent=0, bytesReceived=0) @@ -127,7 +135,9 @@ class WorkflowManager: except Exception as e: logger.error(f"Error stopping workflow: {str(e)}") raise - + + # Main processor + async def _workflowProcess(self, userInput: UserInputRequest, workflow: ChatWorkflow) -> None: """Process a workflow with user input""" try: @@ -143,7 +153,9 @@ class WorkflowManager: except Exception as e: self._handleWorkflowError(workflow, e) - + + # Helper functions + async def _sendFirstMessage(self, userInput: UserInputRequest, workflow: ChatWorkflow) -> ChatMessage: """Send first message to start workflow""" try: @@ -205,7 +217,9 @@ class WorkflowManager: task_plan = await handling.generateTaskPlan(userInput.prompt, workflow) if not task_plan or not task_plan.tasks: raise Exception("No tasks generated in task plan.") - logger.info(f"Executing workflow mode={getattr(workflow, 'workflowMode', 'Actionplan')} with {len(task_plan.tasks)} tasks") + workflow_mode = getattr(workflow, 'workflowMode', 'Actionplan') + logger.info(f"Workflow object attributes: {workflow.__dict__ if hasattr(workflow, '__dict__') else 'No __dict__'}") + logger.info(f"Executing workflow mode={workflow_mode} with {len(task_plan.tasks)} tasks") return task_plan async def _executeTasks(self, task_plan, workflow: ChatWorkflow) -> WorkflowResult: From 472353fea03ef15b64c395f3a925f4cc6f1a9bf8 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Tue, 23 Sep 2025 22:47:54 +0200 Subject: [PATCH 006/169] Refactor full workflow engine 3.0 --- app.py | 49 +- .../chatPlayground/mainChatPlayground.py | 11 +- .../mainNeutralizePlayground.py | 752 +++------- modules/features/syncDelta/mainSyncDelta.py | 31 +- modules/interfaces/interfaceAiCalls.py | 527 ------- modules/interfaces/interfaceAiModel.py | 30 + modules/interfaces/interfaceAiObjects.py | 117 ++ modules/interfaces/interfaceAppObjects.py | 543 +++----- 
modules/interfaces/interfaceTicketObjects.py | 8 +- modules/routes/routeDataNeutralization.py | 2 +- modules/routes/routeSecurityGoogle.py | 74 +- modules/routes/routeSecurityLocal.py | 18 +- modules/routes/routeSecurityMsft.py | 9 +- modules/security/auth.py | 101 +- modules/security/jwtService.py | 72 + modules/security/tokenManager.py | 66 +- modules/security/tokenRefreshService.py | 8 +- modules/services/__init__.py | 100 ++ modules/services/serviceAi/mainServiceAi.py | 137 ++ modules/services/serviceCenter.py | 1206 ----------------- ...on.py => mainServiceDocumentExtraction.py} | 119 +- ...on.py => mainServiceDocumentGeneration.py} | 2 +- .../mainNeutralization.py | 206 +++ .../serviceNeutralization/neutralizer.py | 112 -- .../services/serviceNeutralization/readme.md | 91 -- .../serviceNeutralization/subProcessCommon.py | 14 + .../serviceNeutralization/subProcessText.py | 3 +- .../serviceSharepoint/mainSharepoint.py} | 2 +- .../serviceWorkflows/mainServiceWorkflows.py | 546 ++++++++ modules/shared/eventManagement.py | 120 ++ modules/workflows/methods/methodAi.py | 62 +- modules/workflows/methods/methodDocument.py | 47 +- modules/workflows/methods/methodOutlook.py | 39 +- modules/workflows/methods/methodSharepoint.py | 5 +- modules/workflows/methods/methodWeb.py | 19 +- .../executionState.py | 0 .../handlingTasks.py | 286 +++- .../promptFactory.py | 184 ++- modules/workflows/workflowManager.py | 120 +- tool_stats_durations_from_log.py | 2 +- 40 files changed, 2605 insertions(+), 3235 deletions(-) delete mode 100644 modules/interfaces/interfaceAiCalls.py create mode 100644 modules/interfaces/interfaceAiModel.py create mode 100644 modules/interfaces/interfaceAiObjects.py create mode 100644 modules/security/jwtService.py create mode 100644 modules/services/__init__.py create mode 100644 modules/services/serviceAi/mainServiceAi.py delete mode 100644 modules/services/serviceCenter.py rename modules/services/serviceDocument/{documentExtraction.py => mainServiceDocumentExtraction.py} (95%) rename modules/services/serviceDocument/{documentGeneration.py => mainServiceDocumentGeneration.py} (99%) create mode 100644 modules/services/serviceNeutralization/mainNeutralization.py delete mode 100644 modules/services/serviceNeutralization/neutralizer.py delete mode 100644 modules/services/serviceNeutralization/readme.md rename modules/{connectors/connectorSharepoint.py => services/serviceSharepoint/mainSharepoint.py} (99%) create mode 100644 modules/services/serviceWorkflows/mainServiceWorkflows.py create mode 100644 modules/shared/eventManagement.py rename modules/workflows/{_transfer => processing}/executionState.py (100%) rename modules/workflows/{_transfer => processing}/handlingTasks.py (88%) rename modules/workflows/{_transfer => processing}/promptFactory.py (83%) diff --git a/app.py b/app.py index a86f0d37..ccf0c7d1 100644 --- a/app.py +++ b/app.py @@ -4,7 +4,7 @@ os.environ["NUMEXPR_MAX_THREADS"] = "12" from fastapi import FastAPI, HTTPException, Depends, Body, status, Response from fastapi.middleware.cors import CORSMiddleware from contextlib import asynccontextmanager -from zoneinfo import ZoneInfo + import logging from logging.handlers import RotatingFileHandler @@ -12,8 +12,7 @@ from datetime import timedelta, datetime import pathlib from modules.shared.configuration import APP_CONFIG -from apscheduler.schedulers.asyncio import AsyncIOScheduler -from apscheduler.triggers.cron import CronTrigger +from modules.shared.eventManagement import eventManager class 
DailyRotatingFileHandler(RotatingFileHandler): @@ -202,46 +201,15 @@ instanceLabel = APP_CONFIG.get("APP_ENV_LABEL") # Define lifespan context manager for application startup/shutdown events @asynccontextmanager async def lifespan(app: FastAPI): - # Startup logic logger.info("Application is starting up") - - # Setup APScheduler for JIRA sync - scheduler = AsyncIOScheduler(timezone=ZoneInfo("Europe/Zurich")) - try: - from modules.features.syncDelta.mainSyncDelta import perform_sync_jira_delta_group - # Schedule sync every 20 minutes (at minutes 00, 20, 40) - scheduler.add_job( - perform_sync_jira_delta_group, - CronTrigger(minute="0,20,40"), - id="jira_delta_group_sync", - replace_existing=True, - coalesce=True, - max_instances=1, - misfire_grace_time=1800, - ) - scheduler.start() - logger.info("APScheduler started (jira_delta_group_sync every 20 minutes at 00, 20, 40)") - - # Run initial sync on startup (non-blocking failure) - try: - logger.info("Running initial JIRA sync on app startup...") - await perform_sync_jira_delta_group() - logger.info("Initial JIRA sync completed successfully") - except Exception as e: - logger.error(f"Initial JIRA sync failed: {str(e)}") - except Exception as e: - logger.error(f"Failed to initialize scheduler or JIRA sync: {str(e)}") - + eventManager.start() yield - - # Shutdown logic + eventManager.stop() logger.info("Application has been shut down") - try: - if 'scheduler' in locals() and scheduler.running: - scheduler.shutdown(wait=False) - logger.info("APScheduler stopped") - except Exception as e: - logger.error(f"Error shutting down scheduler: {str(e)}") + + + + # START APP app = FastAPI( @@ -250,7 +218,6 @@ app = FastAPI( lifespan=lifespan ) - # Parse CORS origins from environment variable def get_allowed_origins(): origins_str = APP_CONFIG.get("APP_ALLOWED_ORIGINS", "http://localhost:8080") diff --git a/modules/features/chatPlayground/mainChatPlayground.py b/modules/features/chatPlayground/mainChatPlayground.py index 07d43043..6fa9ab1a 100644 --- a/modules/features/chatPlayground/mainChatPlayground.py +++ b/modules/features/chatPlayground/mainChatPlayground.py @@ -24,8 +24,11 @@ async def chatStart(interfaceChat, currentUser: User, userInput: UserInputReques """ try: from modules.workflows.workflowManager import WorkflowManager - workflowManager = WorkflowManager(interfaceChat, currentUser) - return await workflowManager.workflowStart(userInput, workflowId, workflowMode) + from modules.services import getInterface as getServices + services = getServices(currentUser, None) + workflowManager = WorkflowManager(services) + workflow = await workflowManager.workflowStart(userInput, workflowId, workflowMode) + return workflow except Exception as e: logger.error(f"Error starting chat: {str(e)}") raise @@ -34,7 +37,9 @@ async def chatStop(interfaceChat, currentUser: User, workflowId: str) -> ChatWor """Stops a running chat.""" try: from modules.workflows.workflowManager import WorkflowManager - workflowManager = WorkflowManager(interfaceChat, currentUser) + from modules.services import getInterface as getServices + services = getServices(currentUser, None) + workflowManager = WorkflowManager(services) return await workflowManager.workflowStop(workflowId) except Exception as e: logger.error(f"Error stopping chat: {str(e)}") diff --git a/modules/features/neutralizePlayground/mainNeutralizePlayground.py b/modules/features/neutralizePlayground/mainNeutralizePlayground.py index 877ca8aa..4b48a495 100644 --- 
a/modules/features/neutralizePlayground/mainNeutralizePlayground.py +++ b/modules/features/neutralizePlayground/mainNeutralizePlayground.py @@ -1,587 +1,285 @@ -""" -Data Neutralization Service -Handles file processing for data neutralization including SharePoint integration -""" - import logging -import os -import uuid -from typing import Dict, List, Any, Optional, Tuple -from datetime import datetime -from pathlib import Path -import mimetypes +from typing import Any, Dict, List, Optional -from modules.interfaces.interfaceAppObjects import getInterface -from modules.interfaces.interfaceAppModel import User, DataNeutraliserConfig, DataNeutralizerAttributes -from modules.services.serviceNeutralization.neutralizer import DataAnonymizer -from modules.shared.timezoneUtils import get_utc_timestamp +from modules.interfaces.interfaceAppModel import User +from modules.services.serviceNeutralization.mainNeutralization import NeutralizationService logger = logging.getLogger(__name__) -class NeutralizationService: - """Service for handling data neutralization operations""" - - def __init__(self, current_user: User): - """Initialize the service with user context""" - self.current_user = current_user - self.app_interface = getInterface(current_user) - - def get_config(self) -> Optional[DataNeutraliserConfig]: - """Get the neutralization configuration for the current user's mandate""" - return self.app_interface.getNeutralizationConfig() - - def save_config(self, config_data: Dict[str, Any]) -> DataNeutraliserConfig: - """Save or update the neutralization configuration""" - return self.app_interface.createOrUpdateNeutralizationConfig(config_data) - - def neutralize_text(self, text: str, file_id: Optional[str] = None) -> Dict[str, Any]: - """Neutralize text content and return results with attribute mappings""" - return self.app_interface.neutralizeText(text, file_id) - - def get_attributes(self, file_id: Optional[str] = None) -> List[DataNeutralizerAttributes]: - """Get neutralization attributes, optionally filtered by file ID""" - return self.app_interface.getNeutralizationAttributes(file_id) - - def resolve_text(self, text: str) -> str: - """Resolve UIDs in neutralized text back to original text""" - return self.app_interface.resolveNeutralizedText(text) - - async def process_sharepoint_files(self, source_path: str, target_path: str) -> Dict[str, Any]: - """ - Process files from SharePoint source path, neutralize them, and store in target path - - Args: - source_path: SharePoint path to read files from - target_path: SharePoint path to store neutralized files - - Returns: - Dictionary with processing results - """ + +class NeutralizationPlayground: + """Feature/UI wrapper around NeutralizationService for playground & routes.""" + + def __init__(self, currentUser: User): + self.currentUser = currentUser + self.service = NeutralizationService(currentUser) + + def processText(self, text: str) -> Dict[str, Any]: + return self.service.processText(text) + + def processFiles(self, fileIds: List[str]) -> Dict[str, Any]: + results: List[Dict[str, Any]] = [] + errors: List[str] = [] + for fileId in fileIds: + try: + res = self.service.processFile(fileId) + results.append({ + 'file_id': fileId, + 'neutralized_file_name': res.get('neutralized_file_name'), + 'attributes_count': len(res.get('attributes', [])) + }) + except Exception as e: + logger.error(f"Error processing file {fileId}: {str(e)}") + errors.append(f"{fileId}: {str(e)}") + return { + 'success': len(errors) == 0, + 'total_files': len(fileIds), + 
'successful_files': len(results), + 'failed_files': len(errors), + 'results': results, + 'errors': errors, + } + + async def processSharepointFiles(self, sourcePath: str, targetPath: str) -> Dict[str, Any]: + from modules.features.neutralizePlayground.sharepoint import SharepointProcessor + processor = SharepointProcessor(self.currentUser, self.service) + return await processor.processSharepointFiles(sourcePath, targetPath) + + # Cleanup attributes + def cleanAttributes(self, fileId: str) -> bool: + if not self.service.app_interface: + return False + return self.service.app_interface.deleteNeutralizationAttributes(fileId) + + # Stats + def getStats(self) -> Dict[str, Any]: try: - logger.info(f"Processing SharePoint files from {source_path} to {target_path}") - - # Get user's SharePoint connection that matches the source path - sharepoint_connection = await self._get_sharepoint_connection(source_path) - if not sharepoint_connection: + allAttributes = self.service._getAttributes() + patternCounts: Dict[str, int] = {} + for attr in allAttributes: + patternType = attr.patternType + patternCounts[patternType] = patternCounts.get(patternType, 0) + 1 + uniqueFiles = set(attr.fileId for attr in allAttributes if attr.fileId) + return { + 'total_attributes': len(allAttributes), + 'unique_files': len(uniqueFiles), + 'pattern_counts': patternCounts, + 'mandate_id': self.currentUser.mandateId if self.currentUser else None, + } + except Exception as e: + logger.error(f"Error getting stats: {str(e)}") + return { + 'total_attributes': 0, + 'unique_files': 0, + 'pattern_counts': {}, + 'error': str(e), + } + + +# Internal SharePoint helper module separated to keep feature logic tidy +class SharepointProcessor: + def __init__(self, currentUser: User, service: NeutralizationService): + self.currentUser = currentUser + self.service = service + + async def processSharepointFiles(self, sourcePath: str, targetPath: str) -> Dict[str, Any]: + try: + logger.info(f"Processing SharePoint files from {sourcePath} to {targetPath}") + connection = await self._getSharepointConnection(sourcePath) + if not connection: return { - "success": False, - "message": "No SharePoint connection found for user", - "processed_files": 0, - "errors": ["No SharePoint connection found"] + 'success': False, + 'message': 'No SharePoint connection found for user', + 'processed_files': 0, + 'errors': ['No SharePoint connection found'], } - - logger.info(f"Using SharePoint connection: {sharepoint_connection.get('id')} for path: {source_path}") - - # Get SharePoint access token - sharepoint_token = self.app_interface.getConnectionToken(sharepoint_connection["id"]) - if not sharepoint_token: + from modules.security.tokenManager import TokenManager + token = TokenManager().getFreshToken(self.service.app_interface, connection['id']) + if not token: return { - "success": False, - "message": "No SharePoint access token found", - "processed_files": 0, - "errors": ["No SharePoint access token found"] + 'success': False, + 'message': 'No SharePoint access token found', + 'processed_files': 0, + 'errors': ['No SharePoint access token found'], } - - # Process files asynchronously - return await self._process_sharepoint_files_async( - source_path, target_path, sharepoint_token.tokenAccess - ) - + return await self._processSharepointFilesAsync(sourcePath, targetPath, token.tokenAccess) except Exception as e: logger.error(f"Error processing SharePoint files: {str(e)}") return { - "success": False, - "message": f"Error processing SharePoint files: {str(e)}", - 
"processed_files": 0, - "errors": [str(e)] + 'success': False, + 'message': f'Error processing SharePoint files: {str(e)}', + 'processed_files': 0, + 'errors': [str(e)], } - - async def _get_sharepoint_connection(self, sharepoint_path: str = None): - """Get user's SharePoint connection that matches the given path""" + + async def _getSharepointConnection(self, sharepointPath: str = None): try: - # Get all user connections from modules.interfaces.interfaceAppModel import UserConnection - connections = self.app_interface.db.getRecordset( + connections = self.service.app_interface.db.getRecordset( UserConnection, - recordFilter={"userId": self.app_interface.userId} + recordFilter={"userId": self.service.app_interface.userId} ) - - # Find all Microsoft connections - msft_connections = [conn for conn in connections if conn.get("authority") == "msft"] - - if not msft_connections: - logger.warning("No Microsoft connections found for user") + msftConnections = [c for c in connections if c.get('authority') == 'msft'] + if not msftConnections: + logger.warning('No Microsoft connections found for user') return None - - if len(msft_connections) == 1: - logger.info(f"Found single Microsoft connection: {msft_connections[0].get('id')}") - return msft_connections[0] - - # If multiple connections and we have a path, try to match - if sharepoint_path: - return await self._match_connection_to_path(msft_connections, sharepoint_path) - - # If no path provided, return the first one - logger.info(f"Multiple Microsoft connections found, using first one: {msft_connections[0].get('id')}") - return msft_connections[0] - - except Exception as e: - logger.error(f"Error getting SharePoint connection: {str(e)}") + if len(msftConnections) == 1: + logger.info(f"Found single Microsoft connection: {msftConnections[0].get('id')}") + return msftConnections[0] + if sharepointPath: + return await self._matchConnectionToPath(msftConnections, sharepointPath) + logger.info(f"Multiple Microsoft connections found, using first one: {msftConnections[0].get('id')}") + return msftConnections[0] + except Exception: + logger.error('Error getting SharePoint connection') return None - - async def _match_connection_to_path(self, connections: list, sharepoint_path: str): - """Match a connection to the SharePoint path by testing access""" + + async def _matchConnectionToPath(self, connections: list, sharepointPath: str): try: - # Extract domain from the path from urllib.parse import urlparse - parsed_url = urlparse(sharepoint_path) - target_domain = parsed_url.netloc.lower() - - logger.info(f"Looking for connection matching domain: {target_domain}") - - # Try each connection to see which one can access the site + targetDomain = urlparse(sharepointPath).netloc.lower() + logger.info(f"Looking for connection matching domain: {targetDomain}") + from modules.security.tokenManager import TokenManager for connection in connections: try: - # Get token for this connection - token = self.app_interface.getConnectionToken(connection["id"]) + token = TokenManager().getFreshToken(self.service.app_interface, connection['id']) if not token: continue - - # Test if this connection can access the SharePoint site - if await self._test_sharepoint_access(token.tokenAccess, sharepoint_path): - logger.info(f"Found matching connection for domain {target_domain}: {connection.get('id')}") + if await self._testSharepointAccess(token.tokenAccess, sharepointPath): + logger.info(f"Found matching connection for domain {targetDomain}: {connection.get('id')}") return 
connection - - except Exception as e: + except Exception: continue - - # If no specific match found, return the first connection - logger.warning(f"No specific connection match found for {target_domain}, using first available") + logger.warning(f"No specific connection match found for {targetDomain}, using first available") return connections[0] - - except Exception as e: - logger.error(f"Error matching connection to path: {str(e)}") + except Exception: + logger.error('Error matching connection to path') return connections[0] if connections else None - - async def _test_sharepoint_access(self, access_token: str, sharepoint_path: str) -> bool: - """Test if the access token can access the given SharePoint path""" + + async def _testSharepointAccess(self, accessToken: str, sharepointPath: str) -> bool: try: - return await self._test_sharepoint_access_async(access_token, sharepoint_path) - except Exception as e: + return await self._testSharepointAccessAsync(accessToken, sharepointPath) + except Exception: return False - - async def _test_sharepoint_access_async(self, access_token: str, sharepoint_path: str) -> bool: - """Async test for SharePoint access""" + + async def _testSharepointAccessAsync(self, accessToken: str, sharepointPath: str) -> bool: try: - from modules.connectors.connectorSharepoint import ConnectorSharepoint - - connector = ConnectorSharepoint(access_token=access_token) - - # Parse the path to get site URL - site_url, _ = self._parse_sharepoint_path(sharepoint_path) - if not site_url: + from modules.services.serviceSharepoint.mainSharepoint import SharepointService + connector = SharepointService(access_token=accessToken) + siteUrl, _ = self._parseSharepointPath(sharepointPath) + if not siteUrl: return False - - # Try to find the site - site_info = await connector.find_site_by_web_url(site_url) - return site_info is not None - - except Exception as e: + siteInfo = await connector.find_site_by_web_url(siteUrl) + return siteInfo is not None + except Exception: return False - - async def _process_sharepoint_files_async(self, source_path: str, target_path: str, access_token: str) -> Dict[str, Any]: - """Process SharePoint files asynchronously""" + + async def _processSharepointFilesAsync(self, sourcePath: str, targetPath: str, accessToken: str) -> Dict[str, Any]: try: import asyncio - from modules.connectors.connectorSharepoint import ConnectorSharepoint - - # Initialize SharePoint connector - connector = ConnectorSharepoint(access_token=access_token) - - # Parse source and target paths to extract site and folder info - source_site, source_folder = self._parse_sharepoint_path(source_path) - target_site, target_folder = self._parse_sharepoint_path(target_path) - - if not source_site or not target_site: - return { - "success": False, - "message": "Invalid SharePoint path format", - "processed_files": 0, - "errors": ["Invalid SharePoint path format"] - } - - # Find source site - source_site_info = await connector.find_site_by_web_url(source_site) - if not source_site_info: - return { - "success": False, - "message": f"Source site not found: {source_site}", - "processed_files": 0, - "errors": [f"Source site not found: {source_site}"] - } - - # Find target site - target_site_info = await connector.find_site_by_web_url(target_site) - if not target_site_info: - return { - "success": False, - "message": f"Target site not found: {target_site}", - "processed_files": 0, - "errors": [f"Target site not found: {target_site}"] - } - - # List files in source folder - logger.info(f"Listing files 
in folder: {source_folder} for site: {source_site_info['id']}") - files = await connector.list_folder_contents(source_site_info["id"], source_folder) - - # If no files found, try listing the root folder to see what's available + from modules.services.serviceSharepoint.mainSharepoint import SharepointService + connector = SharepointService(access_token=accessToken) + sourceSite, sourceFolder = self._parseSharepointPath(sourcePath) + targetSite, targetFolder = self._parseSharepointPath(targetPath) + if not sourceSite or not targetSite: + return {'success': False, 'message': 'Invalid SharePoint path format', 'processed_files': 0, 'errors': ['Invalid SharePoint path format']} + sourceSiteInfo = await connector.find_site_by_web_url(sourceSite) + if not sourceSiteInfo: + return {'success': False, 'message': f'Source site not found: {sourceSite}', 'processed_files': 0, 'errors': [f'Source site not found: {sourceSite}']} + targetSiteInfo = await connector.find_site_by_web_url(targetSite) + if not targetSiteInfo: + return {'success': False, 'message': f'Target site not found: {targetSite}', 'processed_files': 0, 'errors': [f'Target site not found: {targetSite}']} + logger.info(f"Listing files in folder: {sourceFolder} for site: {sourceSiteInfo['id']}") + files = await connector.list_folder_contents(sourceSiteInfo['id'], sourceFolder) if not files: - logger.warning(f"No files found in folder '{source_folder}', trying root folder") - files = await connector.list_folder_contents(source_site_info["id"], "") - + logger.warning(f"No files found in folder '{sourceFolder}', trying root folder") + files = await connector.list_folder_contents(sourceSiteInfo['id'], '') if files: - # List available folders for debugging - folders = [f for f in files if f.get("type") == "folder"] - folder_names = [f.get('name') for f in folders] - logger.info(f"Available folders in root: {folder_names}") - - # Format folder list for better UI display - folder_list = ", ".join(folder_names) if folder_names else "None" - + folders = [f for f in files if f.get('type') == 'folder'] + folderNames = [f.get('name') for f in folders] + logger.info(f"Available folders in root: {folderNames}") + folderList = ", ".join(folderNames) if folderNames else "None" return { - "success": False, - "message": f"Folder '{source_folder}' not found. Available folders in root: {folder_list}", - "processed_files": 0, - "errors": [f"Folder '{source_folder}' not found. Available folders: {folder_list}"], - "available_folders": folder_names + 'success': False, + 'message': f"Folder '{sourceFolder}' not found. Available folders in root: {folderList}", + 'processed_files': 0, + 'errors': [f"Folder '{sourceFolder}' not found. 
Available folders: {folderList}"], + 'available_folders': folderNames, } else: - return { - "success": False, - "message": f"No files found in source folder: {source_folder}", - "processed_files": 0, - "errors": [f"No files found in source folder: {source_folder}"] - } - - # Filter for text files only - text_files = [f for f in files if f.get("type") == "file" and self._is_text_file(f.get("name", ""))] - - if not text_files: - return { - "success": False, - "message": "No text files found in source folder", - "processed_files": 0, - "errors": ["No text files found in source folder"] - } - - # Process files in parallel for better performance - processed_files = [] - errors = [] - - # Create tasks for parallel processing - async def process_single_file(file_info): - """Process a single file - download, neutralize, upload""" + return {'success': False, 'message': f'No files found in source folder: {sourceFolder}', 'processed_files': 0, 'errors': [f'No files found in source folder: {sourceFolder}']} + + textFiles = [f for f in files if f.get('type') == 'file'] + processed: List[Dict[str, Any]] = [] + errors: List[str] = [] + + async def _processSingle(fileInfo: Dict[str, Any]): try: - # Download file - file_content = await connector.download_file(source_site_info["id"], file_info["id"]) - if not file_content: - return {"error": f"Failed to download file: {file_info['name']}"} - - # Convert to text + fileContent = await connector.download_file(sourceSiteInfo['id'], fileInfo['id']) + if not fileContent: + return {'error': f"Failed to download file: {fileInfo['name']}"} try: - text_content = file_content.decode('utf-8') + textContent = fileContent.decode('utf-8') except UnicodeDecodeError: - text_content = file_content.decode('latin-1') - - # Neutralize the text - neutralization_result = self.app_interface.neutralizeText(text_content, file_info["id"]) - - # Create neutralized filename - neutralized_filename = f"neutralized_{file_info['name']}" - - # Upload neutralized file - neutralized_content = neutralization_result["neutralized_text"].encode('utf-8') - upload_result = await connector.upload_file( - target_site_info["id"], - target_folder, - neutralized_filename, - neutralized_content - ) - - if "error" in upload_result: - return {"error": f"Failed to upload neutralized file: {neutralized_filename} - {upload_result['error']}"} - else: - return { - "success": True, - "original_name": file_info["name"], - "neutralized_name": neutralized_filename, - "attributes_count": len(neutralization_result.get("attributes", [])) - } - + textContent = fileContent.decode('latin-1') + result = self.service._neutralizeText(textContent, 'text') + neutralizedFilename = f"neutralized_{fileInfo['name']}" + uploadResult = await connector.upload_file(targetSiteInfo['id'], targetFolder, neutralizedFilename, result['neutralized_text'].encode('utf-8')) + if 'error' in uploadResult: + return {'error': f"Failed to upload neutralized file: {neutralizedFilename} - {uploadResult['error']}"} + return { + 'success': True, + 'original_name': fileInfo['name'], + 'neutralized_name': neutralizedFilename, + 'attributes_count': len(result.get('attributes', [])), + } except Exception as e: - error_msg = f"Error processing file {file_info['name']}: {str(e)}" - logger.error(error_msg) - return {"error": error_msg} - - # Process all files in parallel - logger.info(f"Processing {len(text_files)} files in parallel...") - tasks = [process_single_file(file_info) for file_info in text_files] + return {'error': f"Error processing file 
{fileInfo['name']}: {str(e)}"} + + tasks = [ _processSingle(f) for f in textFiles ] results = await asyncio.gather(*tasks, return_exceptions=True) - - # Process results - for i, result in enumerate(results): - if isinstance(result, Exception): - error_msg = f"Exception processing file {text_files[i]['name']}: {str(result)}" - errors.append(error_msg) - logger.error(error_msg) - elif isinstance(result, dict) and "error" in result: - errors.append(result["error"]) - elif isinstance(result, dict) and result.get("success"): - processed_files.append({ - "original_name": result["original_name"], - "neutralized_name": result["neutralized_name"], - "attributes_count": result["attributes_count"] + for i, r in enumerate(results): + if isinstance(r, Exception): + errors.append(f"Exception processing file {textFiles[i]['name']}: {str(r)}") + elif isinstance(r, dict) and 'error' in r: + errors.append(r['error']) + elif isinstance(r, dict) and r.get('success'): + processed.append({ + 'original_name': r['original_name'], + 'neutralized_name': r['neutralized_name'], + 'attributes_count': r['attributes_count'], }) - logger.info(f"Successfully processed file: {result['original_name']} -> {result['neutralized_name']}") else: - error_msg = f"Unknown result processing file {text_files[i]['name']}: {result}" - errors.append(error_msg) - logger.error(error_msg) - + errors.append(f"Unknown result processing file {textFiles[i]['name']}: {r}") return { - "success": len(processed_files) > 0, - "message": f"Processed {len(processed_files)} files successfully", - "processed_files": len(processed_files), - "files": processed_files, - "errors": errors + 'success': len(processed) > 0, + 'message': f"Processed {len(processed)} files successfully", + 'processed_files': len(processed), + 'files': processed, + 'errors': errors, } - except Exception as e: logger.error(f"Error in async SharePoint processing: {str(e)}") - return { - "success": False, - "message": f"Error in async SharePoint processing: {str(e)}", - "processed_files": 0, - "errors": [str(e)] - } - - def _parse_sharepoint_path(self, path: str) -> tuple[str, str]: - """Parse SharePoint path to extract site URL and folder path""" + return {'success': False, 'message': f'Error in async SharePoint processing: {str(e)}', 'processed_files': 0, 'errors': [str(e)]} + + def _parseSharepointPath(self, path: str) -> tuple[str, str]: try: - # Expected format: https://domain.sharepoint.com/sites/sitename/folder/path - if not path.startswith("https://"): + if not path.startswith('https://'): return None, None - - # Remove query parameters - if "?" in path: - path = path.split("?")[0] - - # Split by /sites/ - if "/sites/" not in path: + if '?' 
in path: + path = path.split('?')[0] + if '/sites/' not in path: return None, None - - parts = path.split("/sites/", 1) + parts = path.split('/sites/', 1) if len(parts) != 2: return None, None - - # Extract domain and site name - domain = parts[0].replace("https://", "") - site_name = parts[1].split("/")[0] - - # Create proper site URL for Graph API - site_url = f"https://{domain}/sites/{site_name}" - - # Extract folder path (everything after the site name) - folder_parts = parts[1].split("/")[1:] - folder_path = "/".join(folder_parts) if folder_parts else "" - - # URL decode the folder path + domain = parts[0].replace('https://', '') + siteName = parts[1].split('/')[0] + siteUrl = f"https://{domain}/sites/{siteName}" + folderParts = parts[1].split('/')[1:] from urllib.parse import unquote - folder_path = unquote(folder_path) - - - return site_url, folder_path - - except Exception as e: - logger.error(f"Error parsing SharePoint path '{path}': {str(e)}") + folderPath = unquote('/'.join(folderParts) if folderParts else '') + return siteUrl, folderPath + except Exception: + logger.error(f"Error parsing SharePoint path '{path}'") return None, None - - def _is_text_file(self, filename: str) -> bool: - """Check if file is a text file based on extension""" - text_extensions = [ - '.txt', '.csv', '.json', '.xml', '.md', '.log', - '.doc', '.docx', '.rtf', '.odt', # Document formats - '.html', '.htm', '.css', '.js', '.ts', '.py', '.java', '.cpp', '.c', '.h', # Code files - '.ini', '.cfg', '.conf', '.properties', # Config files - '.sql', '.yaml', '.yml', '.toml', # Data/config files - '.ps1', '.bat', '.sh', '.bash' # Script files - ] - return any(filename.lower().endswith(ext) for ext in text_extensions) - - def process_file_content(self, file_content: bytes, file_name: str, mime_type: str) -> Dict[str, Any]: - """ - Process file content for neutralization - - Args: - file_content: Binary file content - file_name: Name of the file - mime_type: MIME type of the file - - Returns: - Dictionary with neutralization results - """ - try: - # Determine content type based on MIME type - content_type = self._get_content_type_from_mime(mime_type) - - # Decode content to text - try: - text_content = file_content.decode('utf-8') - except UnicodeDecodeError: - # Try with different encodings - for encoding in ['latin-1', 'cp1252', 'iso-8859-1']: - try: - text_content = file_content.decode(encoding) - break - except UnicodeDecodeError: - continue - else: - raise ValueError("Unable to decode file content") - - # Generate a temporary file ID for tracking - temp_file_id = str(uuid.uuid4()) - - # Neutralize the content - neutralization_result = self.neutralize_text(text_content, temp_file_id) - - # Encode the neutralized content back to bytes - neutralized_content = neutralization_result["neutralized_text"].encode('utf-8') - - # Generate neutralized file name - neutralized_file_name = f"neutralized_{file_name}" - - return { - "success": True, - "original_content": text_content, - "neutralized_content": neutralization_result["neutralized_text"], - "neutralized_file_name": neutralized_file_name, - "attributes": neutralization_result["attributes"], - "mapping": neutralization_result["mapping"], - "file_id": temp_file_id - } - - except Exception as e: - logger.error(f"Error processing file content: {str(e)}") - return { - "success": False, - "error": str(e), - "original_content": None, - "neutralized_content": None - } - - def _get_content_type_from_mime(self, mime_type: str) -> str: - """Determine content type from MIME 
type for neutralization processing""" - if mime_type.startswith('text/'): - return 'text' - elif mime_type in ['application/json', 'application/xml', 'text/xml']: - return 'json' if 'json' in mime_type else 'xml' - elif mime_type in ['text/csv', 'application/csv']: - return 'csv' - else: - return 'text' # Default to text processing - - def batch_neutralize_files(self, files_data: List[Dict[str, Any]]) -> Dict[str, Any]: - """ - Process multiple files for neutralization - - Args: - files_data: List of dictionaries containing file information - Each dict should have: content, name, mime_type - - Returns: - Dictionary with batch processing results - """ - try: - results = [] - total_files = len(files_data) - successful_files = 0 - errors = [] - - for file_data in files_data: - try: - result = self.process_file_content( - file_data['content'], - file_data['name'], - file_data['mime_type'] - ) - - if result['success']: - successful_files += 1 - results.append({ - 'file_name': file_data['name'], - 'neutralized_file_name': result['neutralized_file_name'], - 'file_id': result['file_id'], - 'attributes_count': len(result['attributes']) - }) - else: - errors.append(f"Failed to process {file_data['name']}: {result['error']}") - - except Exception as e: - error_msg = f"Error processing {file_data['name']}: {str(e)}" - errors.append(error_msg) - logger.error(error_msg) - - return { - "success": len(errors) == 0, - "total_files": total_files, - "successful_files": successful_files, - "failed_files": len(errors), - "results": results, - "errors": errors - } - - except Exception as e: - logger.error(f"Error in batch neutralization: {str(e)}") - return { - "success": False, - "total_files": len(files_data), - "successful_files": 0, - "failed_files": len(files_data), - "results": [], - "errors": [str(e)] - } - - def cleanup_file_attributes(self, file_id: str) -> bool: - """Clean up neutralization attributes for a specific file""" - return self.app_interface.deleteNeutralizationAttributes(file_id) - - def get_processing_stats(self) -> Dict[str, Any]: - """Get statistics about neutralization processing""" - try: - # Get all attributes for the current mandate - all_attributes = self.get_attributes() - - # Group by pattern type - pattern_counts = {} - for attr in all_attributes: - pattern_type = attr.patternType - pattern_counts[pattern_type] = pattern_counts.get(pattern_type, 0) + 1 - - # Get unique files - unique_files = set(attr.fileId for attr in all_attributes if attr.fileId) - - return { - "total_attributes": len(all_attributes), - "unique_files": len(unique_files), - "pattern_counts": pattern_counts, - "mandate_id": self.current_user.mandateId - } - - except Exception as e: - logger.error(f"Error getting processing stats: {str(e)}") - return { - "total_attributes": 0, - "unique_files": 0, - "pattern_counts": {}, - "error": str(e) - } diff --git a/modules/features/syncDelta/mainSyncDelta.py b/modules/features/syncDelta/mainSyncDelta.py index f2c5dbe6..3fac4f7b 100644 --- a/modules/features/syncDelta/mainSyncDelta.py +++ b/modules/features/syncDelta/mainSyncDelta.py @@ -11,7 +11,7 @@ import csv import io from datetime import datetime, UTC from typing import Dict, Any, List, Optional -from modules.connectors.connectorSharepoint import ConnectorSharepoint +from modules.services.serviceSharepoint.mainSharepoint import SharepointService from modules.connectors.connectorTicketJira import ConnectorTicketJira from modules.interfaces.interfaceAppObjects import getRootInterface from 
modules.interfaces.interfaceAppModel import UserInDB @@ -232,6 +232,10 @@ class ManagerSyncDelta: self.jira_connector = None self.sharepoint_connector = None self.target_site = None + # Initialize centralized services with root user + from modules.services import getInterface as getServices + root_user = self.root_interface.getUserByUsername("admin") + self.services = getServices(root_user, None) def get_sync_file_name(self) -> str: """Get the appropriate sync file name based on the sync mode.""" @@ -294,8 +298,9 @@ class ManagerSyncDelta: logger.info(f"Found SharePoint connection: {sharepoint_connection.id}") - # Get SharePoint token for this connection - sharepoint_token = self.root_interface.getConnectionToken(sharepoint_connection.id) + # Get fresh SharePoint token for this connection + from modules.security.tokenManager import TokenManager + sharepoint_token = TokenManager().getFreshToken(self.root_interface, sharepoint_connection.id) if not sharepoint_token: logger.error("No SharePoint token found for Delta Group user connection") return False @@ -303,7 +308,7 @@ class ManagerSyncDelta: logger.info(f"Found SharePoint token: {sharepoint_token.id}") # Initialize SharePoint connector with Graph API - self.sharepoint_connector = ConnectorSharepoint(access_token=sharepoint_token.tokenAccess) + self.sharepoint_connector = SharepointService(access_token=sharepoint_token.tokenAccess) # Resolve the site by hostname + site path to get the real site ID logger.info( @@ -552,3 +557,21 @@ async def perform_sync_jira_delta_group() -> bool: except Exception as e: logger.error(f"Error in perform_sync_jira_delta_group: {str(e)}") return False + +# Register scheduled job on import using the shared event manager +try: + from modules.shared.eventManagement import eventManager + + # Schedule sync every 20 minutes (at minutes 00, 20, 40) + eventManager.register_cron( + job_id="jira_delta_group_sync", + func=perform_sync_jira_delta_group, + cron_kwargs={"minute": "0,20,40"}, + replace_existing=True, + coalesce=True, + max_instances=1, + misfire_grace_time=1800, + ) + logger.info("Registered jira_delta_group_sync via EventManagement (every 20 minutes)") +except Exception as e: + logger.error(f"Failed to register jira_delta_group_sync: {str(e)}") \ No newline at end of file diff --git a/modules/interfaces/interfaceAiCalls.py b/modules/interfaces/interfaceAiCalls.py deleted file mode 100644 index 6f0de9c9..00000000 --- a/modules/interfaces/interfaceAiCalls.py +++ /dev/null @@ -1,527 +0,0 @@ -import logging -from typing import Dict, Any, List, Union, Optional -from modules.connectors.connectorAiOpenai import AiOpenai, ContextLengthExceededException -from modules.connectors.connectorAiAnthropic import AiAnthropic -from modules.services.serviceDocument.documentExtraction import DocumentExtraction -from modules.interfaces.interfaceChatModel import ChatDocument - -logger = logging.getLogger(__name__) - -# AI Model Registry with Performance Data -AI_MODELS = { - "openai_gpt4o": { - "connector": "openai", - "max_tokens": 128000, - "cost_per_1k_tokens": 0.03, # Input - "cost_per_1k_tokens_output": 0.06, # Output - "speed_rating": 8, # 1-10 - "quality_rating": 9, # 1-10 - "supports_images": True, - "supports_documents": True, - "context_length": 128000, - "model_name": "gpt-4o" - }, - "openai_gpt35": { - "connector": "openai", - "max_tokens": 16000, - "cost_per_1k_tokens": 0.0015, - "cost_per_1k_tokens_output": 0.002, - "speed_rating": 9, - "quality_rating": 7, - "supports_images": False, - "supports_documents": True, 
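
The hunk above registers the Jira delta sync at minutes 0, 20, and 40 on import. A hedged sketch of reusing that registration pattern for another job, assuming eventManager.register_cron keeps the keyword arguments shown in this patch; the job id, schedule, and function below are illustrative only:

import logging

logger = logging.getLogger(__name__)

async def perform_nightly_cleanup() -> bool:
    """Hypothetical job body; replace with real work."""
    return True

try:
    from modules.shared.eventManagement import eventManager

    eventManager.register_cron(
        job_id="nightly_cleanup",                  # unique id; replace_existing avoids duplicates on reload
        func=perform_nightly_cleanup,
        cron_kwargs={"hour": "3", "minute": "0"},  # once per day at 03:00
        replace_existing=True,
        coalesce=True,                             # collapse missed runs into one
        max_instances=1,                           # never overlap with itself
        misfire_grace_time=1800,                   # still run if up to 30 minutes late
    )
    logger.info("Registered nightly_cleanup via EventManagement")
except Exception as e:
    logger.error(f"Failed to register nightly_cleanup: {str(e)}")
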
- "context_length": 16000, - "model_name": "gpt-3.5-turbo" - }, - "anthropic_claude": { - "connector": "anthropic", - "max_tokens": 200000, - "cost_per_1k_tokens": 0.015, - "cost_per_1k_tokens_output": 0.075, - "speed_rating": 7, - "quality_rating": 10, - "supports_images": True, - "supports_documents": True, - "context_length": 200000, - "model_name": "claude-3-sonnet-20240229" - } -} - -class AiCalls: - """Interface for AI service interactions with centralized call method""" - - def __init__(self): - self.openaiService = AiOpenai() - self.anthropicService = AiAnthropic() - self.document_extractor = DocumentExtraction() - - async def callAi( - self, - prompt: str, - documents: List[ChatDocument] = None, - operation_type: str = "general", - priority: str = "balanced", # "speed", "quality", "cost", "balanced" - compress_prompt: bool = True, - compress_documents: bool = True, - process_documents_individually: bool = False, - max_cost: float = None, - max_processing_time: int = None - ) -> str: - """ - Zentrale AI Call Methode mit intelligenter Modell-Auswahl und Content-Verarbeitung. - - Args: - prompt: Der Hauptprompt für die AI - documents: Liste von Dokumenten zur Verarbeitung - operation_type: Art der Operation ("general", "document_analysis", "image_analysis", etc.) - priority: Priorität für Modell-Auswahl ("speed", "quality", "cost", "balanced") - compress_prompt: Ob der Prompt komprimiert werden soll - compress_documents: Ob Dokumente komprimiert werden sollen - process_documents_individually: Ob Dokumente einzeln verarbeitet werden sollen - max_cost: Maximale Kosten für den Call - max_processing_time: Maximale Verarbeitungszeit in Sekunden - - Returns: - AI Response als String - """ - try: - # 1. Dokumente verarbeiten falls vorhanden - document_content = "" - if documents: - document_content = await self._process_documents_for_ai( - documents, - operation_type, - compress_documents, - process_documents_individually - ) - - # 2. Bestes Modell basierend auf Priorität und Content auswählen - selected_model = self._select_optimal_model( - prompt, - document_content, - priority, - operation_type, - max_cost, - max_processing_time - ) - - # 3. Content für das gewählte Modell optimieren - optimized_prompt, optimized_content = await self._optimize_content_for_model( - prompt, - document_content, - selected_model, - compress_prompt, - compress_documents - ) - - # 4. 
AI Call mit Failover ausführen - return await self._execute_ai_call_with_failover( - selected_model, - optimized_prompt, - optimized_content - ) - - except Exception as e: - logger.error(f"Error in centralized AI call: {str(e)}") - return f"Error: {str(e)}" - - def _select_optimal_model( - self, - prompt: str, - document_content: str, - priority: str, - operation_type: str, - max_cost: float = None, - max_processing_time: int = None - ) -> str: - """Wählt das optimale Modell basierend auf Priorität und Content aus""" - - # Content-Größe berechnen - total_content_size = len(prompt.encode('utf-8')) + len(document_content.encode('utf-8')) - - # Verfügbare Modelle filtern - available_models = {} - for model_name, model_info in AI_MODELS.items(): - # Prüfe ob Modell für Content-Größe geeignet ist - if total_content_size > model_info["context_length"] * 0.8: # 80% für Content - continue - - # Prüfe Kosten-Limit - if max_cost: - estimated_cost = self._estimate_cost(model_info, total_content_size) - if estimated_cost > max_cost: - continue - - # Prüfe Operation-Type Kompatibilität - if operation_type == "image_analysis" and not model_info["supports_images"]: - continue - - available_models[model_name] = model_info - - if not available_models: - # Fallback zum kleinsten Modell - return "openai_gpt35" - - # Modell basierend auf Priorität auswählen - if priority == "speed": - return max(available_models.keys(), key=lambda x: available_models[x]["speed_rating"]) - elif priority == "quality": - return max(available_models.keys(), key=lambda x: available_models[x]["quality_rating"]) - elif priority == "cost": - return min(available_models.keys(), key=lambda x: available_models[x]["cost_per_1k_tokens"]) - else: # balanced - # Gewichtete Bewertung: 40% Qualität, 30% Geschwindigkeit, 30% Kosten - def balanced_score(model_name): - model_info = available_models[model_name] - quality_score = model_info["quality_rating"] * 0.4 - speed_score = model_info["speed_rating"] * 0.3 - cost_score = (10 - (model_info["cost_per_1k_tokens"] * 1000)) * 0.3 # Niedrigere Kosten = höherer Score - return quality_score + speed_score + cost_score - - return max(available_models.keys(), key=balanced_score) - - def _estimate_cost(self, model_info: Dict, content_size: int) -> float: - """Schätzt die Kosten für einen AI Call""" - # Grobe Schätzung: 1 Token ≈ 4 Zeichen - estimated_tokens = content_size / 4 - input_cost = (estimated_tokens / 1000) * model_info["cost_per_1k_tokens"] - output_cost = (estimated_tokens / 1000) * model_info["cost_per_1k_tokens_output"] * 0.1 # 10% für Output - return input_cost + output_cost - - async def _process_documents_for_ai( - self, - documents: List[ChatDocument], - operation_type: str, - compress_documents: bool, - process_individually: bool - ) -> str: - """Verarbeitet Dokumente für AI Call mit documentExtraction.py""" - - if not documents: - return "" - - processed_contents = [] - - for doc in documents: - try: - # Extrahiere Content mit documentExtraction.py - extracted = await self.document_extractor.processFileData( - doc.fileData, - doc.fileName, - doc.mimeType, - prompt=f"Extract relevant content for {operation_type}", - documentId=doc.id, - enableAI=True - ) - - # Kombiniere alle Content-Items - doc_content = [] - for content_item in extracted.contents: - if content_item.data and content_item.data.strip(): - doc_content.append(content_item.data) - - if doc_content: - combined_doc_content = "\n\n".join(doc_content) - - # Komprimiere falls gewünscht - if compress_documents and 
len(combined_doc_content.encode('utf-8')) > 10000: # 10KB Limit - combined_doc_content = await self._compress_content( - combined_doc_content, - 10000, - "document" - ) - - processed_contents.append(f"Document: {doc.fileName}\n{combined_doc_content}") - - except Exception as e: - logger.warning(f"Error processing document {doc.fileName}: {str(e)}") - processed_contents.append(f"Document: {doc.fileName}\n[Error processing document: {str(e)}]") - - return "\n\n---\n\n".join(processed_contents) - - async def _optimize_content_for_model( - self, - prompt: str, - document_content: str, - model_name: str, - compress_prompt: bool, - compress_documents: bool - ) -> tuple[str, str]: - """Optimiert Content für das gewählte Modell""" - - model_info = AI_MODELS[model_name] - max_content_size = model_info["context_length"] * 0.7 # 70% für Content - - optimized_prompt = prompt - optimized_content = document_content - - # Prompt komprimieren falls gewünscht - if compress_prompt and len(prompt.encode('utf-8')) > 2000: # 2KB Limit für Prompt - optimized_prompt = await self._compress_content(prompt, 2000, "prompt") - - # Dokument-Content komprimieren falls gewünscht - if compress_documents and document_content: - content_size = len(document_content.encode('utf-8')) - if content_size > max_content_size: - optimized_content = await self._compress_content( - document_content, - int(max_content_size), - "document" - ) - - return optimized_prompt, optimized_content - - async def _compress_content(self, content: str, target_size: int, content_type: str) -> str: - """Komprimiert Content intelligent basierend auf Typ""" - - if len(content.encode('utf-8')) <= target_size: - return content - - try: - # Verwende AI für intelligente Kompression - compression_prompt = f""" - Komprimiere den folgenden {content_type} auf maximal {target_size} Zeichen, - behalte aber alle wichtigen Informationen bei: - - {content} - - Gib nur den komprimierten Inhalt zurück, ohne zusätzliche Erklärungen. - """ - - # Verwende das schnellste verfügbare Modell für Kompression - compression_model = "openai_gpt35" - model_info = AI_MODELS[compression_model] - connector = getattr(self, f"{model_info['connector']}Service") - - messages = [{"role": "user", "content": compression_prompt}] - - if model_info["connector"] == "openai": - compressed = await connector.callAiBasic(messages) - else: - response = await connector.callAiBasic(messages) - compressed = response["choices"][0]["message"]["content"] - - return compressed - - except Exception as e: - logger.warning(f"AI compression failed, using truncation: {str(e)}") - # Fallback: Einfache Truncation - return content[:target_size] + "... 
[truncated]" - - async def _execute_ai_call_with_failover( - self, - model_name: str, - prompt: str, - document_content: str - ) -> str: - """Führt AI Call mit automatischem Failover aus""" - - try: - model_info = AI_MODELS[model_name] - connector = getattr(self, f"{model_info['connector']}Service") - - # Messages vorbereiten - messages = [] - if document_content: - messages.append({ - "role": "system", - "content": f"Context from documents:\n{document_content}" - }) - - messages.append({ - "role": "user", - "content": prompt - }) - - # AI Call ausführen - if model_info["connector"] == "openai": - return await connector.callAiBasic(messages) - else: # anthropic - response = await connector.callAiBasic(messages) - return response["choices"][0]["message"]["content"] - - except ContextLengthExceededException: - logger.warning(f"Context length exceeded for {model_name}, trying fallback") - # Fallback zu Modell mit größerem Context - fallback_model = self._find_fallback_model(model_name) - if fallback_model: - return await self._execute_ai_call_with_failover(fallback_model, prompt, document_content) - else: - # Letzter Ausweg: Content weiter komprimieren - compressed_prompt = await self._compress_content(prompt, 1000, "prompt") - compressed_content = await self._compress_content(document_content, 5000, "document") - return await self._execute_ai_call_with_failover("openai_gpt35", compressed_prompt, compressed_content) - - except Exception as e: - logger.warning(f"AI call failed with {model_name}: {e}") - # Allgemeiner Fallback - return await self._execute_ai_call_with_failover("openai_gpt35", prompt, document_content) - - def _find_fallback_model(self, current_model: str) -> Optional[str]: - """Findet ein Fallback-Modell mit größerem Context""" - current_context = AI_MODELS[current_model]["context_length"] - - # Suche Modell mit größerem Context - for model_name, model_info in AI_MODELS.items(): - if model_info["context_length"] > current_context: - return model_name - - return None - - # Legacy methods - - async def callAiTextBasic(self, prompt: str, context: Optional[str] = None) -> str: - """ - Basic text processing - now uses centralized AI call method. - - Args: - prompt: The user prompt to process - context: Optional system context/prompt - - Returns: - The AI response as text - """ - # Combine context with prompt if provided - full_prompt = prompt - if context: - full_prompt = f"Context: {context}\n\nUser Request: {prompt}" - - # Use centralized AI call with speed priority for basic calls - return await self.callAi( - prompt=full_prompt, - priority="speed", - compress_prompt=True, - compress_documents=False - ) - - async def callAiTextAdvanced(self, prompt: str, context: Optional[str] = None, _is_fallback: bool = False) -> str: - """ - Advanced text processing - now uses centralized AI call method. 
- - Args: - prompt: The user prompt to process - context: Optional system context/prompt - _is_fallback: Internal flag (kept for compatibility) - - Returns: - The AI response as text - """ - # Combine context with prompt if provided - full_prompt = prompt - if context: - full_prompt = f"Context: {context}\n\nUser Request: {prompt}" - - # Use centralized AI call with quality priority for advanced calls - return await self.callAi( - prompt=full_prompt, - priority="quality", - compress_prompt=False, - compress_documents=False - ) - - async def callAiImageBasic(self, prompt: str, imageData: Union[str, bytes], mimeType: str = None) -> str: - """ - Basic image processing - now uses centralized AI call method. - - Args: - prompt: The prompt for image analysis - imageData: The image data (file path or bytes) - mimeType: Optional MIME type of the image - - Returns: - The AI response as text - """ - try: - # For image processing, use the original connector directly - # as the centralized method doesn't handle images yet - return await self.openaiService.callAiImage(prompt, imageData, mimeType) - except Exception as e: - logger.error(f"Error in OpenAI image call: {str(e)}") - return f"Error: {str(e)}" - - async def callAiImageAdvanced(self, prompt: str, imageData: Union[str, bytes], mimeType: str = None) -> str: - """ - Advanced image processing - now uses centralized AI call method. - - Args: - prompt: The prompt for image analysis - imageData: The image data (file path or bytes) - mimeType: Optional MIME type of the image - - Returns: - The AI response as text - """ - try: - # For image processing, use the original connector directly - # as the centralized method doesn't handle images yet - return await self.anthropicService.callAiImage(prompt, imageData, mimeType) - except Exception as e: - logger.error(f"Error in Anthropic image call: {str(e)}") - return f"Error: {str(e)}" - - # Convenience methods for common use cases - - async def callAiForDocumentAnalysis( - self, - prompt: str, - documents: List[ChatDocument], - priority: str = "balanced" - ) -> str: - """Convenience method for document analysis""" - return await self.callAi( - prompt=prompt, - documents=documents, - operation_type="document_analysis", - priority=priority, - compress_documents=True, - process_documents_individually=False - ) - - async def callAiForReportGeneration( - self, - prompt: str, - documents: List[ChatDocument], - priority: str = "quality" - ) -> str: - """Convenience method for report generation""" - return await self.callAi( - prompt=prompt, - documents=documents, - operation_type="report_generation", - priority=priority, - compress_documents=True, - process_documents_individually=True - ) - - async def callAiForEmailComposition( - self, - prompt: str, - documents: List[ChatDocument] = None, - priority: str = "speed" - ) -> str: - """Convenience method for email composition""" - return await self.callAi( - prompt=prompt, - documents=documents, - operation_type="email_composition", - priority=priority, - compress_prompt=True, - compress_documents=True - ) - - async def callAiForTaskPlanning( - self, - prompt: str, - documents: List[ChatDocument] = None, - priority: str = "balanced" - ) -> str: - """Convenience method for task planning""" - return await self.callAi( - prompt=prompt, - documents=documents, - operation_type="task_planning", - priority=priority, - compress_prompt=False, - compress_documents=True - ) - diff --git a/modules/interfaces/interfaceAiModel.py b/modules/interfaces/interfaceAiModel.py new file 
mode 100644 index 00000000..6bd541b2 --- /dev/null +++ b/modules/interfaces/interfaceAiModel.py @@ -0,0 +1,30 @@ +from typing import Optional +from pydantic import BaseModel, Field + + +class AiCallOptions(BaseModel): + """Options for centralized AI processing (no document extraction here).""" + + operationType: str = Field(default="general", description="Type of operation") + priority: str = Field(default="balanced", description="speed|quality|cost|balanced") + compressPrompt: bool = Field(default=True, description="Whether to compress the prompt") + compressContext: bool = Field(default=True, description="Whether to compress optional context") + maxCost: Optional[float] = Field(default=None, description="Max cost budget") + maxProcessingTime: Optional[int] = Field(default=None, description="Max processing time in seconds") + + +class AiCallRequest(BaseModel): + """Centralized AI call request payload for interface use.""" + + prompt: str = Field(description="The user prompt") + context: Optional[str] = Field(default=None, description="Optional external context (e.g., extracted docs)") + options: AiCallOptions = Field(default_factory=AiCallOptions) + + +class AiCallResponse(BaseModel): + """Standardized AI call response.""" + + content: str = Field(description="AI response content") + modelName: str = Field(description="Selected model name") + usedTokens: Optional[int] = Field(default=None, description="Estimated used tokens") + costEstimate: Optional[float] = Field(default=None, description="Estimated cost of the call") diff --git a/modules/interfaces/interfaceAiObjects.py b/modules/interfaces/interfaceAiObjects.py new file mode 100644 index 00000000..6f59eace --- /dev/null +++ b/modules/interfaces/interfaceAiObjects.py @@ -0,0 +1,117 @@ +import logging +from typing import Dict, Any, List + +from modules.connectors.connectorAiOpenai import AiOpenai +from modules.connectors.connectorAiAnthropic import AiAnthropic +from modules.interfaces.interfaceAiModel import AiCallOptions, AiCallRequest, AiCallResponse + + +logger = logging.getLogger(__name__) + + +# Local model registry (connectors specification) belongs in interface layer, not service +aiModels: Dict[str, Dict[str, Any]] = { + "openai_gpt4o": { + "connector": "openai", + "contextLength": 128000, + "costPer1kTokens": 0.03, + "costPer1kTokensOutput": 0.06, + "speedRating": 8, + "qualityRating": 9, + }, + "openai_gpt35": { + "connector": "openai", + "contextLength": 16000, + "costPer1kTokens": 0.0015, + "costPer1kTokensOutput": 0.002, + "speedRating": 9, + "qualityRating": 7, + }, + "anthropic_claude": { + "connector": "anthropic", + "contextLength": 200000, + "costPer1kTokens": 0.015, + "costPer1kTokensOutput": 0.075, + "speedRating": 7, + "qualityRating": 10, + }, +} + + +class AiObjects: + """Centralized AI interface: selects model and calls connector. 
No document handling.""" + + def __init__(self): + self.openaiService = AiOpenai() + self.anthropicService = AiAnthropic() + + def _estimateCost(self, modelInfo: Dict[str, Any], contentSize: int) -> float: + estimatedTokens = contentSize / 4 + inputCost = (estimatedTokens / 1000) * modelInfo["costPer1kTokens"] + outputCost = (estimatedTokens / 1000) * modelInfo["costPer1kTokensOutput"] * 0.1 + return inputCost + outputCost + + def _selectModel(self, prompt: str, context: str, options: AiCallOptions) -> str: + totalSize = len(prompt.encode("utf-8")) + len(context.encode("utf-8")) + candidates: Dict[str, Dict[str, Any]] = {} + for name, info in aiModels.items(): + if totalSize > info["contextLength"] * 0.8: + continue + if options.maxCost is not None: + if self._estimateCost(info, totalSize) > options.maxCost: + continue + candidates[name] = info + if not candidates: + return "openai_gpt35" + if options.priority == "speed": + return max(candidates, key=lambda k: candidates[k]["speedRating"]) + if options.priority == "quality": + return max(candidates, key=lambda k: candidates[k]["qualityRating"]) + if options.priority == "cost": + return min(candidates, key=lambda k: candidates[k]["costPer1kTokens"]) + def balancedScore(name: str) -> float: + info = candidates[name] + return info["qualityRating"] * 0.4 + info["speedRating"] * 0.3 + (10 - info["costPer1kTokens"] * 1000) * 0.3 + return max(candidates, key=balancedScore) + + def _connectorFor(self, modelName: str): + return self.openaiService if aiModels[modelName]["connector"] == "openai" else self.anthropicService + + async def call(self, request: AiCallRequest) -> AiCallResponse: + prompt = request.prompt + context = request.context or "" + options = request.options + + # Compress optionally (prompt/context) - simple truncation fallback kept here + def maybeTruncate(text: str, limit: int) -> str: + data = text.encode("utf-8") + if len(data) <= limit: + return text + return data[:limit].decode("utf-8", errors="ignore") + "... 
[truncated]" + + if options.compressPrompt and len(prompt.encode("utf-8")) > 2000: + prompt = maybeTruncate(prompt, 2000) + if options.compressContext and len(context.encode("utf-8")) > 70000: + context = maybeTruncate(context, 70000) + + modelName = self._selectModel(prompt, context, options) + + messages: List[Dict[str, Any]] = [] + if context: + messages.append({"role": "system", "content": f"Context from documents:\n{context}"}) + messages.append({"role": "user", "content": prompt}) + + connector = self._connectorFor(modelName) + if aiModels[modelName]["connector"] == "openai": + content = await connector.callAiBasic(messages) + else: + response = await connector.callAiBasic(messages) + content = response["choices"][0]["message"]["content"] + + # Estimate cost/tokens + totalSize = len((prompt + context).encode("utf-8")) + cost = self._estimateCost(aiModels[modelName], totalSize) + usedTokens = int(totalSize / 4) + + return AiCallResponse(content=content, modelName=modelName, usedTokens=usedTokens, costEstimate=cost) + diff --git a/modules/interfaces/interfaceAppObjects.py b/modules/interfaces/interfaceAppObjects.py index ccd471f1..069556cf 100644 --- a/modules/interfaces/interfaceAppObjects.py +++ b/modules/interfaces/interfaceAppObjects.py @@ -201,7 +201,6 @@ class AppObjects: """ return self.access.canModify(model_class, recordId) - def getInitialId(self, model_class: type) -> Optional[str]: """Returns the initial ID for a table.""" return self.db.getInitialId(model_class) @@ -268,105 +267,6 @@ class AppObjects: logger.error(f"Error getting user by ID: {str(e)}") return None - def getUserConnections(self, userId: str) -> List[UserConnection]: - """Returns all connections for a user.""" - try: - # Get connections for this user - connections = self.db.getRecordset(UserConnection, recordFilter={"userId": userId}) - - # Convert to UserConnection objects - result = [] - for conn_dict in connections: - try: - # Create UserConnection object - connection = UserConnection( - id=conn_dict["id"], - userId=conn_dict["userId"], - authority=conn_dict.get("authority"), - externalId=conn_dict.get("externalId", ""), - externalUsername=conn_dict.get("externalUsername", ""), - externalEmail=conn_dict.get("externalEmail"), - status=conn_dict.get("status", "pending"), - connectedAt=conn_dict.get("connectedAt"), - lastChecked=conn_dict.get("lastChecked"), - expiresAt=conn_dict.get("expiresAt") - ) - result.append(connection) - except Exception as e: - logger.error(f"Error converting connection dict to object: {str(e)}") - continue - return result - - except Exception as e: - logger.error(f"Error getting user connections: {str(e)}") - return [] - - def addUserConnection(self, userId: str, authority: AuthAuthority, externalId: str, - externalUsername: str, externalEmail: Optional[str] = None, - status: ConnectionStatus = ConnectionStatus.PENDING) -> UserConnection: - """ - Adds a new connection for a user. 
- - Args: - userId: The ID of the user - authority: The authentication authority (e.g., MSFT, GOOGLE) - externalId: The external ID from the authority - externalUsername: The username from the authority - externalEmail: Optional email from the authority - status: The connection status (defaults to PENDING) - - Returns: - The created UserConnection object - """ - try: - # Get the user - user = self.getUser(userId) - if not user: - raise ValueError(f"User not found: {userId}") - - # Create new connection with all required fields - connection = UserConnection( - id=str(uuid.uuid4()), - userId=userId, - authority=authority, - externalId=externalId, - externalUsername=externalUsername, - externalEmail=externalEmail, - status=status, - connectedAt=get_utc_timestamp(), - lastChecked=get_utc_timestamp(), - expiresAt=None # Optional field, set to None by default - ) - - # Save to connections table - self.db.recordCreate(UserConnection, connection) - - - return connection - - except Exception as e: - logger.error(f"Error adding user connection: {str(e)}") - raise ValueError(f"Failed to add user connection: {str(e)}") - - def removeUserConnection(self, connectionId: str) -> None: - """Remove a connection to an external service""" - try: - # Get connection - connections = self.db.getRecordset(UserConnection, recordFilter={ - "id": connectionId - }) - - if not connections: - raise ValueError(f"Connection {connectionId} not found") - - # Delete connection - self.db.recordDelete(UserConnection, connectionId) - - - except Exception as e: - logger.error(f"Error removing user connection: {str(e)}") - raise ValueError(f"Failed to remove user connection: {str(e)}") - def authenticateLocalUser(self, username: str, password: str) -> Optional[User]: """Authenticates a user by username and password using local authentication.""" # Clear the users table from cache and reload it @@ -551,6 +451,154 @@ class AppObjects: logger.error(f"Error deleting user: {str(e)}") raise ValueError(f"Failed to delete user: {str(e)}") + def _getInitialUser(self) -> Optional[Dict[str, Any]]: + """Get the initial user record directly from database without access control.""" + try: + initialUserId = self.getInitialId(UserInDB) + if not initialUserId: + return None + + users = self.db.getRecordset(UserInDB, recordFilter={"id": initialUserId}) + return users[0] if users else None + except Exception as e: + logger.error(f"Error getting initial user: {str(e)}") + return None + + def checkUsernameAvailability(self, checkData: Dict[str, Any]) -> Dict[str, Any]: + """Checks if a username is available for registration.""" + try: + username = checkData.get("username") + authenticationAuthority = checkData.get("authenticationAuthority", "local") + + if not username: + return { + "available": False, + "message": "Username is required" + } + + # Get user by username + user = self.getUserByUsername(username) + + # Check if user exists (User model instance) + if user is not None: + return { + "available": False, + "message": "Username is already taken" + } + + return { + "available": True, + "message": "Username is available" + } + + except Exception as e: + logger.error(f"Error checking username availability: {str(e)}") + return { + "available": False, + "message": f"Error checking username availability: {str(e)}" + } + + # Connection methods + + def getUserConnections(self, userId: str) -> List[UserConnection]: + """Returns all connections for a user.""" + try: + # Get connections for this user + connections = self.db.getRecordset(UserConnection, 
recordFilter={"userId": userId}) + + # Convert to UserConnection objects + result = [] + for conn_dict in connections: + try: + # Create UserConnection object + connection = UserConnection( + id=conn_dict["id"], + userId=conn_dict["userId"], + authority=conn_dict.get("authority"), + externalId=conn_dict.get("externalId", ""), + externalUsername=conn_dict.get("externalUsername", ""), + externalEmail=conn_dict.get("externalEmail"), + status=conn_dict.get("status", "pending"), + connectedAt=conn_dict.get("connectedAt"), + lastChecked=conn_dict.get("lastChecked"), + expiresAt=conn_dict.get("expiresAt") + ) + result.append(connection) + except Exception as e: + logger.error(f"Error converting connection dict to object: {str(e)}") + continue + return result + + except Exception as e: + logger.error(f"Error getting user connections: {str(e)}") + return [] + + def addUserConnection(self, userId: str, authority: AuthAuthority, externalId: str, + externalUsername: str, externalEmail: Optional[str] = None, + status: ConnectionStatus = ConnectionStatus.PENDING) -> UserConnection: + """ + Adds a new connection for a user. + + Args: + userId: The ID of the user + authority: The authentication authority (e.g., MSFT, GOOGLE) + externalId: The external ID from the authority + externalUsername: The username from the authority + externalEmail: Optional email from the authority + status: The connection status (defaults to PENDING) + + Returns: + The created UserConnection object + """ + try: + # Get the user + user = self.getUser(userId) + if not user: + raise ValueError(f"User not found: {userId}") + + # Create new connection with all required fields + connection = UserConnection( + id=str(uuid.uuid4()), + userId=userId, + authority=authority, + externalId=externalId, + externalUsername=externalUsername, + externalEmail=externalEmail, + status=status, + connectedAt=get_utc_timestamp(), + lastChecked=get_utc_timestamp(), + expiresAt=None # Optional field, set to None by default + ) + + # Save to connections table + self.db.recordCreate(UserConnection, connection) + + + return connection + + except Exception as e: + logger.error(f"Error adding user connection: {str(e)}") + raise ValueError(f"Failed to add user connection: {str(e)}") + + def removeUserConnection(self, connectionId: str) -> None: + """Remove a connection to an external service""" + try: + # Get connection + connections = self.db.getRecordset(UserConnection, recordFilter={ + "id": connectionId + }) + + if not connections: + raise ValueError(f"Connection {connectionId} not found") + + # Delete connection + self.db.recordDelete(UserConnection, connectionId) + + + except Exception as e: + logger.error(f"Error removing user connection: {str(e)}") + raise ValueError(f"Failed to remove user connection: {str(e)}") + # Mandate methods def getAllMandates(self) -> List[Mandate]: @@ -650,52 +698,7 @@ class AppObjects: logger.error(f"Error deleting mandate: {str(e)}") raise ValueError(f"Failed to delete mandate: {str(e)}") - def _getInitialUser(self) -> Optional[Dict[str, Any]]: - """Get the initial user record directly from database without access control.""" - try: - initialUserId = self.getInitialId(UserInDB) - if not initialUserId: - return None - - users = self.db.getRecordset(UserInDB, recordFilter={"id": initialUserId}) - return users[0] if users else None - except Exception as e: - logger.error(f"Error getting initial user: {str(e)}") - return None - - def checkUsernameAvailability(self, checkData: Dict[str, Any]) -> Dict[str, Any]: - """Checks if a 
username is available for registration.""" - try: - username = checkData.get("username") - authenticationAuthority = checkData.get("authenticationAuthority", "local") - - if not username: - return { - "available": False, - "message": "Username is required" - } - - # Get user by username - user = self.getUserByUsername(username) - - # Check if user exists (User model instance) - if user is not None: - return { - "available": False, - "message": "Username is already taken" - } - - return { - "available": True, - "message": "Username is available" - } - - except Exception as e: - logger.error(f"Error checking username availability: {str(e)}") - return { - "available": False, - "message": f"Error checking username availability: {str(e)}" - } + # Token methods def saveAccessToken(self, token: Token, replace_existing: bool = True) -> None: """Save an access token for the current user (must NOT have connectionId)""" @@ -803,56 +806,8 @@ class AppObjects: logger.error(f"Error saving connection token: {str(e)}") raise - def getAccessToken(self, authority: str, auto_refresh: bool = True) -> Optional[Token]: - """Get the latest valid access token for the current user and authority, optionally auto-refresh if expired""" - try: - # Validate that we're not looking for connection tokens - if not self.currentUser or not self.currentUser.id: - raise ValueError("No valid user context available for token retrieval") - - # Get access tokens for this user and authority (must NOT have connectionId) - tokens = self.db.getRecordset(Token, recordFilter={ - "userId": self.currentUser.id, - "authority": authority, - "connectionId": None # Ensure we only get access tokens - }) - - if not tokens: - return None - - # Sort by creation date and get the latest - tokens.sort(key=lambda x: x.get("createdAt", ""), reverse=True) - latest_token = Token(**tokens[0]) - - # Check if token is expired - if latest_token.expiresAt and latest_token.expiresAt < get_utc_timestamp(): - if auto_refresh: - # Import TokenManager here to avoid circular imports - from modules.security.tokenManager import TokenManager - token_manager = TokenManager() - - # Try to refresh the token - refreshed_token = token_manager.refresh_token(latest_token) - if refreshed_token: - # Save the new token (which will automatically replace old ones) - self.saveAccessToken(refreshed_token) - - return refreshed_token - else: - logger.warning(f"Failed to refresh expired access token for {authority}") - return None - else: - logger.warning(f"Access token for {authority} is expired (expiresAt: {latest_token.expiresAt})") - return None - - return latest_token - - except Exception as e: - logger.error(f"Error getting access token: {str(e)}") - return None - - def getConnectionToken(self, connectionId: str, auto_refresh: bool = True) -> Optional[Token]: - """Get the connection token for a specific connectionId, optionally auto-refresh if expired""" + def getConnectionToken(self, connectionId: str) -> Optional[Token]: + """Get the latest stored token for a specific connectionId (no refresh).""" try: # Validate connectionId if not connectionId: @@ -873,31 +828,7 @@ class AppObjects: tokens.sort(key=lambda x: x.get("expiresAt", 0), reverse=True) latest_token = Token(**tokens[0]) - # Check if token is expired or expires within 30 minutes - current_time = get_utc_timestamp() - thirty_minutes = 30 * 60 # 30 minutes in seconds - - if latest_token.expiresAt and latest_token.expiresAt < (current_time + thirty_minutes): - if auto_refresh: - # Import TokenManager here to avoid 
circular imports - from modules.security.tokenManager import TokenManager - token_manager = TokenManager() - - # Try to refresh the token - refreshed_token = token_manager.refresh_token(latest_token) - - if refreshed_token: - # Save the new token (which will automatically replace old ones) - self.saveConnectionToken(refreshed_token) - - logger.info(f"Proactively refreshed connection token for connectionId {connectionId} (expired in {latest_token.expiresAt - current_time} seconds)") - return refreshed_token - else: - logger.warning(f"Token refresh failed for connectionId {connectionId}") - return None - else: - logger.warning(f"Connection token for connectionId {connectionId} expires soon (expiresAt: {latest_token.expiresAt})") - return None + # No auto-refresh here. Callers should use a higher-level service to refresh when needed. return latest_token @@ -905,53 +836,6 @@ class AppObjects: logger.error(f"Error getting connection token for connectionId {connectionId}: {str(e)}") return None - def deleteAccessToken(self, authority: str) -> None: - """Delete all access tokens for the current user and authority""" - try: - # Validate user context - if not self.currentUser or not self.currentUser.id: - raise ValueError("No valid user context available for token deletion") - - # Get access tokens to delete (must NOT have connectionId) - tokens = self.db.getRecordset(Token, recordFilter={ - "userId": self.currentUser.id, - "authority": authority, - "connectionId": None # Ensure we only delete access tokens - }) - - # Delete each token - for token in tokens: - self.db.recordDelete(Token, token["id"]) - - - except Exception as e: - logger.error(f"Error deleting access token: {str(e)}") - raise - - def deleteConnectionTokenByConnectionId(self, connectionId: str) -> None: - """Delete all connection tokens for a specific connectionId""" - try: - # Validate connectionId - if not connectionId: - raise ValueError("connectionId is required for deleteConnectionTokenByConnectionId") - - # Get connection tokens to delete - tokens = self.db.getRecordset(Token, recordFilter={ - "connectionId": connectionId - }) - - # Delete each token - for token in tokens: - self.db.recordDelete(Token, token["id"]) - - - except Exception as e: - logger.error(f"Error deleting connection token for connectionId {connectionId}: {str(e)}") - raise - - # ===================== - # Token revocation (LOCAL gateway JWTs) - # ===================== def findActiveTokenById(self, tokenId: str, userId: str, authority: AuthAuthority, sessionId: str = None, mandateId: str = None) -> Optional[Token]: """Find an active access token by its id (jti) with optional session/tenant scoping.""" try: @@ -1088,7 +972,7 @@ class AppObjects: logger.error(f"Error during logout: {str(e)}") raise - # Data Neutralization methods + # Neutralization methods def getNeutralizationConfig(self) -> Optional[DataNeutraliserConfig]: """Get the data neutralization configuration for the current user's mandate""" @@ -1138,98 +1022,6 @@ class AppObjects: logger.error(f"Error creating/updating neutralization config: {str(e)}") raise ValueError(f"Failed to create/update neutralization config: {str(e)}") - def neutralizeText(self, text: str, file_id: Optional[str] = None) -> Dict[str, Any]: - """Neutralize text content and store attribute mappings""" - try: - from modules.services.serviceNeutralization.neutralizer import DataAnonymizer - - # Get neutralization configuration to extract namesToParse - config = self.getNeutralizationConfig() - names_to_parse = [] - if config and 
hasattr(config, 'namesToParse') and config.namesToParse: - # Split by newlines and filter out empty strings - names_to_parse = [name.strip() for name in config.namesToParse.split('\n') if name.strip()] - - # Initialize anonymizer with custom names - anonymizer = DataAnonymizer(names_to_parse=names_to_parse) - - # Process the text - result = anonymizer.process_content(text, 'text') - - # Store attribute mappings in database - stored_attributes = [] - for original_text, neutralized_text in result.mapping.items(): - # Extract pattern type and UUID from the neutralized text format [type.uuid] - pattern_type = "unknown" - placeholder_uuid = None - - if neutralized_text.startswith("[") and "." in neutralized_text and neutralized_text.endswith("]"): - # Extract type and UUID from [type.uuid] format - inner = neutralized_text[1:-1] # Remove [ and ] - if "." in inner: - pattern_type, placeholder_uuid = inner.split(".", 1) - - # Check if this exact original text already has a placeholder in the database - existing_attribute = self.getExistingPlaceholder(original_text) - - if existing_attribute: - # Reuse existing placeholder - existing_uuid = existing_attribute.id - existing_pattern_type = existing_attribute.patternType - - # Update the neutralized text to use the existing UUID - result.data = result.data.replace(neutralized_text, f"[{existing_pattern_type}.{existing_uuid}]") - result.mapping[original_text] = f"[{existing_pattern_type}.{existing_uuid}]" - - stored_attributes.append(existing_attribute) - else: - # Create new attribute record with the UUID that the neutralizer generated - attribute_data = { - "id": placeholder_uuid, # Use the UUID from the neutralizer - "mandateId": self.mandateId, - "userId": self.userId, - "originalText": original_text, - "fileId": file_id, - "patternType": pattern_type - } - - attribute = DataNeutralizerAttributes.from_dict(attribute_data) - created_attribute = self.db.recordCreate(DataNeutralizerAttributes, attribute) - stored_attributes.append(created_attribute) - - - # The neutralized text is already in the correct [type.uuid] format - # No need to replace it, as it's already properly formatted - - return { - "neutralized_text": result.data, - "attributes": stored_attributes, - "mapping": result.mapping, - "replaced_fields": result.replaced_fields, - "processed_info": result.processed_info - } - - except Exception as e: - logger.error(f"Error neutralizing text: {str(e)}") - raise ValueError(f"Failed to neutralize text: {str(e)}") - - def getExistingPlaceholder(self, original_text: str) -> Optional[DataNeutralizerAttributes]: - """Get existing placeholder for original text if it exists""" - try: - existing_attributes = self.db.getRecordset(DataNeutralizerAttributes, recordFilter={ - "mandateId": self.mandateId, - "userId": self.userId, - "originalText": original_text - }) - - if existing_attributes: - return DataNeutralizerAttributes.from_dict(existing_attributes[0]) - return None - - except Exception as e: - logger.error(f"Error getting existing placeholder: {str(e)}") - return None - def getNeutralizationAttributes(self, file_id: Optional[str] = None) -> List[DataNeutralizerAttributes]: """Get neutralization attributes, optionally filtered by file ID""" try: @@ -1246,35 +1038,6 @@ class AppObjects: logger.error(f"Error getting neutralization attributes: {str(e)}") return [] - def resolveNeutralizedText(self, text: str) -> str: - """Resolve UIDs in neutralized text back to original text""" - try: - # Find all placeholders in the new format [type.uuid] - 
placeholder_pattern = r'\[([a-z]+)\.([a-f0-9-]{36})\]' - matches = re.findall(placeholder_pattern, text) - - resolved_text = text - for placeholder_type, uid in matches: - # Find the attribute with this UID (which is the record ID) - attributes = self.db.getRecordset(DataNeutralizerAttributes, recordFilter={ - "mandateId": self.mandateId, - "id": uid - }) - - if attributes: - attribute = attributes[0] - # Replace placeholder with original text - placeholder = f"[{placeholder_type}.{uid}]" - resolved_text = resolved_text.replace(placeholder, attribute["originalText"]) - else: - logger.warning(f"No attribute found for UID {uid}") - - return resolved_text - - except Exception as e: - logger.error(f"Error resolving neutralized text: {str(e)}") - return text - def deleteNeutralizationAttributes(self, file_id: str) -> bool: """Delete all neutralization attributes for a specific file""" try: diff --git a/modules/interfaces/interfaceTicketObjects.py b/modules/interfaces/interfaceTicketObjects.py index 73894ab8..28dd3c3d 100644 --- a/modules/interfaces/interfaceTicketObjects.py +++ b/modules/interfaces/interfaceTicketObjects.py @@ -6,7 +6,7 @@ import pandas as pd import openpyxl from modules.shared.timezoneUtils import get_utc_now -from modules.connectors.connectorSharepoint import ConnectorSharepoint +from modules.services.serviceSharepoint.mainSharepoint import SharepointService from modules.interfaces.interfaceTicketModel import TicketBase, Task @@ -14,7 +14,7 @@ from modules.interfaces.interfaceTicketModel import TicketBase, Task @dataclass(slots=True) class TicketSharepointSyncInterface: connector_ticket: TicketBase - connector_sharepoint: ConnectorSharepoint + connector_sharepoint: SharepointService task_sync_definition: dict sync_folder: str sync_file: str @@ -26,7 +26,7 @@ class TicketSharepointSyncInterface: async def create( cls, connector_ticket: TicketBase, - connector_sharepoint: ConnectorSharepoint, + connector_sharepoint: SharepointService, task_sync_definition: dict, sync_folder: str, sync_file: str, @@ -700,7 +700,7 @@ class TicketSharepointSyncInterface: def _transform_tasks( self, tasks: list[Task], include_put: bool = False - ) -> list[Task]: + ) -> list[Task]: """Transforms tasks according to the task_sync_definition.""" transformed_tasks = [] diff --git a/modules/routes/routeDataNeutralization.py b/modules/routes/routeDataNeutralization.py index 939c4422..322d5398 100644 --- a/modules/routes/routeDataNeutralization.py +++ b/modules/routes/routeDataNeutralization.py @@ -7,7 +7,7 @@ from modules.security.auth import limiter, getCurrentUser # Import interfaces from modules.interfaces.interfaceAppModel import User, DataNeutraliserConfig, DataNeutralizerAttributes -from modules.features.neutralizePlayground.mainNeutralizePlayground import NeutralizationService +from modules.features.neutralization.mainNeutralizationPlayground import NeutralizationService # Configure logger logger = logging.getLogger(__name__) diff --git a/modules/routes/routeSecurityGoogle.py b/modules/routes/routeSecurityGoogle.py index 9cca2b3a..bf87259e 100644 --- a/modules/routes/routeSecurityGoogle.py +++ b/modules/routes/routeSecurityGoogle.py @@ -339,7 +339,7 @@ async def auth_callback(code: str, state: str, request: Request) -> HTMLResponse ) # Create JWT token data (like Microsoft does) - from modules.security.auth import createAccessToken + from modules.security.jwtService import createAccessToken jwt_token_data = { "sub": user.username, "mandateId": str(user.mandateId), @@ -637,29 +637,19 @@ async def 
verify_token( detail="No Google connection found for current user" ) - # Get the current token - current_token = appInterface.getConnectionToken(google_connection.id, auto_refresh=False) - + # Get a fresh token via TokenManager convenience method + from modules.security.tokenManager import TokenManager + current_token = TokenManager().getFreshToken(appInterface, google_connection.id) + if not current_token: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="No Google token found for this connection" ) - # Verify the token + # Verify the (fresh) token token_verification = await verify_google_token(current_token.tokenAccess) - if not token_verification.get("valid"): - # Try to refresh the token if verification failed - from modules.security.tokenManager import TokenManager - token_manager = TokenManager() - refreshed_token = token_manager.refresh_token(current_token) - - if refreshed_token: - appInterface.saveConnectionToken(refreshed_token) - # Verify the refreshed token - token_verification = await verify_google_token(refreshed_token.tokenAccess) - return { "valid": token_verification.get("valid", False), "scopes": token_verification.get("scopes", []), @@ -721,8 +711,9 @@ async def refresh_token( logger.debug(f"Found Google connection: {google_connection.id}, status={google_connection.status}") - # Get the token for this specific connection using the new method - current_token = appInterface.getConnectionToken(google_connection.id, auto_refresh=False) + # Get the token for this specific connection (fresh if expiring soon) + from modules.security.tokenManager import TokenManager + current_token = TokenManager().getFreshToken(appInterface, google_connection.id) if not current_token: raise HTTPException( @@ -731,38 +722,25 @@ async def refresh_token( ) + # If we could not obtain a fresh token, report error + if not current_token: + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to refresh token") - # Always attempt refresh (as per your requirement) - from modules.security.tokenManager import TokenManager - token_manager = TokenManager() + # Update the connection status and timing + google_connection.expiresAt = float(current_token.expiresAt) if current_token.expiresAt else google_connection.expiresAt + google_connection.lastChecked = get_utc_timestamp() + google_connection.status = ConnectionStatus.ACTIVE + appInterface.db.recordModify(UserConnection, google_connection.id, google_connection.to_dict()) - refreshed_token = token_manager.refresh_token(current_token) - if refreshed_token: - # Save the new connection token (which will automatically replace old ones) - appInterface.saveConnectionToken(refreshed_token) - - # Update the connection's expiration time - google_connection.expiresAt = float(refreshed_token.expiresAt) - google_connection.lastChecked = get_utc_timestamp() - google_connection.status = ConnectionStatus.ACTIVE - - # Save updated connection - appInterface.db.recordModify(UserConnection, google_connection.id, google_connection.to_dict()) - - # Calculate time until expiration - current_time = get_utc_timestamp() - expires_in = int(refreshed_token.expiresAt - current_time) - - return { - "message": "Token refreshed successfully", - "expires_at": refreshed_token.expiresAt, - "expires_in_seconds": expires_in - } - else: - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Failed to refresh token" - ) + # Calculate time until expiration + current_time = get_utc_timestamp() + expires_in = 
int(current_token.expiresAt - current_time) if current_token.expiresAt else 0 + + return { + "message": "Token refreshed successfully", + "expires_at": current_token.expiresAt, + "expires_in_seconds": expires_in + } except HTTPException: raise diff --git a/modules/routes/routeSecurityLocal.py b/modules/routes/routeSecurityLocal.py index 7b396f77..017e7e90 100644 --- a/modules/routes/routeSecurityLocal.py +++ b/modules/routes/routeSecurityLocal.py @@ -13,7 +13,8 @@ from jose import jwt from pydantic import BaseModel # Import auth modules -from modules.security.auth import createAccessToken, createAccessTokenWithCookie, setRefreshTokenCookie, getCurrentUser, limiter, SECRET_KEY, ALGORITHM +from modules.security.auth import getCurrentUser, limiter, SECRET_KEY, ALGORITHM +from modules.security.jwtService import createAccessToken, createRefreshToken, setAccessTokenCookie, setRefreshTokenCookie from modules.interfaces.interfaceAppObjects import getInterface, getRootInterface from modules.interfaces.interfaceAppModel import User, UserInDB, AuthAuthority, UserPrivilege, Token from modules.shared.attributeUtils import ModelMixin @@ -91,11 +92,13 @@ async def login( session_id = str(uuid.uuid4()) token_data["sid"] = session_id - # Create access token with httpOnly cookie - access_token = createAccessTokenWithCookie(token_data, response) + # Create access token + set cookie + access_token, _access_expires = createAccessToken(token_data) + setAccessTokenCookie(response, access_token) - # Create refresh token with httpOnly cookie - refresh_token = setRefreshTokenCookie(token_data, response) + # Create refresh token + set cookie + refresh_token, _refresh_expires = createRefreshToken(token_data) + setRefreshTokenCookie(response, refresh_token) # Get expiration time for response try: @@ -287,8 +290,9 @@ async def refresh_token( "authenticationAuthority": currentUser.authenticationAuthority } - # Create new access token with cookie - access_token = createAccessTokenWithCookie(token_data, response) + # Create new access token + set cookie + access_token, _expires = createAccessToken(token_data) + setAccessTokenCookie(response, access_token) # Get expiration time try: diff --git a/modules/routes/routeSecurityMsft.py b/modules/routes/routeSecurityMsft.py index 8c2d8856..2b73db59 100644 --- a/modules/routes/routeSecurityMsft.py +++ b/modules/routes/routeSecurityMsft.py @@ -14,7 +14,8 @@ import httpx from modules.shared.configuration import APP_CONFIG from modules.interfaces.interfaceAppObjects import getInterface, getRootInterface from modules.interfaces.interfaceAppModel import AuthAuthority, User, Token, ConnectionStatus, UserConnection -from modules.security.auth import getCurrentUser, limiter, createAccessToken +from modules.security.auth import getCurrentUser, limiter +from modules.security.jwtService import createAccessToken from modules.shared.attributeUtils import ModelMixin from modules.shared.timezoneUtils import get_utc_now, create_expiration_timestamp, get_utc_timestamp @@ -559,9 +560,9 @@ async def refresh_token( logger.debug(f"Found Microsoft connection: {msft_connection.id}, status={msft_connection.status}") - # Get the token for this specific connection using the new method - # Enable auto-refresh to handle expired tokens gracefully - current_token = appInterface.getConnectionToken(msft_connection.id, auto_refresh=True) + # Get a fresh token via TokenManager convenience method + from modules.security.tokenManager import TokenManager + current_token = TokenManager().getFreshToken(appInterface, 
diff --git a/modules/routes/routeSecurityMsft.py b/modules/routes/routeSecurityMsft.py
index 8c2d8856..2b73db59 100644
--- a/modules/routes/routeSecurityMsft.py
+++ b/modules/routes/routeSecurityMsft.py
@@ -14,7 +14,8 @@ import httpx
 from modules.shared.configuration import APP_CONFIG
 from modules.interfaces.interfaceAppObjects import getInterface, getRootInterface
 from modules.interfaces.interfaceAppModel import AuthAuthority, User, Token, ConnectionStatus, UserConnection
-from modules.security.auth import getCurrentUser, limiter, createAccessToken
+from modules.security.auth import getCurrentUser, limiter
+from modules.security.jwtService import createAccessToken
 from modules.shared.attributeUtils import ModelMixin
 from modules.shared.timezoneUtils import get_utc_now, create_expiration_timestamp, get_utc_timestamp
@@ -559,9 +560,9 @@ async def refresh_token(
     logger.debug(f"Found Microsoft connection: {msft_connection.id}, status={msft_connection.status}")

-    # Get the token for this specific connection using the new method
-    # Enable auto-refresh to handle expired tokens gracefully
-    current_token = appInterface.getConnectionToken(msft_connection.id, auto_refresh=True)
+    # Get a fresh token via TokenManager convenience method
+    from modules.security.tokenManager import TokenManager
+    current_token = TokenManager().getFreshToken(appInterface, msft_connection.id)

     if not current_token:
         raise HTTPException(
diff --git a/modules/security/auth.py b/modules/security/auth.py
index 5b882203..a457c60c 100644
--- a/modules/security/auth.py
+++ b/modules/security/auth.py
@@ -54,106 +54,7 @@ limiter = Limiter(key_func=get_remote_address)
 # Logger
 logger = logging.getLogger(__name__)

-def createAccessToken(data: dict, expiresDelta: Optional[timedelta] = None) -> Tuple[str, datetime]:
-    """
-    Creates a JWT Access Token.
-
-    Args:
-        data: Data to encode (usually user ID or username)
-        expiresDelta: Validity duration of the token (optional)
-
-    Returns:
-        Tuple of (JWT Token as string, expiration datetime)
-    """
-    toEncode = data.copy()
-    # Ensure a token id (jti) exists for revocation tracking (only required for local, harmless otherwise)
-    if "jti" not in toEncode or not toEncode.get("jti"):
-        toEncode["jti"] = str(uuid.uuid4())
-
-    if expiresDelta:
-        expire = get_utc_now() + expiresDelta
-    else:
-        expire = get_utc_now() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
-
-    toEncode.update({"exp": expire})
-    encodedJwt = jwt.encode(toEncode, SECRET_KEY, algorithm=ALGORITHM)
-
-    return encodedJwt, expire
-
-def createAccessTokenWithCookie(data: dict, response: Response, expiresDelta: Optional[timedelta] = None) -> str:
-    """
-    Creates a JWT Access Token and sets it as an httpOnly cookie.
-
-    Args:
-        data: Data to encode (usually user ID or username)
-        response: FastAPI Response object to set cookie
-        expiresDelta: Validity duration of the token (optional)
-
-    Returns:
-        JWT Token as string
-    """
-    access_token, expires_at = createAccessToken(data, expiresDelta)
-
-    # Set httpOnly cookie
-    response.set_cookie(
-        key="auth_token",
-        value=access_token,
-        httponly=True,
-        secure=True,  # HTTPS only in production
-        samesite="strict",
-        max_age=int(expiresDelta.total_seconds()) if expiresDelta else ACCESS_TOKEN_EXPIRE_MINUTES * 60
-    )
-
-    return access_token
-
-def createRefreshToken(data: dict) -> Tuple[str, datetime]:
-    """
-    Creates a JWT Refresh Token with longer expiration.
-
-    Args:
-        data: Data to encode (usually user ID or username)
-
-    Returns:
-        Tuple of (JWT Refresh Token as string, expiration datetime)
-    """
-    toEncode = data.copy()
-    # Ensure a token id (jti) exists for revocation tracking
-    if "jti" not in toEncode or not toEncode.get("jti"):
-        toEncode["jti"] = str(uuid.uuid4())
-
-    # Add refresh token type
-    toEncode["type"] = "refresh"
-
-    expire = get_utc_now() + timedelta(days=REFRESH_TOKEN_EXPIRE_DAYS)
-    toEncode.update({"exp": expire})
-    encodedJwt = jwt.encode(toEncode, SECRET_KEY, algorithm=ALGORITHM)
-
-    return encodedJwt, expire
-
-def setRefreshTokenCookie(data: dict, response: Response) -> str:
-    """
-    Creates a JWT Refresh Token and sets it as an httpOnly cookie.
-
-    Args:
-        data: Data to encode (usually user ID or username)
-        response: FastAPI Response object to set cookie
-
-    Returns:
-        JWT Refresh Token as string
-    """
-    refresh_token, expires_at = createRefreshToken(data)
-
-    # Set httpOnly cookie for refresh token
-    response.set_cookie(
-        key="refresh_token",
-        value=refresh_token,
-        httponly=True,
-        secure=True,  # HTTPS only in production
-        samesite="strict",
-        max_age=REFRESH_TOKEN_EXPIRE_DAYS * 24 * 60 * 60  # Days to seconds
-    )
-
-    return refresh_token
+# Note: JWT creation and cookie helpers moved to modules.security.jwtService

 def _getUserBase(token: str = Depends(cookieAuth)) -> User:
     """
diff --git a/modules/security/jwtService.py b/modules/security/jwtService.py
new file mode 100644
index 00000000..5e09e63e
--- /dev/null
+++ b/modules/security/jwtService.py
@@ -0,0 +1,72 @@
+"""
+JWT Service
+Centralizes local JWT creation and cookie helpers.
+"""
+
+import uuid
+from datetime import datetime, timedelta
+from typing import Optional, Tuple
+from fastapi import Response
+from jose import jwt
+
+from modules.shared.configuration import APP_CONFIG
+from modules.shared.timezoneUtils import get_utc_now
+
+# Config
+SECRET_KEY = APP_CONFIG.get("APP_JWT_KEY_SECRET")
+ALGORITHM = APP_CONFIG.get("Auth_ALGORITHM")
+ACCESS_TOKEN_EXPIRE_MINUTES = int(APP_CONFIG.get("APP_TOKEN_EXPIRY"))
+REFRESH_TOKEN_EXPIRE_DAYS = int(APP_CONFIG.get("APP_REFRESH_TOKEN_EXPIRY", "7"))
+
+
+def createAccessToken(data: dict, expiresDelta: Optional[timedelta] = None) -> Tuple[str, datetime]:
+    """Create a JWT access token and return (token, expiresAt)."""
+    toEncode = data.copy()
+    if "jti" not in toEncode or not toEncode.get("jti"):
+        toEncode["jti"] = str(uuid.uuid4())
+
+    expire = get_utc_now() + (expiresDelta if expiresDelta else timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES))
+    toEncode.update({"exp": expire})
+    encodedJwt = jwt.encode(toEncode, SECRET_KEY, algorithm=ALGORITHM)
+    return encodedJwt, expire
+
+
+def createRefreshToken(data: dict) -> Tuple[str, datetime]:
+    """Create a JWT refresh token and return (token, expiresAt)."""
+    toEncode = data.copy()
+    if "jti" not in toEncode or not toEncode.get("jti"):
+        toEncode["jti"] = str(uuid.uuid4())
+    toEncode["type"] = "refresh"
+
+    expire = get_utc_now() + timedelta(days=REFRESH_TOKEN_EXPIRE_DAYS)
+    toEncode.update({"exp": expire})
+    encodedJwt = jwt.encode(toEncode, SECRET_KEY, algorithm=ALGORITHM)
+    return encodedJwt, expire
+
+
+def setAccessTokenCookie(response: Response, token: str, expiresDelta: Optional[timedelta] = None) -> None:
+    """Set access token as httpOnly cookie."""
+    maxAge = int(expiresDelta.total_seconds()) if expiresDelta else ACCESS_TOKEN_EXPIRE_MINUTES * 60
+    response.set_cookie(
+        key="auth_token",
+        value=token,
+        httponly=True,
+        secure=True,
+        samesite="strict",
+        max_age=maxAge
+    )
+
+
+def setRefreshTokenCookie(response: Response, token: str) -> None:
+    """Set refresh token as httpOnly cookie."""
+    response.set_cookie(
+        key="refresh_token",
+        value=token,
+        httponly=True,
+        secure=True,
+        samesite="strict",
+        max_age=REFRESH_TOKEN_EXPIRE_DAYS * 24 * 60 * 60
+    )
+
diff --git a/modules/security/tokenManager.py b/modules/security/tokenManager.py
index c27c9939..92fa747f 100644
--- a/modules/security/tokenManager.py
+++ b/modules/security/tokenManager.py
@@ -6,7 +6,7 @@ Handles all token operations including automatic refresh for backend services.
 import logging
 import httpx
 from datetime import datetime
-from typing import Optional, Dict, Any
+from typing import Optional, Dict, Any, Callable

 from modules.interfaces.interfaceAppModel import Token, AuthAuthority
 from modules.shared.configuration import APP_CONFIG
@@ -198,4 +198,66 @@ class TokenManager:
         except Exception as e:
             logger.error(f"Error refreshing token: {str(e)}")
             return None
-
\ No newline at end of file
+
+    def ensure_fresh_token(self, token: Token, *, seconds_before_expiry: int = 30 * 60, save_callback: Optional[Callable[[Token], None]] = None) -> Optional[Token]:
+        """Ensure a token is fresh; refresh if expiring within threshold.
+
+        Args:
+            token: Existing token to validate/refresh.
+            seconds_before_expiry: Threshold window to proactively refresh.
+            save_callback: Optional function to persist a refreshed token.
+
+        Returns:
+            A fresh token (refreshed or original) or None if refresh failed.
+        """
+        try:
+            if token is None:
+                return None
+
+            now_ts = get_utc_timestamp()
+            expires_at = token.expiresAt or 0
+
+            # If token expires within the threshold, try to refresh
+            if expires_at and expires_at < (now_ts + seconds_before_expiry):
+                logger.info(
+                    f"ensure_fresh_token: Token for connection {token.connectionId} expiring soon "
+                    f"(in {max(0, expires_at - now_ts)}s). Attempting proactive refresh."
+                )
+                refreshed = self.refresh_token(token)
+                if refreshed:
+                    if save_callback is not None:
+                        try:
+                            save_callback(refreshed)
+                        except Exception as e:
+                            logger.warning(f"ensure_fresh_token: Failed to persist refreshed token: {e}")
+                    return refreshed
+                else:
+                    logger.warning("ensure_fresh_token: Token refresh failed")
+                    return None
+
+            # Token is sufficiently fresh
+            return token
+        except Exception as e:
+            logger.error(f"ensure_fresh_token: Error ensuring fresh token: {e}")
+            return None
+
+    # Convenience wrapper to fetch and ensure fresh token for a connection via interface layer
+    def getFreshToken(self, interfaceApp, connectionId: str, secondsBeforeExpiry: int = 30 * 60) -> Optional[Token]:
+        """Return a fresh token for a connection, refreshing when expiring soon.
+
+        Reads the latest stored token via interfaceApp.getConnectionToken, then
+        uses ensure_fresh_token to refresh if needed and persists the refreshed
+        token via interfaceApp.saveConnectionToken.
+        """
+        try:
+            token = interfaceApp.getConnectionToken(connectionId)
+            if not token:
+                return None
+            return self.ensure_fresh_token(
+                token,
+                seconds_before_expiry=secondsBeforeExpiry,
+                save_callback=lambda t: interfaceApp.saveConnectionToken(t)
+            )
+        except Exception as e:
+            logger.error(f"getFreshToken: Error fetching or refreshing token for connection {connectionId}: {e}")
+            return None
\ No newline at end of file
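For reference, a minimal sketch of how a caller is expected to use the new convenience wrapper; the appInterface object and connection id are placeholders (the real routes obtain them from the request context):

    from fastapi import HTTPException, status
    from modules.security.tokenManager import TokenManager

    def fetch_token_or_404(appInterface, connection_id: str):
        # getFreshToken reads the stored token, refreshes it when it expires
        # within the 30-minute default window, and persists the refreshed copy.
        token = TokenManager().getFreshToken(appInterface, connection_id)
        if not token:
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                                detail="No token found for this connection")
        return token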
+ """ + try: + token = interfaceApp.getConnectionToken(connectionId) + if not token: + return None + return self.ensure_fresh_token( + token, + seconds_before_expiry=secondsBeforeExpiry, + save_callback=lambda t: interfaceApp.saveConnectionToken(t) + ) + except Exception as e: + logger.error(f"getFreshToken: Error fetching or refreshing token for connection {connectionId}: {e}") + return None \ No newline at end of file diff --git a/modules/security/tokenRefreshService.py b/modules/security/tokenRefreshService.py index 649960bc..5dcef46a 100644 --- a/modules/security/tokenRefreshService.py +++ b/modules/security/tokenRefreshService.py @@ -51,8 +51,8 @@ class TokenRefreshService: try: logger.debug(f"Refreshing Google token for connection {connection.id}") - # Get current token - current_token = interface.getConnectionToken(connection.id, auto_refresh=False) + # Get current token (no refresh in interface layer) + current_token = interface.getConnectionToken(connection.id) if not current_token: logger.warning(f"No Google token found for connection {connection.id}") return False @@ -100,8 +100,8 @@ class TokenRefreshService: try: logger.debug(f"Refreshing Microsoft token for connection {connection.id}") - # Get current token - current_token = interface.getConnectionToken(connection.id, auto_refresh=False) + # Get current token (no refresh in interface layer) + current_token = interface.getConnectionToken(connection.id) if not current_token: logger.warning(f"No Microsoft token found for connection {connection.id}") return False diff --git a/modules/services/__init__.py b/modules/services/__init__.py new file mode 100644 index 00000000..842a2176 --- /dev/null +++ b/modules/services/__init__.py @@ -0,0 +1,100 @@ +from typing import Any + +from modules.interfaces.interfaceAppModel import User +from modules.interfaces.interfaceChatModel import ChatWorkflow +from modules.services.serviceWorkflows.mainServiceWorkflows import WorkflowService + +class PublicService: + """Lightweight proxy exposing only public callable attributes of a target. 
+ + - Hides names starting with '_' + - Optionally restricts to callables only + - Optional name_filter predicate for allow-list patterns + """ + + def __init__(self, target: Any, functions_only: bool = True, name_filter=None): + self._target = target + self._functions_only = functions_only + self._name_filter = name_filter + + def __getattr__(self, name: str): + if name.startswith('_'): + raise AttributeError(f"'{type(self._target).__name__}' attribute '{name}' is private") + if self._name_filter and not self._name_filter(name): + raise AttributeError(f"'{name}' not exposed by policy") + attr = getattr(self._target, name) + if self._functions_only and not callable(attr): + raise AttributeError(f"'{name}' is not a function") + return attr + + def __dir__(self): + names = [ + n for n in dir(self._target) + if not n.startswith('_') + and (not self._functions_only or callable(getattr(self._target, n, None))) + and (self._name_filter(n) if self._name_filter else True) + ] + return sorted(names) + + +class Services: + + def __init__(self, user: User, workflow: ChatWorkflow): + self.user: User = user + self.workflow: ChatWorkflow = workflow + + # Directly expose existing service modules + + from .serviceDocument.mainServiceDocumentExtraction import DocumentExtractionService + self.document = PublicService(DocumentExtractionService(self)) + + from .serviceDocument.mainServiceDocumentGeneration import DocumentGenerationService + self.document = PublicService(DocumentGenerationService(self)) + + from .serviceNeutralization.mainNeutralization import NeutralizationService + self.neutralization = PublicService(NeutralizationService()) + + from .serviceSharepoint.mainSharepoint import SharePointService + self.sharepoint = PublicService(SharePointService(self)) + + from .serviceAi.mainServiceAi import AiService + self.ai = PublicService(AiService(self)) + + from .serviceWorkflows.mainServiceWorkflows import WorkflowService + self.workflow = PublicService(WorkflowService(self)) + + # Initialize chat interface for workflow operations + from modules.interfaces.interfaceChatObjects import getInterface as getChatInterface + self.chatInterface = getChatInterface(user) + + # Chat interface wrapper methods + def getWorkflow(self, workflowId: str): + return self.chatInterface.getWorkflow(workflowId) + + def createWorkflow(self, workflowData: dict): + return self.chatInterface.createWorkflow(workflowData) + + def updateWorkflow(self, workflowId: str, workflowData: dict): + return self.chatInterface.updateWorkflow(workflowId, workflowData) + + def createMessage(self, messageData: dict): + return self.chatInterface.createMessage(messageData) + + def updateMessage(self, messageId: str, messageData: dict): + return self.chatInterface.updateMessage(messageId, messageData) + + def createLog(self, logData: dict): + return self.chatInterface.createLog(logData) + + def updateWorkflowStats(self, workflowId: str, bytesSent: int = 0, bytesReceived: int = 0, tokenCount: int = 0): + return self.chatInterface.updateWorkflowStats(workflowId, bytesSent, bytesReceived, tokenCount) + + @property + def mandateId(self): + return self.chatInterface.mandateId + + +def getInterface(user: User, workflow: ChatWorkflow) -> Services: + return Services(user, workflow) + + diff --git a/modules/services/serviceAi/mainServiceAi.py b/modules/services/serviceAi/mainServiceAi.py new file mode 100644 index 00000000..c7458756 --- /dev/null +++ b/modules/services/serviceAi/mainServiceAi.py @@ -0,0 +1,137 @@ +import logging +from typing import Dict, 
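A small sketch of what the PublicService proxy buys: private members and plain attributes of the wrapped service are hidden, and only public callables pass through. The DemoService class is illustrative:

    from modules.services import PublicService

    class DemoService:
        version = "1.0"                # plain attribute, hidden when functions_only=True

        def ping(self) -> str:         # public callable, exposed
            return "pong"

        def _internal(self) -> None:   # private, always hidden
            pass

    svc = PublicService(DemoService())
    print(svc.ping())   # "pong"
    print(dir(svc))     # ['ping'] -- __dir__ lists only exposed names
    # svc.version  -> AttributeError: 'version' is not a function
    # svc._internal -> AttributeError: attribute '_internal' is private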
diff --git a/modules/services/serviceAi/mainServiceAi.py b/modules/services/serviceAi/mainServiceAi.py
new file mode 100644
index 00000000..c7458756
--- /dev/null
+++ b/modules/services/serviceAi/mainServiceAi.py
@@ -0,0 +1,137 @@
+import logging
+from typing import Dict, Any, List, Optional, Tuple
+
+from modules.interfaces.interfaceChatModel import ChatDocument
+from modules.services.serviceDocument.documentExtraction import DocumentExtractionService
+from modules.interfaces.interfaceAiModel import AiCallRequest, AiCallOptions
+from modules.interfaces.interfaceAiObjects import AiObjects
+
+
+logger = logging.getLogger(__name__)
+
+
+# Model registry is now provided by interfaces via AiModels
+
+
+class AiService:
+    """Centralized AI service orchestrating documents, model selection and failover.
+
+    The concrete connector instances (OpenAI/Anthropic) are injected by the interface layer.
+    """
+
+    def __init__(self, aiObjects: AiObjects | None = None) -> None:
+        # Only depend on interfaces
+        self.aiObjects = aiObjects or AiObjects()
+        self.documentExtractor = DocumentExtractionService()
+
+    async def callAi(
+        self,
+        prompt: str,
+        documents: Optional[List[ChatDocument]] = None,
+        processDocumentsIndividually: bool = False,
+        options: Optional[AiCallOptions] = None,
+    ) -> str:
+        try:
+            documentContent = ""
+            if documents:
+                documentContent = await self._processDocumentsForAi(
+                    documents,
+                    options.operationType if options else "general",
+                    options.compressContext if options else True,
+                    processDocumentsIndividually,
+                )
+
+            effectiveOptions = options or AiCallOptions()
+            request = AiCallRequest(
+                prompt=prompt,
+                context=documentContent or None,
+                options=effectiveOptions,
+            )
+
+            response = await self.aiObjects.call(request)
+            return response.content
+        except Exception as e:
+            logger.error(f"Error in centralized AI call: {str(e)}")
+            return f"Error: {str(e)}"
+
+    # Model selection now handled by interface AiObjects
+
+    # Cost estimation handled by interface for model selection
+
+    async def _processDocumentsForAi(
+        self,
+        documents: List[ChatDocument],
+        operationType: str,
+        compressDocuments: bool,
+        processIndividually: bool,
+    ) -> str:
+        if not documents:
+            return ""
+
+        processedContents: List[str] = []
+        for doc in documents:
+            try:
+                extracted = await self.documentExtractor.processFileData(
+                    doc.fileData,
+                    doc.fileName,
+                    doc.mimeType,
+                    prompt=f"Extract relevant content for {operationType}",
+                    documentId=doc.id,
+                    enableAI=True,
+                )
+
+                docContent: List[str] = []
+                for contentItem in extracted.contents:
+                    if contentItem.data and contentItem.data.strip():
+                        docContent.append(contentItem.data)
+
+                if docContent:
+                    combinedDocContent = "\n\n".join(docContent)
+                    if (
+                        compressDocuments
+                        and len(combinedDocContent.encode("utf-8")) > 10000
+                    ):
+                        combinedDocContent = await self._compressContent(
+                            combinedDocContent, 10000, "document"
+                        )
+                    processedContents.append(
+                        f"Document: {doc.fileName}\n{combinedDocContent}"
+                    )
+            except Exception as e:
+                logger.warning(
+                    f"Error processing document {doc.fileName}: {str(e)}"
+                )
+                processedContents.append(
+                    f"Document: {doc.fileName}\n[Error processing document: {str(e)}]"
+                )
+
+        return "\n\n---\n\n".join(processedContents)
+
+    # Prompt/context optimization (compression) handled by interface
+
+    async def _compressContent(self, content: str, targetSize: int, contentType: str) -> str:
+        if len(content.encode("utf-8")) <= targetSize:
+            return content
+
+        try:
+            # NOTE: compressionPrompt sketches the intended AI-based compression but is
+            # currently unused: the service must not call connectors directly, so we
+            # fall back to simple byte-safe truncation below.
+            compressionPrompt = f"""
+            Komprimiere den folgenden {contentType} auf maximal {targetSize} Zeichen,
+            behalte aber alle wichtigen Informationen bei:
+
+            {content}
+
+            Gib nur den komprimierten Inhalt zurück, ohne zusätzliche Erklärungen.
+            """
+
+            data = content.encode("utf-8")
+            return data[:targetSize].decode("utf-8", errors="ignore") + "... [truncated]"
+        except Exception as e:
+            logger.warning(f"AI compression failed, using truncation: {str(e)}")
+            return content[:targetSize] + "... [truncated]"
+
+    # Failover logic now centralized in interface via model selection; service delegates a single call
+
+    # Fallback selection moved to interface; service doesn't select models directly
+
diff --git a/modules/services/serviceCenter.py b/modules/services/serviceCenter.py
deleted file mode 100644
index 0b999772..00000000
--- a/modules/services/serviceCenter.py
+++ /dev/null
@@ -1,1206 +0,0 @@
-import logging
-import importlib
-import pkgutil
-import inspect
-import os
-from typing import Dict, Any, List, Optional
-from modules.interfaces.interfaceAppModel import User, UserConnection
-from modules.interfaces.interfaceChatModel import (
-    TaskStatus, ChatDocument, TaskItem, TaskAction, TaskResult, ChatStat, ChatLog, ChatMessage, ChatWorkflow, DocumentExchange, ExtractedContent
-)
-from modules.interfaces.interfaceAiCalls import AiCalls
-from modules.interfaces.interfaceChatObjects import getInterface as getChatObjects
-from modules.interfaces.interfaceChatModel import ActionResult
-from modules.interfaces.interfaceComponentObjects import getInterface as getComponentObjects
-from modules.interfaces.interfaceAppObjects import getInterface as getAppObjects
-from modules.services.serviceDocument.documentExtraction import DocumentExtraction
-from modules.services.serviceDocument.documentUtility import getFileExtension, getMimeTypeFromExtension, detectContentTypeFromData
-from modules.workflows.methods.methodBase import MethodBase
-from modules.shared.timezoneUtils import get_utc_timestamp
-import uuid
-
-import asyncio
-
-logger = logging.getLogger(__name__)
-
-class ServiceCenter:
-    """Service center that provides access to all services and their functions"""
-
-    def __init__(self, currentUser: User, workflow: ChatWorkflow):
-        # Core services
-        self.user = currentUser
-        self.workflow = workflow
-        self.tasks = workflow.tasks
-        self.statusEnums = TaskStatus
-        self.currentTask = None  # Initialize current task as None
-
-        # Initialize managers
-        self.interfaceChat = getChatObjects(currentUser)
-        self.interfaceComponent = getComponentObjects(currentUser)
-        self.interfaceApp = getAppObjects(currentUser)
-        self.interfaceAiCalls = AiCalls()
-        self.documentProcessor = DocumentExtraction(self)
-
-        # Initialize methods catalog
-        self.methods = {}
-        # Discover additional methods
-        self._discoverMethods()
-
-    def _discoverMethods(self):
-        """Dynamically discover all method classes and their actions in modules methods package"""
-        try:
-            # Import the methods package
-            methodsPackage = importlib.import_module('modules.workflows.methods')
-
-            # Discover all modules in the package
-            for _, name, isPkg in pkgutil.iter_modules(methodsPackage.__path__):
-                if not isPkg and name.startswith('method'):
-                    try:
-                        # Import the module
-                        module = importlib.import_module(f'modules.workflows.methods.{name}')
-
-                        # Find all classes in the module that inherit from MethodBase
-                        for itemName, item in inspect.getmembers(module):
-                            if (inspect.isclass(item) and
-                                issubclass(item, MethodBase) and
-                                item != MethodBase):
-                                # Instantiate the method
-                                methodInstance = item(self)
-
-                                # Discover actions from public methods
-                                actions = {}
-                                for methodName, method in inspect.getmembers(type(methodInstance), predicate=inspect.iscoroutinefunction):
-                                    if not methodName.startswith('_'):
-                                        # Bind the method to the instance
-                                        bound_method = method.__get__(methodInstance, type(methodInstance))
-                                        sig = inspect.signature(method)
-                                        params = {}
-                                        for paramName, param in sig.parameters.items():
-                                            if paramName not in ['self']:
-                                                # Get parameter type
-                                                paramType = param.annotation if param.annotation != param.empty else Any
-
-                                                # Get parameter description from docstring or default
-                                                paramDesc = None
-                                                if param.default != param.empty and hasattr(param.default, '__doc__'):
-                                                    paramDesc = param.default.__doc__
-
-                                                params[paramName] = {
-                                                    'type': paramType,
-                                                    'required': param.default == param.empty,
-                                                    'description': paramDesc,
-                                                    'default': param.default if param.default != param.empty else None
-                                                }
-
-                                        actions[methodName] = {
-                                            'description': method.__doc__ or '',
-                                            'parameters': params,
-                                            'method': bound_method
-                                        }
-
-                                # Add method instance with discovered actions
-                                self.methods[methodInstance.name] = {
-                                    'instance': methodInstance,
-                                    'description': methodInstance.description,
-                                    'actions': actions
-                                }
-                                logger.info(f"Discovered method: {methodInstance.name} with {len(actions)} actions")
-
-                    except Exception as e:
-                        logger.error(f"Error loading method module {name}: {str(e)}", exc_info=True)
-
-        except Exception as e:
-            logger.error(f"Error discovering methods: {str(e)}")
-
-
-
-    # ===== Functions for Prompts: Context =====
-
-    def getMethodsList(self) -> List[str]:
-        """Get list of available methods with their signatures in the required format"""
-        methodList = []
-        for methodName, method in self.methods.items():
-            methodInstance = method['instance']
-            for actionName, action in method['actions'].items():
-                # Use the new signature format from MethodBase
-                signature = methodInstance.getActionSignature(actionName)
-                if signature:
-                    methodList.append(signature)
-        return methodList
-
-    async def summarizeChat(self, messages: List[ChatMessage]) -> str:
-        """
-        Summarize chat messages from last to first message with status="first"
-
-        Args:
-            messages: List of chat messages to summarize
-
-        Returns:
-            str: Summary of the chat in user's language
-        """
-        try:
-            # Get messages from last to first, stopping at first message with status="first"
-            relevantMessages = []
-            for msg in reversed(messages):
-                relevantMessages.append(msg)
-                if msg.status == "first":
-                    break
-
-            # Create prompt for AI
-            prompt = f"""You are an AI assistant providing a summary of a chat conversation.
-Please respond in '{self.user.language}' language.
-
-Chat History:
-{chr(10).join(f"- {msg.message}" for msg in reversed(relevantMessages))}
-
-Instructions:
-1. Summarize the conversation's key points and outcomes
-2. Be concise but informative
-3. Use a professional but friendly tone
-4. Focus on important decisions and next steps if any
-
-Please provide a comprehensive summary of this conversation."""
-
-            # Get summary using AI
-            return await self.callAiTextBasic(prompt)
-
-        except Exception as e:
-            logger.error(f"Error summarizing chat: {str(e)}")
-            return f"Error summarizing chat: {str(e)}"
-
-    # ===== Functions for Prompts + Actions: Document References generation and resolution =====
-
-    def getEnhancedDocumentContext(self) -> str:
-        """Get enhanced document context formatted for action planning prompts with proper docList and docItem references"""
-        try:
-            document_list = self.getDocumentReferenceList()
-
-            # Build technical context string for AI action planning
-            context = "AVAILABLE DOCUMENTS:\n\n"
-
-            # Process chat exchanges (current round)
-            if document_list["chat"]:
-                context += "CURRENT ROUND DOCUMENTS:\n"
-                for exchange in document_list["chat"]:
-                    # Generate docList reference for the exchange (using message ID and label)
-                    # Find the message that corresponds to this exchange
-                    message_id = None
-                    for message in self.workflow.messages:
-                        if hasattr(message, 'documentsLabel') and message.documentsLabel == exchange.documentsLabel:
-                            message_id = message.id
-                            break
-
-                    if message_id:
-                        doc_list_ref = f"docList:{message_id}:{exchange.documentsLabel}"
-                    else:
-                        # Fallback to label-only format if message ID not found
-                        doc_list_ref = f"docList:{exchange.documentsLabel}"
-
-                    logger.debug(f"Using document label for action planning: {exchange.documentsLabel} (message_id: {message_id})")
-                    context += f"- {doc_list_ref} contains:\n"
-                    # Generate docItem references for each document in the list
-                    for doc_ref in exchange.documents:
-                        if doc_ref.startswith("docItem:"):
-                            context += f"  - {doc_ref}\n"
-                        else:
-                            # Convert to proper docItem format if needed
-                            context += f"  - docItem:{doc_ref}\n"
-                    context += "\n"
-
-            # Process history exchanges (previous rounds)
-            if document_list["history"]:
-                context += "WORKFLOW HISTORY DOCUMENTS:\n"
-                for exchange in document_list["history"]:
-                    # Generate docList reference for the exchange (using message ID and label)
-                    # Find the message that corresponds to this exchange
-                    message_id = None
-                    for message in self.workflow.messages:
-                        if hasattr(message, 'documentsLabel') and message.documentsLabel == exchange.documentsLabel:
-                            message_id = message.id
-                            break
-
-                    if message_id:
-                        doc_list_ref = f"docList:{message_id}:{exchange.documentsLabel}"
-                    else:
-                        # Fallback to label-only format if message ID not found
-                        doc_list_ref = f"docList:{exchange.documentsLabel}"
-
-                    logger.debug(f"Using history document label for action planning: {exchange.documentsLabel} (message_id: {message_id})")
-                    context += f"- {doc_list_ref} contains:\n"
-                    # Generate docItem references for each document in the list
-                    for doc_ref in exchange.documents:
-                        if doc_ref.startswith("docItem:"):
-                            context += f"  - {doc_ref}\n"
-                        else:
-                            # Convert to proper docItem format if needed
-                            context += f"  - docItem:{doc_ref}\n"
-                    context += "\n"
-
-            if not document_list["chat"] and not document_list["history"]:
-                context += "NO DOCUMENTS AVAILABLE - This workflow has no documents to process.\n"
-
-            return context
-
-        except Exception as e:
-            logger.error(f"Error generating enhanced document context: {str(e)}")
-            return "NO DOCUMENTS AVAILABLE - Error generating document context."
-
-    def getDocumentReferenceList(self) -> Dict[str, List[DocumentExchange]]:
-        """Get list of document exchanges with new labeling format, sorted by recency"""
-        # Collect all documents first and refresh their attributes
-        all_documents = []
-        for message in self.workflow.messages:
-            if message.documents:
-                all_documents.extend(message.documents)
-
-        # Refresh file attributes for all documents
-        if all_documents:
-            self._refreshDocumentFileAttributes(all_documents)
-
-        chat_exchanges = []
-        history_exchanges = []
-
-        # Process messages in reverse order; "first" marks boundary
-        in_current_round = True
-        for message in reversed(self.workflow.messages):
-            is_first = message.status == "first" if hasattr(message, 'status') else False
-
-            # Build a DocumentExchange if message has documents
-            doc_exchange = None
-            if message.documents:
-                if message.actionId and message.documentsLabel:
-                    # Validate that we use the same label as in the message
-                    validated_label = self._validateDocumentLabelConsistency(message)
-
-                    # Use the message's actual documentsLabel
-                    doc_refs = []
-                    for doc in message.documents:
-                        doc_ref = self._getDocumentReferenceFromChatDocument(doc, message)
-                        doc_refs.append(doc_ref)
-
-                    doc_exchange = DocumentExchange(
-                        documentsLabel=validated_label,
-                        documents=doc_refs
-                    )
-                else:
-                    # Generate new labels for documents without explicit labels
-                    doc_refs = []
-                    for doc in message.documents:
-                        doc_ref = self._getDocumentReferenceFromChatDocument(doc, message)
-                        doc_refs.append(doc_ref)
-
-                    if doc_refs:
-                        # Create a label based on message context
-                        context_prefix = self._generateWorkflowContextPrefix(message)
-                        context_label = f"{context_prefix}_context"
-
-                        doc_exchange = DocumentExchange(
-                            documentsLabel=context_label,
-                            documents=doc_refs
-                        )
-
-            # Append to appropriate container based on boundary
-            if doc_exchange:
-                if in_current_round:
-                    chat_exchanges.append(doc_exchange)
-                else:
-                    history_exchanges.append(doc_exchange)
-
-            # Flip boundary after including the "first" message in chat
-            if in_current_round and is_first:
-                in_current_round = False
-
-        # Sort by recency: most recent first, then current round, then earlier rounds
-        # Sort chat exchanges by message sequence number (most recent first)
-        chat_exchanges.sort(key=lambda x: self._getMessageSequenceForExchange(x), reverse=True)
-        # Sort history exchanges by message sequence number (most recent first)
-        history_exchanges.sort(key=lambda x: self._getMessageSequenceForExchange(x), reverse=True)
-
-        return {
-            "chat": chat_exchanges,
-            "history": history_exchanges
-        }
-
-    def _refreshDocumentFileAttributes(self, documents: List[ChatDocument]) -> None:
-        """Update file attributes (fileName, fileSize, mimeType) for documents"""
-        for doc in documents:
-            try:
-                file_item = self.interfaceComponent.getFile(doc.fileId)
-                if file_item:
-                    doc.fileName = file_item.fileName
-                    doc.fileSize = file_item.fileSize
-                    doc.mimeType = file_item.mimeType
-                else:
-                    logger.warning(f"File not found for document {doc.id}, fileId: {doc.fileId}")
-            except Exception as e:
-                logger.error(f"Error refreshing file attributes for document {doc.id}: {e}")
-
-    def _generateWorkflowContextPrefix(self, message: ChatMessage) -> str:
-        """Generate workflow context prefix: round{num}_task{num}_action{num}"""
-        round_num = message.roundNumber if hasattr(message, 'roundNumber') else 1
-        task_num = message.taskNumber if hasattr(message, 'taskNumber') else 0
-        action_num = message.actionNumber if hasattr(message, 'actionNumber') else 0
-        return f"round{round_num}_task{task_num}_action{action_num}"
-
-    def _getDocumentReferenceFromChatDocument(self, document: ChatDocument, message: ChatMessage) -> str:
-        """Get document reference using document ID and filename."""
-        try:
-            # Use document ID and filename for simple reference
-            return f"docItem:{document.id}:{document.fileName}"
-        except Exception as e:
-            logger.error(f"Critical error creating document reference for document {document.id}: {str(e)}")
-            # Re-raise the error to prevent workflow from continuing with invalid data
-            raise
-
-    def _getMessageSequenceForExchange(self, exchange: DocumentExchange) -> int:
-        """Get message sequence number for sorting exchanges by recency"""
-        try:
-            # Extract message ID from the first document reference
-            if exchange.documents and len(exchange.documents) > 0:
-                first_doc_ref = exchange.documents[0]
-                if first_doc_ref.startswith("docItem:"):
-                    # docItem::