Full AI Center integration test and fixes, with extraction and generation engine

This commit is contained in:
ValueOn AG 2025-10-12 14:52:42 +02:00
parent dedee0ecda
commit d9ee4d9605
27 changed files with 1158 additions and 254 deletions

View file

@ -0,0 +1,90 @@
# Development Environment Configuration
# System Configuration
APP_ENV_TYPE = dev
APP_ENV_LABEL = Development Instance Patrick
APP_API_URL = http://localhost:8000
APP_KEY_SYSVAR = D:/Athi/Local/Web/poweron/local/key.txt
APP_INIT_PASS_ADMIN_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEeFFtRGtQeVUtcjlrU3dab1ZxUm9WSks0MlJVYUtERFlqUElHemZrOGNENk1tcmJNX3Vxc01UMDhlNU40VzZZRVBpUGNmT3podzZrOGhOeEJIUEt4eVlSWG5UYXA3d09DVXlLT21Kb1JYSUU9
APP_INIT_PASS_EVENT_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpERzZjNm56WGVBdjJTeG5Udjd6OGQwUVotYXUzQjJ1YVNyVXVBa3NZVml3ODU0MVNkZjhWWmJwNUFkc19BcHlHMTU1Q3BRcHU0cDBoZkFlR2l6UEZQU3d2U3MtMDh5UDZteGFoQ0EyMUE1ckE9
# PostgreSQL Storage (new)
DB_APP_HOST=localhost
DB_APP_DATABASE=poweron_app
DB_APP_USER=poweron_dev
DB_APP_PASSWORD_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEcUIxNEFfQ2xnS0RrSC1KNnUxTlVvTGZoMHgzaEI4Z3NlVzVROTVLak5Ubi1vaEZubFZaMTFKMGd6MXAxekN2d2NvMy1hRjg2UVhybktlcFA5anZ1WjFlQmZhcXdwaGhWdzRDc3ExeUhzWTg9
DB_APP_PORT=5432
# PostgreSQL Storage (new)
DB_CHAT_HOST=localhost
DB_CHAT_DATABASE=poweron_chat
DB_CHAT_USER=poweron_dev
DB_CHAT_PASSWORD_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpERFNzNVhoalpCR0QxYXAwdEpXWXVVOTdZdWtqWW5FNXFGcFl2amNYLWYwYl9STXltRlFxLWNzVWlMVnNYdXk0RklnRExFT0FaQjg2aGswNnhhSGhCN29KN2VEb2FlUV9NTlV3b0tLelplSVU9
DB_CHAT_PORT=5432
# PostgreSQL Storage (new)
DB_MANAGEMENT_HOST=localhost
DB_MANAGEMENT_DATABASE=poweron_management
DB_MANAGEMENT_USER=poweron_dev
DB_MANAGEMENT_PASSWORD_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEUldqSTVpUnFqdGhITDYzT3RScGlMYVdTMmZhOXdudDRCc3dhdllOd3l6MS1vWHY2MjVsTUF1Sk9saEJOSk9ONUlBZjQwb2c2T1gtWWJhcXFzVVVXd01xc0U0b0lJX0JyVDRxaDhNS01JcWs9
DB_MANAGEMENT_PORT=5432
# Security Configuration
APP_JWT_KEY_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpERjlrSktmZHVuQnJ1VVJDdndLaUcxZGJsT2ZlUFRlcFdOZ001RnlzM2FhLWhRV2tjWWFhaWQwQ3hkcUFvbThMcndxSjFpYTdfRV9OZGhTcksxbXFTZWg5MDZvOHpCVXBHcDJYaHlJM0tyNWRZckZsVHpQcmxTZHJoZUs1M3lfU2ljRnJaTmNSQ0w0X085OXI0QW80M2xfQnJqZmZ6VEh3TUltX0xzeE42SGtZPQ==
APP_TOKEN_EXPIRY=300
# CORS Configuration
APP_ALLOWED_ORIGINS=http://localhost:8080,https://playground.poweron-center.net
# Logging configuration
APP_LOGGING_LOG_LEVEL = DEBUG
APP_LOGGING_LOG_DIR = D:/Athi/Local/Web/poweron/local/logs
APP_LOGGING_FORMAT = %(asctime)s - %(levelname)s - %(name)s - %(message)s
APP_LOGGING_DATE_FORMAT = %Y-%m-%d %H:%M:%S
APP_LOGGING_CONSOLE_ENABLED = True
APP_LOGGING_FILE_ENABLED = True
APP_LOGGING_ROTATION_SIZE = 10485760
APP_LOGGING_BACKUP_COUNT = 5
# Service Redirects
Service_MSFT_REDIRECT_URI = http://localhost:8000/api/msft/auth/callback
Service_GOOGLE_REDIRECT_URI = http://localhost:8000/api/google/auth/callback
# OpenAI configuration
Connector_AiOpenai_API_URL = https://api.openai.com/v1/chat/completions
Connector_AiOpenai_API_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEajBuZmtYTVdqLTBpQm9KZ2pCXzRCV3VhZzlYTEhKb1FqWXNrV3lyb25uZUN1WVVQUEY3dGYtejludV9MNGlKeVREanZGOGloV09mY2ttQ3k5SjBFOGFac2ZQTkNKNUZWVnRINVQyeWhsR2wyYnVrRDNzV2NqSHB0ajQ4UWtGeGZtbmR0Q3VvS0hDZlphVmpSc2Z6RG5nPT0=
Connector_AiOpenai_MODEL_NAME = gpt-4o
Connector_AiOpenai_TEMPERATURE = 0.2
Connector_AiOpenai_MAX_TOKENS = 2000
# Anthropic configuration
Connector_AiAnthropic_API_URL = https://api.anthropic.com/v1/messages
Connector_AiAnthropic_API_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpENmFBWG16STFQUVZxNzZZRzRLYTA4X3lRanF1VkF4cU45OExNMzlsQmdISGFxTUxud1dXODBKcFhMVG9KNjdWVnlTTFFROVc3NDlsdlNHLUJXeG41NDBHaXhHR0VHVWl5UW9RNkVWbmlhakRKVW5pM0R4VHk0LUw0TV9LdkljNHdBLXJua21NQkl2b3l4UkVkMGN1YjBrMmJEeWtMay1jbmxrYWJNbUV0aktCXzU1djR2d2RSQXZORTNwcG92ZUVvVGMtQzQzTTVncEZTRGRtZUFIZWQ0dz09
Connector_AiAnthropic_MODEL_NAME = claude-3-5-sonnet-20241022
Connector_AiAnthropic_TEMPERATURE = 0.2
Connector_AiAnthropic_MAX_TOKENS = 2000
# Perplexity AI configuration
Connector_AiPerplexity_API_URL = https://api.perplexity.ai/chat/completions
Connector_AiPerplexity_API_SECRET = DEV_ENC:Z0FBQUFBQm82Mzk2Q1MwZ0dNcUVBcUtuRDJIcTZkMXVvYnpjM3JEMzJiT1NKSHljX282ZDIyZTJYc09VSTdVNXAtOWU2UXp5S193NTk5dHJsWlFjRjhWektFOG1DVGY4ZUhHTXMzS0RPN1lNcF9nSlVWbW5BZ1hkZDVTejl6bVZNRFVvX29xamJidWRFMmtjQmkyRUQ2RUh6UTN1aWNPSUJBPT0=
Connector_AiPerplexity_MODEL_NAME = sonar
Connector_AiPerplexity_TEMPERATURE = 0.2
Connector_AiPerplexity_MAX_TOKENS = 2000
# Agent Mail configuration
Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
Service_MSFT_CLIENT_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEQk4xYnpmbnItUEU3dHU4eHB5dzVYay1WT012RTRLUWJDTlBILVY5dC1FX3VMNjZmLThrbDRFNWFSNGprY3RRTlpYNGlubVBpNnY3MjNJcGtzVk9PMzRacl9LUlM2RU5vTVVZWHJvaUhWSHVfc1pNR0pfQmI5SEprOG5KdlB1QnQ=
Service_MSFT_TENANT_ID = common
# Google Service configuration
Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
Service_GOOGLE_CLIENT_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpETDJhbGVQMHlFQzNPVFI1ZzBMa3pNMGlQUHhaQm10eVl1bFlSeTBybzlTOWE2MURXQ0hkRlo0NlNGbHQxWEl1OVkxQnVKYlhhOXR1cUF4T3k0WDdscktkY1oyYllRTmdDTWpfbUdwWGtSd1JvNlYxeTBJdEtaaS1vYnItcW0yaFM=
# Tavily Web Search configuration
Connector_WebTavily_API_KEY_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEQTdnUHMwd2pIaXNtMmtCTFREd0pyQXRKb1F5eGtHSnkyOGZiUnlBOFc0b3Vzcndrc3ViRm1nMDJIOEZKYWxqdWNkZGh5N0Z4R0JlQmxXSG5pVnJUR2VYckZhMWNMZ1FNeXJ3enJLVlpiblhOZTNleUg3ZzZyUzRZanFSeDlVMkI=
# Google Cloud Speech Services configuration
Connector_GoogleSpeech_API_KEY_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpETk5FWWM3Q0JKMzhIYTlyMkhuNjA4NlF4dk82U2NScHhTVGY3UG83NkhfX3RrcWVtWWcyLXRjU1dTT21zWEl6YWRMMUFndXpsUnJOeHh3QThsNDZKRXROTzdXRUdsT0JZajZJNVlfb0gtMXkwWm9DOERPVnpjU0pyUEZfOGJsUnprT3ltMVVhalUyUm9hMUFtZEtHUnJqOGZ4dEZjZm5SWVVTckVCWnY1UkdVSHVmUlgwbnAyc0xDQW84R3ViSko5OHVCVWZRUVNiaG1pVFB6X3EwS0FPd2dUYjhiSmRjcXh2WEZiXzI4SFZqT21tbDduUWRyVWdFZXpmcVM5ZDR0VWtzZnF5UER6cGwwS2JlLV9CSTZ0Z0IyQ1h0YW9TcmhRTXZEckp4bWhmTkt6UTNYMk4zVkpnbUJmaDIxZnoyR2dWTEYwTUFEV0w2eUdUUGpoZk9XRkt4RVF1Z1NPdUpBeTcyWV9PY1Ffd2s0ZEdVekxGekhoeEl4TmNqaXYtbUJuSVdycFducERWdWtZajZnX011Q2w4eE9VMTBqQ1ZxRmdScWhXY1E3WWhzX1JZcHhxam9FbDVPN3Q1MWtrMUZuTUg3LVFQVHp1T1hpQWNDMzEzekVJWk9ybl91YUVjSkFob1VaMi1ONEtuMnRSOEg1S3QybUMwbVZDejItajBLTjM2Zy1hNzZQMW5LLVVDVGdFWm5BZUxNeEFnUkZzU3dxV0lCUlc0LWo4b05GczVpOGZSV2ZxbFBwUml6OU5tYjdnTks3Y3hrVEZVTHlmc1NPdFh4WE5pWldEZklOQUxBbjBpMTlkX3FFQVJ6c2NSZGdzTThycE92VW82enZKamhiRGFnU25aZGlHZHhZd2lUUmhuTVptNjhoWVlJQkxIOEkzbzJNMjZCZFJyM25tdXBnQ2ZWaHV3b2p6UWJpdk9xUEhBc1dyTlNmeF9wbm5yYUhHV01UZnVXWDFlNzBkdXlWUWhvcmJpSmljbmE3LUpUZEg4VzRwZ2JVSjdYUm1sODViQXVxUzdGTmZFbVpiN2V1YW5XV3U4b2VRWmxldGVGVHZsSldoekhVLU9wZ2V0cGZIYkNqM2pXVGctQVAyUm4xTHhpd1VVLXFhcnVEV21Rby1hbTlqTl84TjVveHdYTExUVkhHQ0ltaTB2WXJnY1NQVE5PbWg3ejgySElYc1JSTlQ3NDlFUWR6STZVUjVqaXFRN200NF9LY1ljQ0R2UldlWUtKY1NQVnJ4QXRyYTBGSWVuenhyM0Z0cWtndTd1eG8xRzY5a2dNZ1hkQm5MV3BHVzA2N1QwUkd6WlRGYTZQOUhnVWQ2S0Y5U0s1dXFNVXh5Q2pLWVUxSUQ2MlR1ak52NmRIZ2hlYTk1SGZGWS1RV3hWVU9rR3d1Rk9MLS11REZXbzhqMHpsSm1HYW1jMUNLT29YOHZsRWNaLTVvOFpmT3l3MHVwaERTT0dNLWFjcGRYZ25qT2szTkVFUnRFR3JWYS1aNXFIRnMyalozTlQzNFF2NXJLVHVPVF9zdTF6ZjlkbzJ4RFc2ZENmNFFxZDZzTzhfMUl0bW96V0lPZkh1dXFYZlEteFBlSG84Si1FNS1TTi1OMkFnX2pOYW8xY3MxMVJnVC02MDUyaXZfMEVHWDQtVlRpcENmV0h3V0dCWEFRS2prQXdNRlQ5dnRFVHU0Q1dNTmh0SlBCaU55bFMydWM1TTFFLW96ODBnV3dNZHFZTWZhRURYSHlrdzF3RlRuWDBoQUhSOUJWemtRM3pxcDJFbGJoaTJ3ZktRTlJxbXltaHBoZXVJVDlxS3cxNWo2c0ZBV0NzaUstRWdsMW1xLXFkanZGYUFiU0tSLXFQa0tkcDFoMV9kak41ZjQ0R214UmtOR1ZBanRuemY3Mmw1SkZ5aDZodGIzT3N2aV85MW9kcld6c0g0ZDgtTWo3
b3Y3VjJCRnR2U2tMVm9rUXNVRnVHbzZXVTZ6RmI2RkNmajBfMWVnODVFbnpkT0oyci15czJHU0p1cUowTGZJMzVnd3hIRjQyTVhKOGRkcFRKdVpyQ3Yzd01Jb1lSajFmV0paeEV0cjk1SmpmdWpDVFJMUmMtUFctOGhaTmlKQXNRVlVUNlhJemxudHZCR056SVlBb3NOTEYxRTRLaFlVd2d3TWtxVlB6ZEtQLTkxOGMyY3N0a2pYRFUweDBNaGhja2xSSklPOUZla1dKTWRNbG8tUGdSNEV5cW90OWlOZFlIUExBd3U2b2hyS1owbXVMM3p0Qm41cUtzWUxYNzB1N3JpUTNBSGdsT0NuamNTb1lIbXR4MG1sakNPVkxBUXRLVE1xX0YxWDhOcERIY1lTQVFqS01CaXZKNllFaXlIR0JsM1pKMmV1OUo3TGI1WkRaVnYxUTl1LTM0SU1qN1V1b0RCT0x0VHNLTmNLZnk1S0MxYnBBcm03WnVua0xqaEhGUzhOU253ZkppRzdudXBSVlMxeFVOSWxtZ1o2RVBSQUhEUEFuQ1hxSVZMME4yWUtaU3VyRGo3RkUyRUNjT0pNcE1BdE1ZRzdXVl8ydUtXZjdMdHdEVW4teHUtTi1HSGliLUxud21TX0NtcGVkRFBHNkZ1WTlNczR4OUJfUVluc1BoV09oWS1scUdsNnB5d1U5M1huX3k4QzAyNldtb2hybktYN2xKZ1NTNWFsaWwzV3pCRVhkaGR5eTNlV1d6ZzFfaFZTT0E4UjRpQ3pKdEZxUlJ6UFZXM3laUndyWEk2NlBXLUpoajVhZzVwQXpWVzUtVjVNZFBwdWdQa3AxZC1KdGdqNnhibjN4dmFYb2cxcEVwc1g5R09zRUdINUZtOE5QRjVUU0dpZy1QVl9odnFtVDNuWFZLSURtMXlSMlhRNTBWSVFJbEdOOWpfVWV0SmdRWDdlUXZZWE8xRUxDN1I0aEN6MHYwNzM1cmpJS0ZpMnBYWkxfb3FsbEV1VnlqWGxqdVJ6SHlwSjAzRlMycTBaQ295NXNnZERpUnJQcjhrUUd3bkI4bDVzRmxQblhkaFJPTTdISnVUQmhET3BOMTM4bjVvUEc2VmZhb2lrR1FyTUl2RWNEeGg0U0dsNnV6eU5zOUxiNDY5SXBxR0hBS00wOTgyWTFnWkQyaEtLVUloT3ZxZGh0RWVGRmJzenFsaUtfZENQM0JzdkVVeTdXR3hUSmJST1NBMUI1NkVFWncwNW5JZVVLX1p1RXdqVnFfQWpvQ08yQjZhN1NkTkpTSnUxOVRXZXE0WFEtZWxhZW1NNXYtQ2sya0VGLURmS01lMkctNVY3c2ZhN0ZGRFgwWHlabTFkeS1hcUZ1dDZ3cnpPQ3hha2IzVE11M0pqbklmU0diczBqTFBNZC1QZGp6VzNTSnJVSjJoWkJUQjVORG4tYUJmMEJtSUNUdVpEaGt6OTM3TjFOdVhXUHItZjRtZ25nU3NhZC1sVTVXNTRDTmxZbnlfeHNsdkpuMXhUYnE1MnpVQ0ZOclRWM1M4eHdXTzRXbFRZZVQtTS1iRVdXVWZMSGotcWg3MUxUYTFnSEEtanBCRHlZRUNIdGdpUFhsYjdYUndCZnRITzhMZVJ1dHFoVlVNb0duVjlxd0U4OGRuQVV3MG90R0hiYW5MWkxWVklzbWFRNzBfSUNrdzc5bVdtTXg0dExEYnRCaDI3c1I4TWFwLXZKR0wxSjRZYjZIV3ZqZjNqTWhFT0RGSDVMc1A1UzY2bDBiMGFSUy1fNVRQRzRJWDVydUpqb1ZfSHNVbldVeUN2YlAxSW5WVDdxVzJ1WHpLeUdmb0xWMDNHN05oQzY3YnhvUUdhS2xaOHNidkVvbTZtSHFlblhOYmwyR3NQdVJDRUdxREhWdF9ZcXhwUWxHc2hyLW5vUGhIUVhJNUNhY0hFU0ptVnI0TFVhZDE1TFBBUEstSkRoZWJ5MHJhUmZrR1ZrRlFtRGpxS1pO
MmFMQjBsdjluY3FiYUU4eGJVVXlZVEpuNWdHVVhJMGtwaTdZR2NDbXd2eHpOQ09SeTV6N1BaVUpsR1pQVDBZcElJUUt6VnVpQmxSYnE4Y1BCWV9IRWdVV0p3enBGVHItdnBGN3NyNWFBWmkySnByWThsbDliSlExQmp3LVlBaDIyZXp6UnR6cU9rTzJmTDBlSVpON0tiWllMdm1oME1zTFl2S2ZYYllhQlY2VHNZRGtHUDY4U1lIVExLZTU4VzZxSTZrZHl1ZTBDc0g4SjI4WGYyZHV1bm9wQ3R2Z09ld1ZmUkN5alJGeHZKSHl1bWhQVXpNMzdjblpLcUhfSm02Qlh5S1FVN3lIcHl0NnlRPT0=
# Feature SyncDelta JIRA configuration
Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEbm0yRUJ6VUJKbUwyRW5kMnRaNW4wM2YxMkJUTXVXZUdmdVRCaUZIVHU2TTV2RWZLRmUtZkcwZE4yRUNlNDQ0aUJWYjNfdVg5YjV5c2JwMHhoUUYxZWdkeS11bXR0eGxRLWRVaVU3cUVQZWJlNDRtY1lWUDdqeDVFSlpXS0VFX21WajlRS3lHQjc0bS11akkybWV3QUFlR2hNWUNYLUdiRjZuN2dQODdDSExXWG1Dd2ZGclI2aUhlSWhETVZuY3hYdnhkb2c2LU1JTFBvWFpTNmZtMkNVOTZTejJwbDI2eGE0OS1xUlIwQnlCSmFxRFNCeVJNVzlOMDhTR1VUamx4RDRyV3p6Tk9qVHBrWWdySUM3TVRaYjd3N0JHMFhpdzFhZTNDLTFkRVQ2RVE4U19COXRhRWtNc0NVOHRqUS1CRDFpZ19xQmtFLU9YSDU3TXBZQXpVcld3PT0=

View file

@ -66,7 +66,7 @@ Connector_AiAnthropic_MAX_TOKENS = 2000
# Perplexity AI configuration # Perplexity AI configuration
Connector_AiPerplexity_API_URL = https://api.perplexity.ai/chat/completions Connector_AiPerplexity_API_URL = https://api.perplexity.ai/chat/completions
Connector_AiPerplexity_API_SECRET = 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 Connector_AiPerplexity_API_SECRET = DEV_ENC:Z0FBQUFBQm82Mzk2Q1MwZ0dNcUVBcUtuRDJIcTZkMXVvYnpjM3JEMzJiT1NKSHljX282ZDIyZTJYc09VSTdVNXAtOWU2UXp5S193NTk5dHJsWlFjRjhWektFOG1DVGY4ZUhHTXMzS0RPN1lNcF9nSlVWbW5BZ1hkZDVTejl6bVZNRFVvX29xamJidWRFMmtjQmkyRUQ2RUh6UTN1aWNPSUJBPT0=
Connector_AiPerplexity_MODEL_NAME = sonar Connector_AiPerplexity_MODEL_NAME = sonar
Connector_AiPerplexity_TEMPERATURE = 0.2 Connector_AiPerplexity_TEMPERATURE = 0.2
Connector_AiPerplexity_MAX_TOKENS = 2000 Connector_AiPerplexity_MAX_TOKENS = 2000
@ -88,3 +88,7 @@ Connector_GoogleSpeech_API_KEY_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpETk5FWWM3Q0JKMzhI
# Feature SyncDelta JIRA configuration # Feature SyncDelta JIRA configuration
Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEbm0yRUJ6VUJKbUwyRW5kMnRaNW4wM2YxMkJUTXVXZUdmdVRCaUZIVHU2TTV2RWZLRmUtZkcwZE4yRUNlNDQ0aUJWYjNfdVg5YjV5c2JwMHhoUUYxZWdkeS11bXR0eGxRLWRVaVU3cUVQZWJlNDRtY1lWUDdqeDVFSlpXS0VFX21WajlRS3lHQjc0bS11akkybWV3QUFlR2hNWUNYLUdiRjZuN2dQODdDSExXWG1Dd2ZGclI2aUhlSWhETVZuY3hYdnhkb2c2LU1JTFBvWFpTNmZtMkNVOTZTejJwbDI2eGE0OS1xUlIwQnlCSmFxRFNCeVJNVzlOMDhTR1VUamx4RDRyV3p6Tk9qVHBrWWdySUM3TVRaYjd3N0JHMFhpdzFhZTNDLTFkRVQ2RVE4U19COXRhRWtNc0NVOHRqUS1CRDFpZ19xQmtFLU9YSDU3TXBZQXpVcld3PT0= Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEbm0yRUJ6VUJKbUwyRW5kMnRaNW4wM2YxMkJUTXVXZUdmdVRCaUZIVHU2TTV2RWZLRmUtZkcwZE4yRUNlNDQ0aUJWYjNfdVg5YjV5c2JwMHhoUUYxZWdkeS11bXR0eGxRLWRVaVU3cUVQZWJlNDRtY1lWUDdqeDVFSlpXS0VFX21WajlRS3lHQjc0bS11akkybWV3QUFlR2hNWUNYLUdiRjZuN2dQODdDSExXWG1Dd2ZGclI2aUhlSWhETVZuY3hYdnhkb2c2LU1JTFBvWFpTNmZtMkNVOTZTejJwbDI2eGE0OS1xUlIwQnlCSmFxRFNCeVJNVzlOMDhTR1VUamx4RDRyV3p6Tk9qVHBrWWdySUM3TVRaYjd3N0JHMFhpdzFhZTNDLTFkRVQ2RVE4U19COXRhRWtNc0NVOHRqUS1CRDFpZ19xQmtFLU9YSDU3TXBZQXpVcld3PT0=
# Debug Configuration
APP_DEBUG_CHAT_WORKFLOW_ENABLED = True
APP_DEBUG_CHAT_WORKFLOW_DIR = ./test-chat

View file

@ -0,0 +1,90 @@
# Integration Environment Configuration
# System Configuration
APP_ENV_TYPE = int
APP_ENV_LABEL = Integration Instance
APP_API_URL = https://gateway-int.poweron-center.net
APP_KEY_SYSVAR = CONFIG_KEY
APP_INIT_PASS_ADMIN_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjWm41MWZ4TUZGaVlrX3pWZWNwakJsY3Facm0wLVZDd1VKeTFoZEVZQnItcEdUUnVJS1NXeDBpM2xKbGRsYmxOSmRhc29PZjJSU2txQjdLbUVrTTE1NEJjUXBHbV9NOVJWZUR3QlJkQnJvTEU9
APP_INIT_PASS_EVENT_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjdmtrakgxa0djekZVNGtTZV8wM2I5UUpCZllveVBMWXROYk5yS3BiV3JEelJSM09VYTRONHpnY3VtMGxDRk5JTEZSRFhtcDZ0RVRmZ1RicTFhb3c5dVZRQ1o4SmlkLVpPTW5MMTU2eTQ0Vkk9
# PostgreSQL Storage (new)
DB_APP_HOST=gateway-int-server.postgres.database.azure.com
DB_APP_DATABASE=poweron_app
DB_APP_USER=heeshkdlby
DB_APP_PASSWORD_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjb2dka2pnN0tUbW1EU0w1Rk1jNERKQ0Z1U3JkVDhuZWZDM0g5M0kwVDE5VHdubkZna3gtZVAxTnl4MDdrR1c1ZXJ3ejJHYkZvcGUwbHJaajBGOWJob0EzRXVHc0JnZkJyNGhHZTZHOXBxd2c9
DB_APP_PORT=5432
# PostgreSQL Storage (new)
DB_CHAT_HOST=gateway-int-server.postgres.database.azure.com
DB_CHAT_DATABASE=poweron_chat
DB_CHAT_USER=heeshkdlby
DB_CHAT_PASSWORD_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjczYzOUtTa21MMGJVTUQ5UmFfdWc3YlhCbWZOeXFaNEE1QzdJV3BLVjhnalBkLVVCMm5BZzdxdlFXQXc2RHYzLWtPSFZkZE1iWG9rQ1NkVWlpRnF5TURVbnl1cm9iYXlSMGYxd1BGYVc0VDA9
DB_CHAT_PORT=5432
# PostgreSQL Storage (new)
DB_MANAGEMENT_HOST=gateway-int-server.postgres.database.azure.com
DB_MANAGEMENT_DATABASE=poweron_management
DB_MANAGEMENT_USER=heeshkdlby
DB_MANAGEMENT_PASSWORD_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjTnJKNlJMNmEwQ0Y5dVNrR3pkZk9SQXVvLTRTNW9lQ1g3TTE5cFhBNTd5UENqWW9qdWd3NWNseWhnUHJveDJyd1Z3X1czS3VuZnAwZHBXYVNQWlZsRy12ME42NndEVlR5X3ZPdFBNNmhLYm89
DB_MANAGEMENT_PORT=5432
# Security Configuration
APP_JWT_KEY_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjNUctb2RwU25iR3ZnanBOdHZhWUtIajZ1RnZzTEp4aDR0MktWRjNoeVBrY1Npd1R0VE9YVHp3M2w1cXRzbUxNaU82QUJvaDNFeVQyN05KblRWblBvbWtoT0VXbkNBbDQ5OHhwSUFnaDZGRG10Vmgtdm1YUkRsYUhFMzRVZURmSFlDTFIzVWg4MXNueDZyMGc5aVpFdWRxY3dkTExGM093ZTVUZVl5LUhGWnlRPQ==
APP_TOKEN_EXPIRY=300
# CORS Configuration
APP_ALLOWED_ORIGINS=http://localhost:8080,https://playground.poweron-center.net,https://playground-int.poweron-center.net,http://localhost:5176,https://nyla.poweron-center.net,https://nyla-int.poweron-center.net
# Logging configuration
APP_LOGGING_LOG_LEVEL = DEBUG
APP_LOGGING_LOG_DIR = /home/site/wwwroot/
APP_LOGGING_FORMAT = %(asctime)s - %(levelname)s - %(name)s - %(message)s
APP_LOGGING_DATE_FORMAT = %Y-%m-%d %H:%M:%S
APP_LOGGING_CONSOLE_ENABLED = True
APP_LOGGING_FILE_ENABLED = True
APP_LOGGING_ROTATION_SIZE = 10485760
APP_LOGGING_BACKUP_COUNT = 5
# Service Redirects
Service_MSFT_REDIRECT_URI = https://gateway-int.poweron-center.net/api/msft/auth/callback
Service_GOOGLE_REDIRECT_URI = https://gateway-int.poweron-center.net/api/google/auth/callback
# OpenAI configuration
Connector_AiOpenai_API_URL = https://api.openai.com/v1/chat/completions
Connector_AiOpenai_API_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjSDBNYkptSkQxTUotYVVpZVNZc0dxNGNwSEtkOEE0T3RZWjROTEhSRlRXdlZmQUxxZ0w3Y0xOV2JNV19LNF9yTUZiU1pUNG15U2VDUDdSVlI4VlpnR3JXVFFtcXBaTEZiaUtSclVFd0lCZG1rWVhra1dfWTVQOTBEYUU0MjByYVNEMTFmeXNOcmpUT216MmJKdlVPeW5nPT0=
Connector_AiOpenai_MODEL_NAME = gpt-4o
Connector_AiOpenai_TEMPERATURE = 0.2
Connector_AiOpenai_MAX_TOKENS = 2000
# Anthropic configuration
Connector_AiAnthropic_API_URL = https://api.anthropic.com/v1/messages
Connector_AiAnthropic_API_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjT1ZlRWVJdVZMT3ljSFJDcFdxRFBRVkZhS204NnN5RDBlQ0tpenhTM0FFVktuWW9mWHNwRWx2dHB0eDBSZ0JFQnZKWlp6c01pVGREWHd1eGpERnU0Q2xhaks1clQ1ZXVsdnd2ZzhpNXNQS1BhY3FjSkdkVEhHalNaRGR4emhpakZncnpDQUVxOHVXQzVUWmtQc0FsYmFwTF9TSG5FOUFtWk5Ick1NcHFvY2s1T1c2WXlRUFFJZnh6TWhuaVpMYmppcDR0QUx0a0R6RXlwbGRYb1R4dzJkUT09
Connector_AiAnthropic_MODEL_NAME = claude-3-5-sonnet-20241022
Connector_AiAnthropic_TEMPERATURE = 0.2
Connector_AiAnthropic_MAX_TOKENS = 2000
# Perplexity AI configuration
Connector_AiPerplexity_API_URL = https://api.perplexity.ai/chat/completions
Connector_AiPerplexity_API_SECRET = INT_ENC:Z0FBQUFBQm82Mzk2UWZJdUFhSW8yc3RKc0tKRXphd0xWMkZOVlFpSGZ4SGhFWnk0cTF5VjlKQVZjdS1QSWdkS0pUSWw4OFU5MjUxdTVQel9aeWVIZTZ5TXRuVmFkZG0zWEdTOGdHMHpsTzI0TGlWYURKU1Q0VVpKTlhxUk5FTmN6SUJScDZ3ZldIaUJZcWpaQVRiSEpyQm9tRTNDWk9KTnZBPT0=
Connector_AiPerplexity_MODEL_NAME = sonar
Connector_AiPerplexity_TEMPERATURE = 0.2
Connector_AiPerplexity_MAX_TOKENS = 2000
# Agent Mail configuration
Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
Service_MSFT_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjNzB2M3ZjaE1SVE9ON2FKam9yVURxcHl1Ym5VNVUtS0MyWUpNVXVlaWpWS2U3VVd3em9vQl9lcnVYay03bS04YjNBbDZZNTB4eUtjT3ppQjJjY3dOT0FNLW9LeDhIUU5iaTNqNURUWE5La3kzaHNGcU9yNVI0YjhWZTZRRFktcTk=
Service_MSFT_TENANT_ID = common
# Google Service configuration
Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
Service_GOOGLE_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjNThGeVRNd3hacThtRnE0bzlDa0JPUWQyaEd6QjlFckdsMGZjRlRfUks2bXV3aDdVRTF3LVRlZVY5WjVzSXV4ZGNnX002RDl3dkNYdGFzZkxVUW01My1wTHRCanVCLUozZEx4TlduQlB5MnpvNTR2SGlvbFl1YkhzTEtsSi1SOEo=
# Tavily Web Search configuration
Connector_WebTavily_API_KEY_SECRET = INT_ENC:Z0FBQUFBQm8xSVRkdkJMTDY0akhXNzZDWHVYSEt1cDZoOWEzSktneHZEV2JndTNmWlNSMV9KbFNIZmQzeVlrNE5qUEIwcUlBSGM1a0hOZ3J6djIyOVhnZzI3M1dIUkdicl9FVXF3RGktMmlEYmhnaHJfWTdGUkktSXVUSGdQMC1vSEV6VE8zR2F1SVk=
# Google Cloud Speech Services configuration
Connector_GoogleSpeech_API_KEY_SECRET = INT_ENC:Z0FBQUFBQm8xSVRkNmVXZ1pWcHcydTF2MXF0ZGJoWHBydF85bTczTktiaEJ3Wk1vMW1mZVhDSG1yd0ZxR2ZuSGJTX0N3MWptWXFJTkNTWjh1SUVVTXI4UDVzcGdLMkU5SHJ2TUpkRlRoRWdnSldtYjNTQkh4UDJHY2xmdTdZQ1ZiMTZZcGZxS3RzaHdjV3dtVkZUcEpJcWx0b2xuQVR6ZmpoVFZPY1hNMTV2SnhDaC1IZEh4UUpLTy1ILXA4RG1zamJTbUJ4X0t2M2NkdzJPbEJxSmFpRzV3WC0wZThoVzlxcmpHZ3ZkLVlVY3REZk1vV19WQ05BOWN6cnJ4MWNYYnNiQ0FQSUVnUlpfM3BhMnlsVlZUOG5wM3pzM1lSN1UzWlZKUXRLczlHbjI1LTFvSUJ4SlVXMy1BNk43bE5Hb0RfTTVlWk9oZnFIaVg0SW5pbm9EcXRTTzU1RFlYY3dTcnpKWWNyNjN5T1BGZ0FmX253cEFncmhvZVRuM05KYzhkOEhFMFJsc2NBSEwzZVZ1R0JMOGxsekVwUE55alZaRXFrdzNWWVNGWXNmbnhKeWhQSFo2VXBTUlRPeHdvdVdncEFuOWgydEtsSUFneUN6cGVaTnBSdjNCdVJseGJFdmlMc203UFhLVlYyTENkaGg2dVN6Z2xwT1ZmTmN5bVZGUkM3ZWcyVkt2ckFUVVd3WFFwYnJjNVRobEh2SkVJbXRwUUpEOFJKQ1NUc0Q4NHNqUFhPSDh5cTV6MEcwSDEwRUJCQ2JiTTJlOE5nd3pMMkJaQ1dVYjMwZVVWWnlETmp2dkZ3aXEtQ29WNkxZTFkzYUkxdTlQUU1OTnhWWU12YU9MVnJQa1d2ZjRtUlhneTNubEMxTmp1eUNPOThSMlB3Y1F0T2tCdFNsNFlKalZPV25yR2QycVBUb096RmZ1V0FTaGsxLV9FWDBmenBIOXpMdGpLcUc0TWRoY2hlMFhYTzlET1ZRekw0ZHNwUVBQdVJBX2h6Q2ZzWVZJWTNybTJiekp3WmhmWF9SUFBXQzlqUjctcVlHWWVMZWVQallzR0JGTVF0WmtnWlg1aTM1bFprNVExZXY5dnNvWF93UjhwbkJ3RzNXaVJ2d2RRU3JJVlBvaVh4eTlBRUtqWkJia3dJQVVBV2Nqdm9FUTRUVW1TaHp2ZUwxT0N2ZndxQ2Nka1RYWXF0LWxIWFE0dTFQcVhncFFPM0hFdUUtYlFnemx3WkF4bjA1aDFULUdrZlVZbEJtRGRCdjJyVkdJSXozd0I0dF9zbWhOeHFqRDA4T1NVaWR5cjBwSVgwbllPU294NjZGTnM1bFhIdGpNQUxFOENWd3FCbGpSRFRmRXotQnU0N2lCVEU5RGF6Qi10S2U2NGdadDlrRjZtVE5oZkw5ZWFjXzhCTmxXQzNFTFgxRXVYY3J3YkxnbnlBSm9PY3h4MlM1NVFQbVNDRW5Ld1dvNWMxSmdoTXJuaE1pT2VFeXYwWXBHZ29MZDVlN2lwUUNIeGNCVVdQVi1rRXdJMWFncUlPTXR0MmZVQ1l0d09mZTdzWGFBWUJMUFd3b0RSOU8zeER2UWpNdzAxS0ZJWnB5S3FJdU9wUDJnTTNwMWw3VFVqVXQ3ZGZnU1RkUktkc0NhUHJ0SGFxZ0lVWDEzYjNtU2JfMGNWM1Y0dHlCTzNESEdENC1jUWF5MVppRzR1QlBNSUJySjFfRi1ENHEwcmJ4S3hQUFpXVHA0TG9DZWdoUlo5WnNSM1lCZm1KbEs2ak1yUUU4Wk9JcVJGUkJwc0NvUkMyTjhoTWxtZmVQeDREZVRKZkhYN2duLVNTeGZzdFdBVnhEandJSXB5QjM0azF0ckI3Tk1wSzFhNGVOUVRrNjU0cG9JQ29pN09xOFkwR1lMTlktaGp4TktxdTVtTnNEcldsV2pEZm5nQWpJc2hxY0hjQnVSWUR5VVdaUXBH
WUloTzFZUC1oNzJ4UjZ1dnpLcDJxWEZtQlNIMWkzZ0hXWXdKeC1iLXdZWVJhcU04VFlpMU5pd2ZIdTdCdkVWVFVBdmJuRk16bEFFQTh4alBrcTV2RzliT2hGdTVPOXlRMjFuZktiRTZIamQ1VFVqS0hRTXhxcU1mdkgyQ1NjQmZfcjl4c3NJd0RIeDVMZUFBbHJqdEJxWWl3aWdGUEQxR3ZnMkNGdVB4RUxkZi1xOVlFQXh1NjRfbkFEaEJ5TVZlUGFrWVhSTVRPeGxqNlJDTHNsRWRrei1pYjhnUmZrb3BvWkQ2QXBzYjFHNXZoWU1LSExhLWtlYlJTZlJmYUM5Y1Rhb1pkMVYyWTByM3NTS0VXMG1ybm1BTVN2QXRYaXZqX2dKSkZrajZSS2cyVlNOQnd5Y29zMlVyaWlNbTJEb3FuUFFtbWNTNVpZTktUenFZSl91cVFXZjRkQUZyYmtPczU2S1RKQ19ONGFOTHlwX2hOOEE1UHZEVjhnT0xxRjMxTEE4SHhRbmlmTkZwVXJBdlJDbU5oZS05SzI4QVhEWDZaN2ZiSlFwUGRXSnB5TE9MZV9ia3pYcmZVa1dicG5FMHRXUFZXMWJQVDAwOEdDQzJmZEl0ZDhUOEFpZXZWWXl5Q2xwSmFienNCMldlb2NKb2ZRYV9KbUdHRzNUcjU1VUFhMzk1a2J6dDVuNTl6NTdpM0hGa3k0UWVtbF9pdDVsQVp2cndDLUU5dnNYOF9CLS0ySXhBSFdCSnpqV010bllBb3U0cEZZYVF5R2tSNFM5NlRhdS1fb1NqbDBKMkw0V2N0VEZhNExtQlR3ckZ3cVlCeHVXdXJ6X0s4cEtsaG5rVUxCN2RRbHQxTmcyVFBqYUxyOHJzeFBXVUJaRHpXbUoxdHZzMFBzQk1UTUFvX1pGNFNMNDFvZWdTdEUtMUNKMXNIeVlvQk1CeEdpZVdmN0tsSDVZZHJXSGt5c2o2MHdwSTZIMVBhRzM1eU43Q2FtcVNidExxczNJeUx5U2RuUG5EeHpCTlg2SV9WNk1ET3BRNXFuc0pNWlVvZUYtY21oRGtJSmwxQ09QbHBUV3BuS3B5NE9RVkhfellqZjJUQ0diSV94QlhQWmdaaC1TRWxsMUVWSXB0aE1McFZDZDNwQUVKZ2t5cXRTXzlRZVJwN0pZSnJSV21XMlh0TzFRVEl0c2I4QjBxOGRCYkNxek04a011X1lrb2poQ3h2LUhKTGJiUlhneHp5QWFBcE5nMElkNTVzM3JGOWtUQ19wNVBTaVVHUHFDNFJnNXJaWDNBSkMwbi1WbTdtSnFySkhNQl9ZQjZrR2xDcXhTRExhMmNHcGlyWjR3ZU9SSjRZd1l4ZjVPeHNiYk53SW5SYnZPTzNkd1lnZmFseV9tQ3BxM3lNYVBHT0J0elJnMTByZ3VHemxta0tVQzZZRllmQ2VLZ1ZCNDhUUTc3LWNCZXBMekFwWW1fQkQ1NktzNGFMYUdYTU0xbXprY1FONUNlUHNMY3h2NFJMMmhNa3VNdzF4TVFWQk9odnJUMjFJMVd3Z2N6Sms5aEM2SWlWZFViZ0JWTEpUWWM5NmIzOS1oQmRqdkt1NUUycFlVcUxERUZGbnZqTUxIYnJmMDBHZDEzbnJsWEEzSUo3UmNPUDg1dnRUU1FzcWtjTWZwUG9zM0JTY3RqMDdST2UxcXFTM0d0bGkwdFhnMk5LaUlxNWx3V1pLaVlLUFJXZzBzVl9Ia1V1OHdYUEFWOU50UndycGtCdzM0Q0NQamp2VTNqbFBLaGhsbUk5dUI5MjU5OHVySk1oY0drUWtXUloyVVRvOWJmbUVYRzFVeWNQczh2NXJCeVppRlZiWDNJaDhOSmRmX2lURTNVS3NXQXFZT1QtUmdvMWJoVWYxU3lqUUJhbzEyX3I3TXhwbm9wc1FoQ1ZUTlNBRjMyQTBTY2tzbHZ3RFUtTjVxQ0o1QXRTVks2WENwMGZCRGstNU1jN3FhUFJCQThyaFhh
MVRsbnlSRXNGRmt3Yk01X21ldmV3bTItWm1JaGpZQWZROEFtT1d1UUtPQlhYVVFqT2NxLUxQenJHX3JfMEdscDRiMXcyZ1ZmU3NFMzVoelZJaDlvT0ZoRGQ2bmtlM0M5ZHlCd2ZMbnRZRkZUWHVBUEx4czNfTmtMckh5eXZrZFBzOEItOGRYOEhsMzBhZ0xlOWFjZzgteVBsdnpPT1pYdUxnbFNXYnhKaVB6QUxVdUJCOFpvU2x2c1FHZV94MDBOVWJhYkxISkswc0U5UmdPWFJLXzZNYklHTjN1QzRKaldKdEVHb0pOU284N3c2LXZGMGVleEZ5NGZ6OGV1dm1tM0J0aTQ3VFlNOEJrdEh3PT0=
# Feature SyncDelta JIRA configuration
Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = INT_ENC:Z0FBQUFBQm8xSVRkTUNsWm4wX0p6eXFDZmJ4dFdHNEs1MV9MUzdrb3RzeC1jVWVYZ0REWHRyZkFiaGZLcUQtTXFBZzZkNzRmQ0gxbEhGbUNlVVFfR1JEQTc0aldkZkgyWnBOcjdlUlZxR0tDTEdKRExULXAyUEtsVmNTMkRKU1BJNnFiM0hlMXo4YndMcHlRMExtZDQ3Zm9vNFhMcEZCcHpBPT0=

View file

@ -66,7 +66,7 @@ Connector_AiAnthropic_MAX_TOKENS = 2000
# Perplexity AI configuration # Perplexity AI configuration
Connector_AiPerplexity_API_URL = https://api.perplexity.ai/chat/completions Connector_AiPerplexity_API_URL = https://api.perplexity.ai/chat/completions
Connector_AiPerplexity_API_SECRET = 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 Connector_AiPerplexity_API_SECRET = INT_ENC:Z0FBQUFBQm82Mzk2UWZJdUFhSW8yc3RKc0tKRXphd0xWMkZOVlFpSGZ4SGhFWnk0cTF5VjlKQVZjdS1QSWdkS0pUSWw4OFU5MjUxdTVQel9aeWVIZTZ5TXRuVmFkZG0zWEdTOGdHMHpsTzI0TGlWYURKU1Q0VVpKTlhxUk5FTmN6SUJScDZ3ZldIaUJZcWpaQVRiSEpyQm9tRTNDWk9KTnZBPT0=
Connector_AiPerplexity_MODEL_NAME = sonar Connector_AiPerplexity_MODEL_NAME = sonar
Connector_AiPerplexity_TEMPERATURE = 0.2 Connector_AiPerplexity_TEMPERATURE = 0.2
Connector_AiPerplexity_MAX_TOKENS = 2000 Connector_AiPerplexity_MAX_TOKENS = 2000
@ -88,3 +88,7 @@ Connector_GoogleSpeech_API_KEY_SECRET = INT_ENC:Z0FBQUFBQm8xSVRkNmVXZ1pWcHcydTF2
# Feature SyncDelta JIRA configuration # Feature SyncDelta JIRA configuration
Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = INT_ENC:Z0FBQUFBQm8xSVRkTUNsWm4wX0p6eXFDZmJ4dFdHNEs1MV9MUzdrb3RzeC1jVWVYZ0REWHRyZkFiaGZLcUQtTXFBZzZkNzRmQ0gxbEhGbUNlVVFfR1JEQTc0aldkZkgyWnBOcjdlUlZxR0tDTEdKRExULXAyUEtsVmNTMkRKU1BJNnFiM0hlMXo4YndMcHlRMExtZDQ3Zm9vNFhMcEZCcHpBPT0= Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = INT_ENC:Z0FBQUFBQm8xSVRkTUNsWm4wX0p6eXFDZmJ4dFdHNEs1MV9MUzdrb3RzeC1jVWVYZ0REWHRyZkFiaGZLcUQtTXFBZzZkNzRmQ0gxbEhGbUNlVVFfR1JEQTc0aldkZkgyWnBOcjdlUlZxR0tDTEdKRExULXAyUEtsVmNTMkRKU1BJNnFiM0hlMXo4YndMcHlRMExtZDQ3Zm9vNFhMcEZCcHpBPT0=
# Debug Configuration
APP_DEBUG_CHAT_WORKFLOW_ENABLED = False
APP_DEBUG_CHAT_WORKFLOW_DIR = ./test-chat

View file

@ -0,0 +1,90 @@
# Production Environment Configuration
# System Configuration
APP_ENV_TYPE = prod
APP_ENV_LABEL = Production Instance
APP_API_URL = https://gateway.poweron-center.net
APP_KEY_SYSVAR = CONFIG_KEY
APP_INIT_PASS_ADMIN_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pSXoyVEVwNDZ6cmthQTROUkxGUjh1UWF2UU5zaWRuX3p2aHJCVFo2NEstR0RqdnQ5clZmeVliRlhHZGFHTlhZV2dzMmRPZFVEemVlSHd5VHR3cmpNUXRaRlhZSFZ6d1dsX2Y5Zl9lOXdYdEU9
APP_INIT_PASS_EVENT_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5peGNMWExjWGZxQ2VndXVOSUVGcWhQTWd0N3d0blU3bGJvNjgzNVVNNktCQnZlTEtVckV5RUtQMjMwRTBkdmxEMlZwX0k1M1hlOFFNY3hjaWsyd2JmRGl2UWxfSXEwenVnQ3NmaTlxckp2VXM9
# PostgreSQL Storage (new)
DB_APP_HOST=gateway-prod-server.postgres.database.azure.com
DB_APP_DATABASE=poweron_app
DB_APP_USER=gzxxmcrdhn
DB_APP_PASSWORD_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pVmtwYWZQakdWZnJPamVlRWJPa0tnc3daSVVHejVrQ0x1VFZZbHhVSkk0S2tFWl92T2NwWURBMU9UbFROMHZ2TkNKZFlEWjhJZDZ0bnFndC1oYjhNRW1VLWpEYnlDNEJwcGVKckpUVlp6YTg9
DB_APP_PORT=5432
# PostgreSQL Storage (new)
DB_CHAT_HOST=gateway-prod-server.postgres.database.azure.com
DB_CHAT_DATABASE=poweron_chat
DB_CHAT_USER=gzxxmcrdhn
DB_CHAT_PASSWORD_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pZVZnTzBPTDY1Q3c2U1pDV0lxbXhoWnlYSXRDWVhIeGJwSkdNMzMxR2h5a1FRN00xcWtYUE4ySGpqRllSaGM5SmRZZk9Bd2trVDJNZDdWcEFIbTJtel91MHpsazlTQnRsV2docGdBc0RVeEU9
DB_CHAT_PORT=5432
# PostgreSQL Storage (new)
DB_MANAGEMENT_HOST=gateway-prod-server.postgres.database.azure.com
DB_MANAGEMENT_DATABASE=poweron_management
DB_MANAGEMENT_USER=gzxxmcrdhn
DB_MANAGEMENT_PASSWORD_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pQXdaRnVEQUx2MmU5ck9XZzNfaGVoRXlYMlVjSVM5dWNTekhmR2VYNkd6WVhELUlkLWdFWWRWQ1JJLWZ4WUNwclZVRlg3ZHBCS0xwM1laNklTaEs1czFDRTMxYlV2TWNueEJlTHFyNEt4aVk9
DB_MANAGEMENT_PORT=5432
# Security Configuration
APP_JWT_KEY_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pY3JfX1R3cEJhTjAzZGx2amtRSE4yVzZhMmY3a3FHam9BdzBxVWd5R0FRSW1KbmNGS3JDMktKTWptZm4wYmZZZTVDQkh3NVlxSW1MZEdiVWdORng4dm0xV08wZDh0YlBNQTdEbmlnVWduMzNWY1RPX1BqaGtnOTc2ZWNBTnNnd1AtaTNRUExpRThVdzNmdVFHM2hkTjFjcW0ya2szMWNaT3VDeDhXMlJ1NDM4PQ==
APP_TOKEN_EXPIRY=300
# CORS Configuration
APP_ALLOWED_ORIGINS=http://localhost:8080,https://playground.poweron-center.net,https://playground-int.poweron-center.net,http://localhost:5176,https://nyla.poweron-center.net,https://nyla-int.poweron-center.net
# Logging configuration
APP_LOGGING_LOG_LEVEL = DEBUG
APP_LOGGING_LOG_DIR = /home/site/wwwroot/
APP_LOGGING_FORMAT = %(asctime)s - %(levelname)s - %(name)s - %(message)s
APP_LOGGING_DATE_FORMAT = %Y-%m-%d %H:%M:%S
APP_LOGGING_CONSOLE_ENABLED = True
APP_LOGGING_FILE_ENABLED = True
APP_LOGGING_ROTATION_SIZE = 10485760
APP_LOGGING_BACKUP_COUNT = 5
# Service Redirects
Service_MSFT_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/msft/auth/callback
Service_GOOGLE_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/google/auth/callback
# OpenAI configuration
Connector_AiOpenai_API_URL = https://api.openai.com/v1/chat/completions
Connector_AiOpenai_API_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pU05XM2hMaExPMnpYeFpwRVhyYl9JZmRITmlmRDlWOUJSSWE4NTFLZUptSkJhNlEycHBLZmh3WFA2ZmU5VmxHZks1UUNVOUZnckZNdXZ2MTY2dFg1Nl8yWDRrcTRlT0tHYkhyRGZINTEzU25iYVFRMzJGeUZIdlc4LU9GbmpQYmtmU3lJT2VVZ1UzLVd3R25ZQ092SUVnPT0=
Connector_AiOpenai_MODEL_NAME = gpt-4o
Connector_AiOpenai_TEMPERATURE = 0.2
Connector_AiOpenai_MAX_TOKENS = 2000
# Anthropic configuration
Connector_AiAnthropic_API_URL = https://api.anthropic.com/v1/messages
Connector_AiAnthropic_API_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pNTA1RkZ3UllCOXVsNVZzbkw2Rkl1TWxCZ0wwWEVXUm9ReUhBcVl1cGFUdW9FRVh4elVxR0x3NVRxZkc4SkxHVFdzSU1YNG5Rb0FqSHJhdElwWm1iLWdubTVDcUl3UkVjVHNoU0xLa0ZTSFlfTlJUVXg4cVVwUWdlVDBTSFU5SnBzS0ZnVjlQcmtiNzV2UTNMck1IakZ0OWlubUtlWDZnMk4yX2JsZ1U4Wm1yT29fM2d2NVBNOWNBbWtTRWNyQ2tZNjhwSVF6bG5SU3dTenR2MzA3Z19NUT09
Connector_AiAnthropic_MODEL_NAME = claude-3-5-sonnet-20241022
Connector_AiAnthropic_TEMPERATURE = 0.2
Connector_AiAnthropic_MAX_TOKENS = 2000
# Perplexity AI configuration
Connector_AiPerplexity_API_URL = https://api.perplexity.ai/chat/completions
Connector_AiPerplexity_API_SECRET = PROD_ENC:Z0FBQUFBQm82Mzk2Q1FGRkJEUkI4LXlQbHYzT2RkdVJEcmM4WGdZTWpJTEhoeUF1NW5LUVpJdDBYN3k1WFN4a2FQSWJSQmd0U0xJbzZDTmFFN05FcXl0Z3V1OEpsZjYydV94TXVjVjVXRTRYSWdLMkd5XzZIbFV6emRCZHpuOUpQeThadE5xcDNDVGV1RHJrUEN0c1BBYXctZFNWcFRuVXhRPT0=
Connector_AiPerplexity_MODEL_NAME = sonar
Connector_AiPerplexity_TEMPERATURE = 0.2
Connector_AiPerplexity_MAX_TOKENS = 2000
# Agent Mail configuration
Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
Service_MSFT_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pVEhHdlZHU3FNMmhuRGVwaGc3YzIxSjlZNzBCQjlOV2pSYVNXb0t1ZnVwQzZsQzY4cHMtVlZtNF85OEVaV1BMTzdXMmpzaGZpaG1DalJ0bkNPMHA5ZUcwZjNDdGk1TFdxYTJSZnVrVmhhZ2VRUEZxbjJOOGFhWk9EYlY3dmRVTnI=
Service_MSFT_TENANT_ID = common
# Google Service configuration
Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
Service_GOOGLE_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pV2JEV0lNUXhwa1VTUGh2RWcyYnJHSFQyTmdBOEhwRkJWc3MwOFZlcHJGUmlGOVVFbG1XalNyUXVuaExESy1xeFNIQlRiSFVIWTB6Rm1fNFg0OHZZSkF4ZlBIcFZDMjZHcFRERXJ0WlVFclhHa29Za1BqWGxsM05NZGFRc1BLZnE=
# Tavily Web Search configuration
Connector_WebTavily_API_KEY_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pMjhJNS1CZFJubUlkN3ZrTUoxR0Y1QzJFWEJSMk0wQkI0UndqOW1UelVieWhGaTVBcHoxRXo1VjRzVVRROHFIeHMyS3Q5cDZCeUlEMzE1ZlhVTmNveFk5VmFQMm80NTRyVW1TZHVsR3dUN0RtMnd4LW1VWlpqOXJPeXZBTmg4OEM=
# Google Cloud Speech Services configuration
Connector_GoogleSpeech_API_KEY_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pNjlJdmFMeERXUUQzR0duRUY4cGRZRzdwQlpnVFAzSzQ5cHZNRnVUZ0xWd3dQMHR3QjVsdF92NmdUQlJGRk1RcG1RYWZzcE9RbEhjQmR5Yk5Ud3ZKTW5jbmpEVGJ2ZkxVeVJpcUxaT2lNREFXaks5WHg5aVlHcXlUZldMdnZGYklHWjlJOWJ6Wm5RSkNmdm5feENjS1E0QUVXTTE5SW5sNFBEeTJ1RjRmVm9SQUNIYmF2U1U2dklsbTVlWFpCcHMwTFF1SUg5NmNfcWhQRFlpeWt0U19HMXNuUHd2RFdrVl9XdUFaY0hWdVBPYWlybU1CdGlCN1A0RzZBbi1IUVJ1TWMxTE9Ea09sTURhcDFZb1JIUW1zUFJybW15MDcxOUtfVXA2N0xwMnFrczA1YTJaN05pRHhOYWNzMjVmUHdhbVdlemF3TEIzN0pJaVo3bGJBMXJnZmNYTXVJVDdmYkRXWTlBT2F2NmN4eTlteUI1SlJTOXc2WWFWUTBCZTJBVHRLVDhEVjBFeHE0Nmk1YkxYd3N3RXgtVUdGdlZFSmk4dHM0QjFmbktsQTctbmJMT0MtMDlKS1pUR0pELXBxckhULUUycjlBZmVJQjFrM0xEUm50U2ZabExtVjZ1WWZ1WnlobUZIOVlndjNydUZfczJUWVVRZURTd1lYazllaER4VU10cXUyVS1ZNG9Ha2hnbTAzOEpGMklFSWpWeVV5eFB2UlVWYmJJakZnOVM2R2lJSXRSM3VzVEZZNUVpNmVjRzdXRUJsT2hzcjhZWERFeGV5c1dFQVM3dkhGY2Q3ckNBRDZCcVdhZnZkdzM3QVNpODZYWE81TEIyZGUycldkSVRvbm5hR3Jib2UzOEtXdUpHQ2FyWDQtMDdQbC1ycEdfUzdXd0U2dHFIVjhoRDJ0YkNsWUpva1dzOGNPdXRpZjVwUldtT3FVN3RrZUhTN3JfX1M3LU9PaXZELWkzRmtMbjgxZGZ6ZjVJNW9RZW1nM2hqUXo4Z2I5Z2tSVTVMdUNLblRxOGQ1Y3F4SGZIbWo4YkFBV3FIbjB6LUxGNHdsQWgxQUM4bzVrblBObFFfVWNaQ3QwejQ1eGFlSXVIcXlyVEZEdzVKNV9pd2o4RW1UVjlqb3VMWnF0V1JTcWF1R0RjdUNjM2lLUHRqZDl2WWtXUnhmbVdxeHA3REFHTkdkMjM4LTllajBWQnd3RHlFSVdiUThfQnduOVFJdmR6OUVGN1lOYjBqclhadHozX21kRzlUT2EtWVBkYWFRSjRGdW80dmlEUTVrVjhWbjJYNGtCeGNtNzRHQXJsRlZyWjBYdHltVDM2MV9IT0RFT2dLLTVBREtsS09HdUxrODRLcEQ1TmRoVDh6WmgybGc5MzgtbmJSYThQd3FFaUcxbmg3eE95RkJVX2hHM20wT1k2c21qd24wSkFWNGROaklQeHZrc21PdTVsdHVxR0pxd3Ztb1NQVHEtd25URHRNa1pqa3BLdVdkTnNFeDNManJST0dOb1RWM2hqekxFTlFSZkd6TlZBY1VQT1NFOVlDQzlPQWVlVXQ4MW0wdGkzd0Myam1lSWE2aEtVVTVNc3N3dENpa1BWRl9ZQ3daYllONWRmRUF0THpleFRmdWRqTFM2aldmLUFuZzFGdkFQNHR6d21SdzRGQ0Q4cU8yV0xGUTVUY01TZlYxSzZ4cmtfUGZvVDhmYmNBX1pibTVTcl9lenJoME9KSnBucUxPRU1PRXBmLWFENEgwRWZOU0RvRDlvQk9ueVp0dXJrUVgtQUk5VldVbV9MS19PYmlua3liWl80Z2hMcFRnTXBkZDA3enIxRWFzaU56TEZKa0hPQUtNY0dCY1pnQ2V3Zml6ZFczWFBESUlLd3BSVEs5ZXlGLUpINDRsd1NBVjBkR1dvbE8wLWZBeEhFQ0hvY3E5UGJsTDdteGd
SRjBIZTRobXpsd29PMmhKQkxXY3Znd2FMdWtZU1VkQlVRZXlSZ3FaVnNqcXpwR3N3SktOTDA3aUZIcE9TR1VDcXdaTDhQX2E5VDlwckoyX0xlNmFQcnoydEkwc0s1S08yaVlsM0pwYktUVWl3LU5hQzF2UVZNSm9ZR3QyQWdrUXB2a25QNzhkVEFOYmZ0b1BmTXRCMmVQZTAtYzdOeUlBYlNINlZNZW1nUTFfSV92UlJiWGt6Qms1c1hBc3kzZkVRMzEwNVJDOS1JeVg4YWtVeUJyOTZPQ0FnSUs1Z25sMlY0S1V1c0dIWEpuX2pMQmZ4Z29SY1U0bVZscXNWcjJwRy1UZEFYSXBzQURGblRTelBybU5BeDF6N3hZLXZwSHBkMmlzbHZWN2JkU3hRcE0zQ0hna3QwYWlJX3hBdGcxUHdGRE55cndUNHRvbXU5VTRMRmZDRjhvXzIwajI1Y0RCcmR2OV94cS1XYkNwalNHS2lObHlkNGZBbklycnZMSlJYVnlfakRXb1ZfWUo2MGxzYUNIektYeENGTkUzMUJXRE9WRHRrY2o5UFJHckZza2RQbjNPUkstbG9GZG4yNmxKeEdtbHo4WDZFc0lvT01wZkxuN29ycXl3X1hTN1prRGdvWG9hRFYwNzBwVVpuMW0wQlZYbGZxZjFQUHp2XzBQT3Fqa3lzejVKZmJDMG0wRzhqWV9HY1dxaXB2VFNQUzV2LUJSOXRFRUllak83cUI3RGUtYVBJakF1YUVOV0otT1BxUHJqS0NLdFVHc0tsT2RGcWd6UTU4Yi1kc0JZS1VPT1NXSlc3TDM5ZDVEZlRDOURZU1hMT0YxZ25ndVBUaG1VcGsxWFZSS1RxT1ZZTU1vclZjVU5iYmZMd0VBTXlvdTE0YjdoclZ6ZnNKMmE2Yy1ORmNCMnJNX3dwcVJSN2RSd2d6aENLRXQyTjhkcDlLTFVZMHBydFowNTJoZm1mVHNRVHI1YjhTNnl1Vll4dFZhenZfa0dybk9KYVh6LUluSUo0djUzRFNEdzBoVGt5UU9tMlg5UnBLbk9WaEhoU2txY2tUSXJmemlmNEExb3Q1blI5bE9adHluWVI3NXZQNUtXdmpra05aNy15dTBXdlVqcXhteFVqSXFxNnlQR2FGeVNONkx3NVpQUk1FNk5yTUY4T1hQV1FCdm9PYzdFTGl4QXZkODltSlprbGJ6cWREcEM1VlNwN3V5aWdWYXNkekk4X3U0cjJjZ1k2X190cmNnMlpMQVlLdExxM3pFNkZudVFKci1CalE1U3kzdmotQ01LV0ZzWnp0VUxRblhkdlN6VG1MWHNQdGlrNmF4RnFtd0c3UXNqZFVRZTRFMGl1NFU5T2k3VEpjZXA1U052VkJtdUhDWEpTaDRGQnM0SDQwY2IxdDVNbUtELTQ0R0s0OHpfTHdFOHZ0VmRMTC1FUVpPSkJ4QXRWNnl5MURUdjVyUk53emRwbDBxUnloUmlheXhKY3RBUG1mX3JxM2w0VlZvcE40b2ROeG15NS01RFlvUHdoYllLNVhCZUNEd0dwQnFCLVdZU0RhVEFzR2gxTVpub3FGRnl4VDNiSVZrTnpMQUlxeGJGQzh5WlNZR2NKbklHRVRTaVJ2REduN0hXaGo5MHFGb1FOa0U5TUFwQ09zOXVWMnRRNVlJWmZpaTUxLWFIeWR0UEFtaVNDX1k5Q1p3Y2V4ckVXQVBRYzV1eGwwMWd0SE15WUxiYzUyLTUzTGlyTUhZUDFlRTFjcFpieWQwU0pxRWJXSE53Nkd5aHp5T28wZVd6Z1phLTQ4TmgxU3hvNHpySzExUk5WZlFFS3VpOXNHMDdZU0gzSGxYUlU4WmgwNUlPdlhQcUI0cGtITmQ4SlByczN0THUxNHc0a21vUEp6S1hLNnFRNmFfdlpmUWpJQ1VNYXVEOW1abzlsd2RoRG5pVXRVbjBKV2RFTGFEa3ZYTHByOTJjalc1b3hTWkFmS2RPdVlTUTVkRkp
STnZsMWtnYWZEUm1SR3lBemdON2xiN3pkZlNfX2NSYU5wWHNybHh4V0lnNHJjQ2NON1hiRHMycUdmNC1kay13bUE0OTBPN0xmNDA1NlQxVmRySEJvM1VUN2Y2Sl9KX2pZVHRPWEdfR2RYNUoxY01Va3pXb2VBd3lZb3BSXzU5NVJfWlhEYXFSVDJrUnFHWG42RVZJUVQ2RlJWUEkyQnRnREI3eHNiRERiQ3FUczJsRTBDZ3pUUGZPcjExZUFKc21QUWxVYVBmV2hPZXRGd3lJX3ZTczhCVG1jWFVwanhIZHlyTTdiR2c5cTBVSXBRV1U4ZExtWWdub1pTSHU0cU5aYWJVWmExbXI0MjE3WUVnPT0=
# Feature SyncDelta JIRA configuration
Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pTDhnTVNzRUhScU8wYnZsZk52bHFkSWxLc18xQmtCeC1HbnNwTzVBbXRNTmQzRjZYaGE2MVlCNGtnWDk1T2I5VXVKNHpKU1VRbXEyN2tRWUJnU2ltZE5qZ3lmNEF6Z1hMTTEwZkk2NUNBYjhmVTJEcWpRUW9HNEVpSGFWdjBWQXQ3eUtHUTFJS3U5QWpaeno0RFNhMUxnPT0=

View file

@ -66,7 +66,7 @@ Connector_AiAnthropic_MAX_TOKENS = 2000
# Perplexity AI configuration # Perplexity AI configuration
Connector_AiPerplexity_API_URL = https://api.perplexity.ai/chat/completions Connector_AiPerplexity_API_URL = https://api.perplexity.ai/chat/completions
Connector_AiPerplexity_API_SECRET = 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 Connector_AiPerplexity_API_SECRET = PROD_ENC:Z0FBQUFBQm82Mzk2Q1FGRkJEUkI4LXlQbHYzT2RkdVJEcmM4WGdZTWpJTEhoeUF1NW5LUVpJdDBYN3k1WFN4a2FQSWJSQmd0U0xJbzZDTmFFN05FcXl0Z3V1OEpsZjYydV94TXVjVjVXRTRYSWdLMkd5XzZIbFV6emRCZHpuOUpQeThadE5xcDNDVGV1RHJrUEN0c1BBYXctZFNWcFRuVXhRPT0=
Connector_AiPerplexity_MODEL_NAME = sonar Connector_AiPerplexity_MODEL_NAME = sonar
Connector_AiPerplexity_TEMPERATURE = 0.2 Connector_AiPerplexity_TEMPERATURE = 0.2
Connector_AiPerplexity_MAX_TOKENS = 2000 Connector_AiPerplexity_MAX_TOKENS = 2000
@ -88,3 +88,7 @@ Connector_GoogleSpeech_API_KEY_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pNjlJdmFMeERXUUQ
# Feature SyncDelta JIRA configuration # Feature SyncDelta JIRA configuration
Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pTDhnTVNzRUhScU8wYnZsZk52bHFkSWxLc18xQmtCeC1HbnNwTzVBbXRNTmQzRjZYaGE2MVlCNGtnWDk1T2I5VXVKNHpKU1VRbXEyN2tRWUJnU2ltZE5qZ3lmNEF6Z1hMTTEwZkk2NUNBYjhmVTJEcWpRUW9HNEVpSGFWdjBWQXQ3eUtHUTFJS3U5QWpaeno0RFNhMUxnPT0= Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pTDhnTVNzRUhScU8wYnZsZk52bHFkSWxLc18xQmtCeC1HbnNwTzVBbXRNTmQzRjZYaGE2MVlCNGtnWDk1T2I5VXVKNHpKU1VRbXEyN2tRWUJnU2ltZE5qZ3lmNEF6Z1hMTTEwZkk2NUNBYjhmVTJEcWpRUW9HNEVpSGFWdjBWQXQ3eUtHUTFJS3U5QWpaeno0RFNhMUxnPT0=
# Debug Configuration
APP_DEBUG_CHAT_WORKFLOW_ENABLED = FALSE
APP_DEBUG_CHAT_WORKFLOW_DIR = ./test-chat

View file

@ -1,5 +1,6 @@
import logging import logging
import httpx import httpx
import os
from typing import Dict, Any, List, Union from typing import Dict, Any, List, Union
from fastapi import HTTPException from fastapi import HTTPException
from modules.shared.configuration import APP_CONFIG from modules.shared.configuration import APP_CONFIG
@ -147,6 +148,11 @@ class AiAnthropic:
# Direct content as string (in older API versions) # Direct content as string (in older API versions)
content = anthropicResponse["content"] content = anthropicResponse["content"]
# Debug logging for empty responses
if not content or content.strip() == "":
logger.warning(f"Anthropic API returned empty content. Full response: {anthropicResponse}")
content = "[Anthropic API returned empty response]"
# Return in OpenAI format # Return in OpenAI format
return { return {
"id": anthropicResponse.get("id", ""), "id": anthropicResponse.get("id", ""),
@ -182,14 +188,27 @@ class AiAnthropic:
The analysis response as text The analysis response as text
""" """
try: try:
# Debug logging
logger.info(f"callAiImage called with imageData type: {type(imageData)}, length: {len(imageData) if imageData else 0}, mimeType: {mimeType}")
# Distinguish between file path and binary data # Distinguish between file path and binary data
if isinstance(imageData, str): if isinstance(imageData, str):
# It's a file path - import filehandling only when needed # Check if it's base64 encoded data or a file path
from modules import agentserviceFilemanager as fileHandler if len(imageData) > 100 and not os.path.exists(imageData):
base64Data, autoMimeType = fileHandler.encodeFileToBase64(imageData) # It's likely base64 encoded data
mimeType = mimeType or autoMimeType logger.info("Treating imageData as base64 encoded string")
base64Data = imageData
if not mimeType:
mimeType = "image/png"
else:
# It's a file path - import filehandling only when needed
logger.info(f"Treating imageData as file path: {imageData}")
from modules import agentserviceFilemanager as fileHandler
base64Data, autoMimeType = fileHandler.encodeFileToBase64(imageData)
mimeType = mimeType or autoMimeType
else: else:
# It's binary data # It's binary data
logger.info("Treating imageData as binary data")
import base64 import base64
base64Data = base64.b64encode(imageData).decode('utf-8') base64Data = base64.b64encode(imageData).decode('utf-8')
# MIME type must be specified for binary data # MIME type must be specified for binary data
@ -216,8 +235,16 @@ class AiAnthropic:
# Use the existing callAiBasic function with the Vision model # Use the existing callAiBasic function with the Vision model
response = await self.callAiBasic(messages) response = await self.callAiBasic(messages)
# Extract and return content # Extract and return content with proper error handling
return response["choices"][0]["message"]["content"] try:
content = response["choices"][0]["message"]["content"]
if content is None or content.strip() == "":
return "[AI returned empty response for image analysis]"
return content
except (KeyError, IndexError, TypeError) as e:
logger.error(f"Error extracting content from AI response: {str(e)}")
logger.error(f"Response structure: {response}")
return f"[Error extracting AI response: {str(e)}]"
except Exception as e: except Exception as e:
logger.error(f"Error during image analysis: {str(e)}", exc_info=True) logger.error(f"Error during image analysis: {str(e)}", exc_info=True)

View file

@ -33,7 +33,7 @@ class AiService:
Args: Args:
serviceCenter: Service center instance for accessing other services serviceCenter: Service center instance for accessing other services
""" """
self.serviceCenter = serviceCenter self.services = serviceCenter
# Only depend on interfaces # Only depend on interfaces
self.aiObjects = None # Will be initialized in create() self.aiObjects = None # Will be initialized in create()
self._extractionService = None # Lazy initialization self._extractionService = None # Lazy initialization
@ -43,7 +43,7 @@ class AiService:
"""Lazy initialization of extraction service.""" """Lazy initialization of extraction service."""
if self._extractionService is None: if self._extractionService is None:
logger.info("Lazy initializing ExtractionService...") logger.info("Lazy initializing ExtractionService...")
self._extractionService = ExtractionService() self._extractionService = ExtractionService(self.services)
return self._extractionService return self._extractionService
async def _ensureAiObjectsInitialized(self): async def _ensureAiObjectsInitialized(self):
@ -63,42 +63,6 @@ class AiService:
logger.info("AiObjects.create() completed") logger.info("AiObjects.create() completed")
return instance return instance
def _createImageAnalysisPrompt(self, userPrompt: str, generate_json: bool = False) -> str:
"""Create a safe image analysis prompt based on the user's original request."""
# Escape the user prompt to prevent string formatting issues
safeUserPrompt = userPrompt.replace("'", "\\'").replace('"', '\\"').replace('\n', ' ').replace('\r', ' ')
if generate_json:
return f"""Based on the user's request: '{safeUserPrompt}', analyze this image and extract all content as structured JSON.
You must respond with valid JSON only in this exact format:
{{
"metadata": {{
"title": "Image Content",
"version": "1.0"
}},
"sections": [
{{
"id": "image_content",
"type": "paragraph",
"data": {{
"text": "Detailed description of everything visible in the image"
}}
}}
]
}}
Analyze the image completely and describe everything you see including:
- All text content
- Tables or structured data
- Logos, graphics, and visual elements
- Layout and formatting
- Any other relevant information
Return only the JSON structure with actual content from the image. Do not include any text before or after the JSON."""
else:
return f"Based on the user's request: '{safeUserPrompt}', \n\nAnalyze this image completely. Provide a detailed description of everything you see. Include all text, logos, colors, fonts, layout, objects, and visual elements. This is a document processing system that requires complete image analysis for all content types including logos, graphics, simple images, flags, and animations. You must analyze and describe everything visible in the image without any disclaimers or refusals."
# AI Image Analysis # AI Image Analysis
async def readImage( async def readImage(
self, self,
@ -109,8 +73,15 @@ Return only the JSON structure with actual content from the image. Do not includ
) -> str: ) -> str:
"""Call AI for image analysis using interface.callImage().""" """Call AI for image analysis using interface.callImage()."""
try: try:
print(f"🔍 readImage called with prompt: '{prompt[:100]}...', imageData type: {type(imageData)}, length: {len(imageData) if imageData else 0}, mimeType: {mimeType}") # Check if imageData is valid
logger.info(f"readImage called with prompt: '{prompt[:100]}...', imageData type: {type(imageData)}, length: {len(imageData) if imageData else 0}, mimeType: {mimeType}") if not imageData:
error_msg = "No image data provided"
self.services.utils.debugLogToFile(f"Error in AI image analysis: {error_msg}", "AI_SERVICE")
logger.error(f"Error in AI image analysis: {error_msg}")
return f"Error: {error_msg}"
self.services.utils.debugLogToFile(f"readImage called with prompt, imageData type: {type(imageData)}, length: {len(imageData) if imageData else 0}, mimeType: {mimeType}", "AI_SERVICE")
logger.info(f"readImage called with prompt, imageData type: {type(imageData)}, length: {len(imageData) if imageData else 0}, mimeType: {mimeType}")
# Always use IMAGE_ANALYSIS operation type for image processing # Always use IMAGE_ANALYSIS operation type for image processing
if options is None: if options is None:
@ -119,14 +90,25 @@ Return only the JSON structure with actual content from the image. Do not includ
# Override the operation type to ensure image analysis # Override the operation type to ensure image analysis
options.operationType = OperationType.IMAGE_ANALYSIS options.operationType = OperationType.IMAGE_ANALYSIS
print(f"🔍 Calling aiObjects.callImage with operationType: {options.operationType}") self.services.utils.debugLogToFile(f"Calling aiObjects.callImage with operationType: {options.operationType}", "AI_SERVICE")
logger.info(f"Calling aiObjects.callImage with operationType: {options.operationType}") logger.info(f"Calling aiObjects.callImage with operationType: {options.operationType}")
result = await self.aiObjects.callImage(prompt, imageData, mimeType, options) result = await self.aiObjects.callImage(prompt, imageData, mimeType, options)
print(f"🔍 callImage returned: {result[:200]}..." if len(result) > 200 else result)
# Debug the result
self.services.utils.debugLogToFile(f"Raw AI result type: {type(result)}, value: {repr(result)}", "AI_SERVICE")
# Check if result is valid
if not result or (isinstance(result, str) and not result.strip()):
error_msg = f"No response from AI image analysis (result: {repr(result)})"
self.services.utils.debugLogToFile(f"Error in AI image analysis: {error_msg}", "AI_SERVICE")
logger.error(f"Error in AI image analysis: {error_msg}")
return f"Error: {error_msg}"
self.services.utils.debugLogToFile(f"callImage returned: {result[:200]}..." if len(result) > 200 else result, "AI_SERVICE")
logger.info(f"callImage returned: {result[:200]}..." if len(result) > 200 else result) logger.info(f"callImage returned: {result[:200]}..." if len(result) > 200 else result)
return result return result
except Exception as e: except Exception as e:
print(f"🔍 Error in AI image analysis: {str(e)}") self.services.utils.debugLogToFile(f"Error in AI image analysis: {str(e)}", "AI_SERVICE")
logger.error(f"Error in AI image analysis: {str(e)}") logger.error(f"Error in AI image analysis: {str(e)}")
return f"Error: {str(e)}" return f"Error: {str(e)}"
@ -562,7 +544,7 @@ Return only the JSON structure with actual content from the image. Do not includ
}, },
} }
logger.debug(f"Per-chunk extraction options: {extractionOptions}") logger.debug(f"Per-chunk extraction options: prompt length={len(extractionOptions.get('prompt', ''))} chars, operationType={extractionOptions.get('operationType')}")
try: try:
# Extract content with chunking # Extract content with chunking
@ -620,7 +602,7 @@ Return only the JSON structure with actual content from the image. Do not includ
}, },
} }
logger.debug(f"Per-chunk extraction options (JSON mode): {extractionOptions}") logger.debug(f"Per-chunk extraction options (JSON mode): prompt length={len(extractionOptions.get('prompt', ''))} chars, operationType={extractionOptions.get('operationType')}")
try: try:
# Extract content with chunking # Extract content with chunking
@ -695,17 +677,37 @@ Return only the JSON structure with actual content from the image. Do not includ
) )
# Debug logging # Debug logging
print(f"🔍 Chunk {chunk_index}: document_mime_type={document_mime_type}, part.mimeType={part.mimeType}, part.typeGroup={part.typeGroup}, is_image={is_image}") self.services.utils.debugLogToFile(f"Chunk {chunk_index}: document_mime_type={document_mime_type}, part.mimeType={part.mimeType}, part.typeGroup={part.typeGroup}, is_image={is_image}", "AI_SERVICE")
logger.info(f"Chunk {chunk_index}: document_mime_type={document_mime_type}, part.mimeType={part.mimeType}, part.typeGroup={part.typeGroup}, is_image={is_image}") logger.info(f"Chunk {chunk_index}: document_mime_type={document_mime_type}, part.mimeType={part.mimeType}, part.typeGroup={part.typeGroup}, is_image={is_image}")
if is_image: if is_image:
# Use the same extraction prompt for image analysis (contains table JSON format) # Use the same extraction prompt for image analysis (contains table JSON format)
ai_result = await self.readImage( self.services.utils.debugLogToFile(f"Processing image chunk {chunk_index}: mimeType={part.mimeType}, data_length={len(part.data) if part.data else 0}", "AI_SERVICE")
prompt=prompt,
imageData=part.data, # Check if image data is available
mimeType=part.mimeType, if not part.data:
options=options error_msg = f"No image data available for chunk {chunk_index}"
) logger.warning(error_msg)
ai_result = f"Error: {error_msg}"
else:
try:
ai_result = await self.readImage(
prompt=prompt,
imageData=part.data,
mimeType=part.mimeType,
options=options
)
self.services.utils.debugLogToFile(f"Image analysis result for chunk {chunk_index}: length={len(ai_result) if ai_result else 0}, preview={ai_result[:200] if ai_result else 'None'}...", "AI_SERVICE")
# Check if result is empty or None
if not ai_result or not ai_result.strip():
logger.warning(f"Image chunk {chunk_index} returned empty response from AI")
ai_result = "No content detected in image"
except Exception as e:
logger.error(f"Error processing image chunk {chunk_index}: {str(e)}")
ai_result = f"Error analyzing image: {str(e)}"
# If generating JSON, clean image analysis result # If generating JSON, clean image analysis result
if generate_json: if generate_json:
@ -715,43 +717,63 @@ Return only the JSON structure with actual content from the image. Do not includ
# Clean the response - remove markdown code blocks if present # Clean the response - remove markdown code blocks if present
cleaned_result = ai_result.strip() cleaned_result = ai_result.strip()
# Remove various markdown patterns
if cleaned_result.startswith('```json'): if cleaned_result.startswith('```json'):
# Remove ```json from start and ``` from end
cleaned_result = re.sub(r'^```json\s*', '', cleaned_result) cleaned_result = re.sub(r'^```json\s*', '', cleaned_result)
cleaned_result = re.sub(r'\s*```$', '', cleaned_result) cleaned_result = re.sub(r'\s*```$', '', cleaned_result)
elif cleaned_result.startswith('```'): elif cleaned_result.startswith('```'):
# Remove ``` from start and end
cleaned_result = re.sub(r'^```\s*', '', cleaned_result) cleaned_result = re.sub(r'^```\s*', '', cleaned_result)
cleaned_result = re.sub(r'\s*```$', '', cleaned_result) cleaned_result = re.sub(r'\s*```$', '', cleaned_result)
# Remove any leading/trailing text that's not JSON
# Look for the first { and last } to extract JSON
first_brace = cleaned_result.find('{')
last_brace = cleaned_result.rfind('}')
if first_brace != -1 and last_brace != -1 and last_brace > first_brace:
cleaned_result = cleaned_result[first_brace:last_brace + 1]
# Additional cleaning for common AI response issues
cleaned_result = cleaned_result.strip()
# Validate JSON # Validate JSON
json.loads(cleaned_result) json.loads(cleaned_result)
ai_result = cleaned_result # Use cleaned version ai_result = cleaned_result # Use cleaned version
self.services.utils.debugLogToFile(f"Image chunk {chunk_index} JSON validation successful", "AI_SERVICE")
except json.JSONDecodeError as e: except json.JSONDecodeError as e:
logger.warning(f"Image chunk {chunk_index} returned invalid JSON: {str(e)}") logger.warning(f"Image chunk {chunk_index} returned invalid JSON: {str(e)}")
# Create fallback JSON logger.warning(f"Raw response was: '{ai_result[:500]}...'")
# Create fallback JSON with the actual response content (not the error message)
# Use the original AI response content, not the error message
fallback_content = ai_result if ai_result and ai_result.strip() else "No content detected"
self.services.utils.debugLogToFile(f"IMAGE FALLBACK CONTENT PREVIEW: '{fallback_content[:200]}...'", "AI_SERVICE")
ai_result = json.dumps({ ai_result = json.dumps({
"metadata": {"title": "Error Section"}, "metadata": {"title": f"Image Analysis - Chunk {chunk_index}"},
"sections": [{ "sections": [{
"id": f"error_section_{chunk_index}", "id": f"image_section_{chunk_index}",
"type": "paragraph", "type": "paragraph",
"data": {"text": f"Error parsing JSON: {str(e)}"} "data": {"text": fallback_content}
}] }]
}) })
self.services.utils.debugLogToFile(f"Created fallback JSON for image chunk {chunk_index} with actual content", "AI_SERVICE")
elif part.typeGroup in ("container", "binary"): elif part.typeGroup in ("container", "binary"):
# Handle ALL container and binary content generically - let AI process any document type # Handle ALL container and binary content generically - let AI process any document type
print(f"🔍 DEBUG: Chunk {chunk_index}: typeGroup={part.typeGroup}, mimeType={part.mimeType}, data_length={len(part.data) if part.data else 0}") self.services.utils.debugLogToFile(f"DEBUG: Chunk {chunk_index}: typeGroup={part.typeGroup}, mimeType={part.mimeType}, data_length={len(part.data) if part.data else 0}", "AI_SERVICE")
if part.mimeType and part.data and len(part.data.strip()) > 0: if part.mimeType and part.data and len(part.data.strip()) > 0:
# Process any document container as text content # Process any document container as text content
request_options = options if options is not None else AiCallOptions() request_options = options if options is not None else AiCallOptions()
request_options.operationType = OperationType.GENERAL request_options.operationType = OperationType.GENERAL
print(f"🔍 EXTRACTION CONTAINER CHUNK {chunk_index}: Processing {part.mimeType} container as text with generate_json={generate_json}") self.services.utils.debugLogToFile(f"EXTRACTION CONTAINER CHUNK {chunk_index}: Processing {part.mimeType} container as text with generate_json={generate_json}", "AI_SERVICE")
logger.info(f"Chunk {chunk_index}: Processing {part.mimeType} container as text with generate_json={generate_json}") logger.info(f"Chunk {chunk_index}: Processing {part.mimeType} container as text with generate_json={generate_json}")
# Log extraction prompt and context # Log extraction prompt and context
print(f"🔍 EXTRACTION PROMPT: {prompt}") self.services.utils.debugLogToFile(f"EXTRACTION PROMPT: {prompt}", "AI_SERVICE")
print(f"🔍 EXTRACTION CONTEXT LENGTH: {len(part.data) if part.data else 0} characters") self.services.utils.debugLogToFile(f"EXTRACTION CONTEXT LENGTH: {len(part.data) if part.data else 0} characters", "AI_SERVICE")
request = AiCallRequest( request = AiCallRequest(
prompt=prompt, prompt=prompt,
@ -762,7 +784,7 @@ Return only the JSON structure with actual content from the image. Do not includ
ai_result = response.content ai_result = response.content
# Log extraction response # Log extraction response
print(f"🔍 EXTRACTION RESPONSE LENGTH: {len(ai_result) if ai_result else 0} characters") self.services.utils.debugLogToFile(f"EXTRACTION RESPONSE LENGTH: {len(ai_result) if ai_result else 0} characters", "AI_SERVICE")
# Save full extraction prompt and response to debug file # Save full extraction prompt and response to debug file
try: try:
@ -786,33 +808,52 @@ Return only the JSON structure with actual content from the image. Do not includ
# Clean the response - remove markdown code blocks if present # Clean the response - remove markdown code blocks if present
cleaned_result = ai_result.strip() cleaned_result = ai_result.strip()
# Remove various markdown patterns
if cleaned_result.startswith('```json'): if cleaned_result.startswith('```json'):
# Remove ```json from start and ``` from end
cleaned_result = re.sub(r'^```json\s*', '', cleaned_result) cleaned_result = re.sub(r'^```json\s*', '', cleaned_result)
cleaned_result = re.sub(r'\s*```$', '', cleaned_result) cleaned_result = re.sub(r'\s*```$', '', cleaned_result)
elif cleaned_result.startswith('```'): elif cleaned_result.startswith('```'):
# Remove ``` from start and end
cleaned_result = re.sub(r'^```\s*', '', cleaned_result) cleaned_result = re.sub(r'^```\s*', '', cleaned_result)
cleaned_result = re.sub(r'\s*```$', '', cleaned_result) cleaned_result = re.sub(r'\s*```$', '', cleaned_result)
# Remove any leading/trailing text that's not JSON
# Look for the first { and last } to extract JSON
first_brace = cleaned_result.find('{')
last_brace = cleaned_result.rfind('}')
if first_brace != -1 and last_brace != -1 and last_brace > first_brace:
cleaned_result = cleaned_result[first_brace:last_brace + 1]
# Additional cleaning for common AI response issues
cleaned_result = cleaned_result.strip()
# Validate JSON # Validate JSON
json.loads(cleaned_result) json.loads(cleaned_result)
ai_result = cleaned_result # Use cleaned version ai_result = cleaned_result # Use cleaned version
except json.JSONDecodeError as e: except json.JSONDecodeError as e:
logger.warning(f"Container chunk {chunk_index} ({part.mimeType}) returned invalid JSON: {str(e)}") logger.warning(f"Container chunk {chunk_index} ({part.mimeType}) returned invalid JSON: {str(e)}")
# Create fallback JSON logger.warning(f"Raw response was: '{ai_result[:500]}...'")
# Create fallback JSON with the actual response content (not the error message)
# Use the original AI response content, not the error message
fallback_content = ai_result if ai_result and ai_result.strip() else "No content detected"
self.services.utils.debugLogToFile(f"FALLBACK CONTENT PREVIEW: '{fallback_content[:200]}...'", "AI_SERVICE")
ai_result = json.dumps({ ai_result = json.dumps({
"metadata": {"title": "Error Section"}, "metadata": {"title": f"Document Analysis - Chunk {chunk_index}"},
"sections": [{ "sections": [{
"id": f"error_section_{chunk_index}", "id": f"analysis_section_{chunk_index}",
"type": "paragraph", "type": "paragraph",
"data": {"text": f"Error parsing JSON: {str(e)}"} "data": {"text": fallback_content}
}] }]
}) })
self.services.utils.debugLogToFile(f"Created fallback JSON for container chunk {chunk_index} with actual content", "AI_SERVICE")
else: else:
# Skip empty or invalid container/binary content - don't create a result # Skip empty or invalid container/binary content - don't create a result
print(f"🔍 DEBUG: Chunk {chunk_index}: Skipping empty container - mimeType={part.mimeType}, data_length={len(part.data) if part.data else 0}") self.services.utils.debugLogToFile(f"DEBUG: Chunk {chunk_index}: Skipping empty container - mimeType={part.mimeType}, data_length={len(part.data) if part.data else 0}", "AI_SERVICE")
# Return None to indicate this chunk should be completely skipped # Return None to indicate this chunk should be completely skipped
return None return None
else: else:
@ -820,12 +861,11 @@ Return only the JSON structure with actual content from the image. Do not includ
request_options = options if options is not None else AiCallOptions() request_options = options if options is not None else AiCallOptions()
# FIXED: Set operation type to general for text processing # FIXED: Set operation type to general for text processing
request_options.operationType = OperationType.GENERAL request_options.operationType = OperationType.GENERAL
print(f"🔍 EXTRACTION CHUNK {chunk_index}: Calling aiObjects.call with operationType={request_options.operationType}, generate_json={generate_json}") self.services.utils.debugLogToFile(f"EXTRACTION CHUNK {chunk_index}: Calling aiObjects.call with operationType={request_options.operationType}, generate_json={generate_json}", "AI_SERVICE")
logger.info(f"Chunk {chunk_index}: Calling aiObjects.call with operationType={request_options.operationType}, generate_json={generate_json}") logger.info(f"Chunk {chunk_index}: Calling aiObjects.call with operationType={request_options.operationType}, generate_json={generate_json}")
# Log extraction prompt and context # Log extraction context length
print(f"🔍 EXTRACTION PROMPT: {prompt}") self.services.utils.debugLogToFile(f"EXTRACTION CONTEXT LENGTH: {len(part.data) if part.data else 0} characters", "AI_SERVICE")
print(f"🔍 EXTRACTION CONTEXT LENGTH: {len(part.data) if part.data else 0} characters")
request = AiCallRequest( request = AiCallRequest(
prompt=prompt, prompt=prompt,
@ -835,10 +875,10 @@ Return only the JSON structure with actual content from the image. Do not includ
response = await self.aiObjects.call(request) response = await self.aiObjects.call(request)
ai_result = response.content ai_result = response.content
# Log extraction response # Log extraction response length
print(f"🔍 EXTRACTION RESPONSE LENGTH: {len(ai_result) if ai_result else 0} characters") self.services.utils.debugLogToFile(f"EXTRACTION RESPONSE LENGTH: {len(ai_result) if ai_result else 0} characters", "AI_SERVICE")
# Save full extraction prompt and response to debug file # Save extraction response to debug file (without verbose prompt)
try: try:
import os import os
from datetime import datetime, UTC from datetime import datetime, UTC
@ -846,8 +886,6 @@ Return only the JSON structure with actual content from the image. Do not includ
debug_root = "./test-chat/ai" debug_root = "./test-chat/ai"
os.makedirs(debug_root, exist_ok=True) os.makedirs(debug_root, exist_ok=True)
with open(os.path.join(debug_root, f"{ts}_extraction_chunk_{chunk_index}.txt"), "w", encoding="utf-8") as f: with open(os.path.join(debug_root, f"{ts}_extraction_chunk_{chunk_index}.txt"), "w", encoding="utf-8") as f:
f.write(f"EXTRACTION PROMPT:\n{prompt}\n\n")
f.write(f"EXTRACTION CONTEXT:\n{part.data if part.data else 'No context'}\n\n")
f.write(f"EXTRACTION RESPONSE:\n{ai_result if ai_result else 'No response'}\n") f.write(f"EXTRACTION RESPONSE:\n{ai_result if ai_result else 'No response'}\n")
except Exception: except Exception:
pass pass
@ -929,9 +967,9 @@ Return only the JSON structure with actual content from the image. Do not includ
max_concurrent = options.maxParallelChunks max_concurrent = options.maxParallelChunks
logger.info(f"Processing {len(chunks_to_process)} chunks with max concurrency: {max_concurrent}") logger.info(f"Processing {len(chunks_to_process)} chunks with max concurrency: {max_concurrent}")
print(f"🔍 DEBUG: Chunks to process: {len(chunks_to_process)}") self.services.utils.debugLogToFile(f"DEBUG: Chunks to process: {len(chunks_to_process)}", "AI_SERVICE")
for i, chunk_info in enumerate(chunks_to_process): for i, chunk_info in enumerate(chunks_to_process):
print(f"🔍 DEBUG: Chunk {i}: typeGroup={chunk_info['part'].typeGroup}, mimeType={chunk_info['part'].mimeType}, data_length={len(chunk_info['part'].data) if chunk_info['part'].data else 0}") self.services.utils.debugLogToFile(f"DEBUG: Chunk {i}: typeGroup={chunk_info['part'].typeGroup}, mimeType={chunk_info['part'].mimeType}, data_length={len(chunk_info['part'].data) if chunk_info['part'].data else 0}", "AI_SERVICE")
# Create semaphore for concurrency control # Create semaphore for concurrency control
semaphore = asyncio.Semaphore(max_concurrent) semaphore = asyncio.Semaphore(max_concurrent)
@ -942,9 +980,9 @@ Return only the JSON structure with actual content from the image. Do not includ
# Process all chunks in parallel with concurrency control # Process all chunks in parallel with concurrency control
tasks = [process_with_semaphore(chunk_info) for chunk_info in chunks_to_process] tasks = [process_with_semaphore(chunk_info) for chunk_info in chunks_to_process]
print(f"🔍 DEBUG: Created {len(tasks)} tasks for parallel processing") self.services.utils.debugLogToFile(f"DEBUG: Created {len(tasks)} tasks for parallel processing", "AI_SERVICE")
chunk_results = await asyncio.gather(*tasks, return_exceptions=True) chunk_results = await asyncio.gather(*tasks, return_exceptions=True)
print(f"🔍 DEBUG: Got {len(chunk_results)} results from parallel processing") self.services.utils.debugLogToFile(f"DEBUG: Got {len(chunk_results)} results from parallel processing", "AI_SERVICE")
# Handle any exceptions in the gather itself # Handle any exceptions in the gather itself
processed_results = [] processed_results = []
@ -1626,8 +1664,7 @@ Return only the JSON structure with actual content from the image. Do not includ
# Get log directory from configuration via service center if possible # Get log directory from configuration via service center if possible
logDir = None logDir = None
try: try:
if self.serviceCenter and hasattr(self.serviceCenter, 'utils'): logDir = self.services.utils.configGet("APP_LOGGING_LOG_DIR", "./")
logDir = self.serviceCenter.utils.configGet("APP_LOGGING_LOG_DIR", "./")
except Exception: except Exception:
pass pass
if not logDir: if not logDir:
@ -1800,7 +1837,7 @@ Return only the JSON structure with actual content from the image. Do not includ
try: try:
# Get format-specific extraction prompt from generation service # Get format-specific extraction prompt from generation service
from modules.services.serviceGeneration.mainServiceGeneration import GenerationService from modules.services.serviceGeneration.mainServiceGeneration import GenerationService
generation_service = GenerationService(self.serviceCenter) generation_service = GenerationService(self.services)
# Use default title if not provided # Use default title if not provided
if not title: if not title:

View file

@ -1,10 +1,13 @@
from typing import Any, Dict, List from typing import Any, Dict, List
import base64 import base64
import logging
from ..subUtils import makeId from ..subUtils import makeId
from modules.datamodels.datamodelExtraction import ContentPart from modules.datamodels.datamodelExtraction import ContentPart
from ..subRegistry import Extractor from ..subRegistry import Extractor
logger = logging.getLogger(__name__)
class ImageExtractor(Extractor): class ImageExtractor(Extractor):
def detect(self, fileName: str, mimeType: str, headBytes: bytes) -> bool: def detect(self, fileName: str, mimeType: str, headBytes: bytes) -> bool:
@ -12,6 +15,35 @@ class ImageExtractor(Extractor):
def extract(self, fileBytes: bytes, context: Dict[str, Any]) -> List[ContentPart]: def extract(self, fileBytes: bytes, context: Dict[str, Any]) -> List[ContentPart]:
mimeType = context.get("mimeType") or "image/unknown" mimeType = context.get("mimeType") or "image/unknown"
fileName = context.get("fileName", "")
# Convert GIF to PNG during extraction
if mimeType.lower() == "image/gif":
try:
from PIL import Image
import io
# Open GIF and convert to PNG
with Image.open(io.BytesIO(fileBytes)) as img:
# Convert to RGB (removes animation)
if img.mode in ('RGBA', 'LA', 'P'):
img = img.convert('RGB')
# Save as PNG in memory
png_buffer = io.BytesIO()
img.save(png_buffer, format='PNG')
png_data = png_buffer.getvalue()
# Update mimeType and fileBytes
mimeType = "image/png"
fileBytes = png_data
logger.info(f"GIF converted to PNG during extraction: {fileName}, original={len(fileBytes)} bytes, converted={len(png_data)} bytes")
except Exception as e:
logger.warning(f"GIF conversion failed during extraction for {fileName}: {str(e)}, using original")
# Keep original GIF data if conversion fails
return [ContentPart( return [ContentPart(
id=makeId(), id=makeId(),
parentId=None, parentId=None,

View file

@ -85,7 +85,7 @@ def runExtraction(extractorRegistry: ExtractorRegistry, chunkerRegistry: Chunker
chunk_parts = [p for p in parts if p.metadata.get("chunk", False)] chunk_parts = [p for p in parts if p.metadata.get("chunk", False)]
logger.debug(f"runExtraction: Preserving {len(chunk_parts)} chunks from merging") logger.debug(f"runExtraction: Preserving {len(chunk_parts)} chunks from merging")
print(f"🔍 DEBUG: runExtraction - non_chunk_parts: {len(non_chunk_parts)}, chunk_parts: {len(chunk_parts)}") logger.debug(f"runExtraction - non_chunk_parts: {len(non_chunk_parts)}, chunk_parts: {len(chunk_parts)}")
# Apply intelligent merging for small text parts # Apply intelligent merging for small text parts
if non_chunk_parts: if non_chunk_parts:
@ -99,7 +99,7 @@ def runExtraction(extractorRegistry: ExtractorRegistry, chunkerRegistry: Chunker
parts = non_chunk_parts + chunk_parts parts = non_chunk_parts + chunk_parts
logger.debug(f"runExtraction: Final parts after merging: {len(parts)} (chunks: {len(chunk_parts)})") logger.debug(f"runExtraction: Final parts after merging: {len(parts)} (chunks: {len(chunk_parts)})")
print(f"🔍 DEBUG: runExtraction - Final parts: {len(parts)} (chunks: {len(chunk_parts)})") logger.debug(f"runExtraction - Final parts: {len(parts)} (chunks: {len(chunk_parts)})")
# DEBUG: dump parts and chunks to files TODO TO REMOVE # DEBUG: dump parts and chunks to files TODO TO REMOVE
try: try:
base_dir = "./test-chat/ai" base_dir = "./test-chat/ai"
@ -154,22 +154,22 @@ def poolAndLimit(parts: List[ContentPart], chunkerRegistry: ChunkerRegistry, opt
kept: List[ContentPart] = [] kept: List[ContentPart] = []
remaining: List[ContentPart] = [] remaining: List[ContentPart] = []
print(f"🔍 DEBUG: Starting poolAndLimit with {len(parts)} parts, maxSize={maxSize}") logger.debug(f"Starting poolAndLimit with {len(parts)} parts, maxSize={maxSize}")
for i, p in enumerate(parts): for i, p in enumerate(parts):
size = int(p.metadata.get("size", 0) or 0) size = int(p.metadata.get("size", 0) or 0)
# Show first 50 characters of text content for debugging # Show first 50 characters of text content for debugging
content_preview = p.data[:50].replace('\n', '\\n') if p.data else "" content_preview = p.data[:50].replace('\n', '\\n') if p.data else ""
print(f"🔍 DEBUG: Part {i}: {p.typeGroup} - {size} bytes - '{content_preview}...' (current: {current})") logger.debug(f"Part {i}: {p.typeGroup} - {size} bytes - '{content_preview}...' (current: {current})")
if current + size <= maxSize: if current + size <= maxSize:
kept.append(p) kept.append(p)
current += size current += size
print(f"🔍 DEBUG: Part {i} kept (total: {current})") logger.debug(f"Part {i} kept (total: {current})")
else: else:
remaining.append(p) remaining.append(p)
print(f"🔍 DEBUG: Part {i} moved to remaining") logger.debug(f"Part {i} moved to remaining")
print(f"🔍 DEBUG: Kept: {len(kept)}, Remaining: {len(remaining)}") logger.debug(f"Kept: {len(kept)}, Remaining: {len(remaining)}")
# If we have remaining parts and chunking is allowed, try chunking # If we have remaining parts and chunking is allowed, try chunking
if remaining and chunkAllowed: if remaining and chunkAllowed:
@ -177,15 +177,15 @@ def poolAndLimit(parts: List[ContentPart], chunkerRegistry: ChunkerRegistry, opt
logger.debug(f"Remaining parts to chunk: {len(remaining)}") logger.debug(f"Remaining parts to chunk: {len(remaining)}")
logger.debug(f"Max size limit: {maxSize} bytes") logger.debug(f"Max size limit: {maxSize} bytes")
logger.debug(f"Current size used: {current} bytes") logger.debug(f"Current size used: {current} bytes")
print(f"🔍 DEBUG: Chunking {len(remaining)} remaining parts") logger.debug(f"Chunking {len(remaining)} remaining parts")
for p in remaining: for p in remaining:
if p.typeGroup in ("text", "table", "structure", "image", "container", "binary"): if p.typeGroup in ("text", "table", "structure", "image", "container", "binary"):
logger.debug(f"Chunking {p.typeGroup} part: {len(p.data)} chars") logger.debug(f"Chunking {p.typeGroup} part: {len(p.data)} chars")
print(f"🔍 DEBUG: Chunking {p.typeGroup} part with {len(p.data)} chars") logger.debug(f"Chunking {p.typeGroup} part with {len(p.data)} chars")
chunks = chunkerRegistry.resolve(p.typeGroup).chunk(p, options) chunks = chunkerRegistry.resolve(p.typeGroup).chunk(p, options)
logger.debug(f"Created {len(chunks)} chunks") logger.debug(f"Created {len(chunks)} chunks")
print(f"🔍 DEBUG: Created {len(chunks)} chunks") logger.debug(f"Created {len(chunks)} chunks")
chunks_added = 0 chunks_added = 0
for ch in chunks: for ch in chunks:
@ -229,7 +229,7 @@ def poolAndLimit(parts: List[ContentPart], chunkerRegistry: ChunkerRegistry, opt
kept = non_chunk_parts + chunk_parts kept = non_chunk_parts + chunk_parts
logger.debug(f"Final parts after merging: {len(kept)} (chunks: {len(chunk_parts)})") logger.debug(f"Final parts after merging: {len(kept)} (chunks: {len(chunk_parts)})")
print(f"🔍 DEBUG: Final parts after merging: {len(kept)} (chunks: {len(chunk_parts)})") logger.debug(f"Final parts after merging: {len(kept)} (chunks: {len(chunk_parts)})")
# Re-check size after merging # Re-check size after merging
totalSize = sum(int(p.metadata.get("size", 0) or 0) for p in kept) totalSize = sum(int(p.metadata.get("size", 0) or 0) for p in kept)
@ -237,13 +237,13 @@ def poolAndLimit(parts: List[ContentPart], chunkerRegistry: ChunkerRegistry, opt
# Apply size limit to merged parts # Apply size limit to merged parts
kept = _applySizeLimit(kept, maxSize) kept = _applySizeLimit(kept, maxSize)
print(f"🔍 DEBUG: poolAndLimit returning {len(kept)} parts") logger.debug(f"poolAndLimit returning {len(kept)} parts")
return kept return kept
def _applyMerging(parts: List[ContentPart], strategy: Dict[str, Any]) -> List[ContentPart]: def _applyMerging(parts: List[ContentPart], strategy: Dict[str, Any]) -> List[ContentPart]:
"""Apply merging strategy to parts with intelligent token-aware merging.""" """Apply merging strategy to parts with intelligent token-aware merging."""
print(f"🔍 DEBUG: _applyMerging called with {len(parts)} parts") logger.debug(f"_applyMerging called with {len(parts)} parts")
# Check if intelligent merging is enabled # Check if intelligent merging is enabled
if strategy.get("useIntelligentMerging", False): if strategy.get("useIntelligentMerging", False):
@ -256,7 +256,7 @@ def _applyMerging(parts: List[ContentPart], strategy: Dict[str, Any]) -> List[Co
# Calculate and log optimization stats # Calculate and log optimization stats
stats = subMerger.calculate_optimization_stats(parts, merged) stats = subMerger.calculate_optimization_stats(parts, merged)
logger.info(f"🧠 Intelligent merging stats: {stats}") logger.info(f"🧠 Intelligent merging stats: {stats}")
print(f"🔍 DEBUG: Intelligent merging: {stats['original_ai_calls']}{stats['optimized_ai_calls']} calls ({stats['reduction_percent']}% reduction)") logger.debug(f"Intelligent merging: {stats['original_ai_calls']}{stats['optimized_ai_calls']} calls ({stats['reduction_percent']}% reduction)")
return merged return merged
@ -271,29 +271,29 @@ def _applyMerging(parts: List[ContentPart], strategy: Dict[str, Any]) -> List[Co
structureParts = [p for p in parts if p.typeGroup == "structure"] structureParts = [p for p in parts if p.typeGroup == "structure"]
otherParts = [p for p in parts if p.typeGroup not in ("text", "table", "structure")] otherParts = [p for p in parts if p.typeGroup not in ("text", "table", "structure")]
print(f"🔍 DEBUG: Grouped - text: {len(textParts)}, table: {len(tableParts)}, structure: {len(structureParts)}, other: {len(otherParts)}") logger.debug(f"Grouped - text: {len(textParts)}, table: {len(tableParts)}, structure: {len(structureParts)}, other: {len(otherParts)}")
merged: List[ContentPart] = [] merged: List[ContentPart] = []
if textParts: if textParts:
textMerged = textMerger.merge(textParts, strategy) textMerged = textMerger.merge(textParts, strategy)
print(f"🔍 DEBUG: TextMerger merged {len(textParts)} parts into {len(textMerged)} parts") logger.debug(f"TextMerger merged {len(textParts)} parts into {len(textMerged)} parts")
merged.extend(textMerged) merged.extend(textMerged)
if tableParts: if tableParts:
tableMerged = tableMerger.merge(tableParts, strategy) tableMerged = tableMerger.merge(tableParts, strategy)
print(f"🔍 DEBUG: TableMerger merged {len(tableParts)} parts into {len(tableMerged)} parts") logger.debug(f"TableMerger merged {len(tableParts)} parts into {len(tableMerged)} parts")
merged.extend(tableMerged) merged.extend(tableMerged)
if structureParts: if structureParts:
# For now, treat structure like text # For now, treat structure like text
structureMerged = textMerger.merge(structureParts, strategy) structureMerged = textMerger.merge(structureParts, strategy)
print(f"🔍 DEBUG: StructureMerger merged {len(structureParts)} parts into {len(structureMerged)} parts") logger.debug(f"StructureMerger merged {len(structureParts)} parts into {len(structureMerged)} parts")
merged.extend(structureMerged) merged.extend(structureMerged)
if otherParts: if otherParts:
otherMerged = defaultMerger.merge(otherParts, strategy) otherMerged = defaultMerger.merge(otherParts, strategy)
print(f"🔍 DEBUG: DefaultMerger merged {len(otherParts)} parts into {len(otherMerged)} parts") logger.debug(f"DefaultMerger merged {len(otherParts)} parts into {len(otherMerged)} parts")
merged.extend(otherMerged) merged.extend(otherMerged)
print(f"🔍 DEBUG: _applyMerging returning {len(merged)} parts") logger.debug(f"_applyMerging returning {len(merged)} parts")
return merged return merged

View file

@ -1,7 +1,10 @@
from typing import Any, Dict, Optional from typing import Any, Dict, Optional
import logging
from modules.datamodels.datamodelExtraction import ContentPart from modules.datamodels.datamodelExtraction import ContentPart
logger = logging.getLogger(__name__)
class Extractor: class Extractor:
def detect(self, fileName: str, mimeType: str, headBytes: bytes) -> bool: def detect(self, fileName: str, mimeType: str, headBytes: bytes) -> bool:
@ -64,9 +67,9 @@ class ExtractorRegistry:
self.register("ppt", PptxExtractor()) self.register("ppt", PptxExtractor())
# fallback # fallback
self.setFallback(BinaryExtractor()) self.setFallback(BinaryExtractor())
print(f"ExtractorRegistry: Successfully registered {len(self._map)} extractors") logger.info(f"ExtractorRegistry: Successfully registered {len(self._map)} extractors")
except Exception as e: except Exception as e:
print(f"ExtractorRegistry: Failed to register extractors: {str(e)}") logger.error(f"ExtractorRegistry: Failed to register extractors: {str(e)}")
import traceback import traceback
traceback.print_exc() traceback.print_exc()
@ -105,7 +108,7 @@ class ChunkerRegistry:
self.register("container", TextChunker()) self.register("container", TextChunker())
self.register("binary", TextChunker()) self.register("binary", TextChunker())
except Exception as e: except Exception as e:
print(f"ChunkerRegistry: Failed to register chunkers: {str(e)}") logger.error(f"ChunkerRegistry: Failed to register chunkers: {str(e)}")
import traceback import traceback
traceback.print_exc() traceback.print_exc()

View file

@ -18,7 +18,7 @@ logger = logging.getLogger(__name__)
class GenerationService: class GenerationService:
def __init__(self, serviceCenter=None): def __init__(self, serviceCenter=None):
# Directly use interfaces from the provided service center (no self.service calls) # Directly use interfaces from the provided service center (no self.service calls)
self.serviceCenter = serviceCenter self.services = serviceCenter
self.interfaceDbComponent = getattr(serviceCenter, 'interfaceDbComponent', None) if serviceCenter else None self.interfaceDbComponent = getattr(serviceCenter, 'interfaceDbComponent', None) if serviceCenter else None
self.interfaceDbChat = getattr(serviceCenter, 'interfaceDbChat', None) if serviceCenter else None self.interfaceDbChat = getattr(serviceCenter, 'interfaceDbChat', None) if serviceCenter else None
self.workflow = getattr(serviceCenter, 'workflow', None) if serviceCenter else None self.workflow = getattr(serviceCenter, 'workflow', None) if serviceCenter else None
@ -346,7 +346,8 @@ class GenerationService:
outputFormat=outputFormat, outputFormat=outputFormat,
userPrompt=userPrompt, userPrompt=userPrompt,
title=title, title=title,
aiService=aiService aiService=aiService,
services=self.services
) )
except Exception as e: except Exception as e:
logger.warning(f"Failed to generate AI-based generation prompt: {str(e)}, using user prompt") logger.warning(f"Failed to generate AI-based generation prompt: {str(e)}, using user prompt")
@ -395,7 +396,8 @@ class GenerationService:
renderer=renderer, renderer=renderer,
userPrompt=userPrompt, userPrompt=userPrompt,
title=title, title=title,
aiService=aiService aiService=aiService,
services=self.services
) )
logger.info(f"Generated {outputFormat}-specific extraction prompt: {len(extractionPrompt)} characters") logger.info(f"Generated {outputFormat}-specific extraction prompt: {len(extractionPrompt)} characters")
@ -409,14 +411,14 @@ class GenerationService:
"""Get the appropriate renderer for the specified format using auto-discovery.""" """Get the appropriate renderer for the specified format using auto-discovery."""
try: try:
from .renderers.registry import get_renderer from .renderers.registry import get_renderer
renderer = get_renderer(output_format) renderer = get_renderer(output_format, services=self.services)
if renderer: if renderer:
return renderer return renderer
# Fallback to text renderer if no specific renderer found # Fallback to text renderer if no specific renderer found
logger.warning(f"No renderer found for format {output_format}, falling back to text") logger.warning(f"No renderer found for format {output_format}, falling back to text")
fallback_renderer = get_renderer('text') fallback_renderer = get_renderer('text', services=self.services)
if fallback_renderer: if fallback_renderer:
return fallback_renderer return fallback_renderer

View file

@ -92,7 +92,7 @@ class RendererRegistry:
except Exception as e: except Exception as e:
logger.error(f"Error registering renderer {renderer_class.__name__}: {str(e)}") logger.error(f"Error registering renderer {renderer_class.__name__}: {str(e)}")
def get_renderer(self, output_format: str) -> Optional[BaseRenderer]: def get_renderer(self, output_format: str, services=None) -> Optional[BaseRenderer]:
"""Get a renderer instance for the specified format.""" """Get a renderer instance for the specified format."""
if not self._discovered: if not self._discovered:
self.discover_renderers() self.discover_renderers()
@ -109,7 +109,7 @@ class RendererRegistry:
if renderer_class: if renderer_class:
try: try:
return renderer_class() return renderer_class(services=services)
except Exception as e: except Exception as e:
logger.error(f"Error creating renderer instance for {format_name}: {str(e)}") logger.error(f"Error creating renderer instance for {format_name}: {str(e)}")
return None return None
@ -144,9 +144,9 @@ class RendererRegistry:
# Global registry instance # Global registry instance
_registry = RendererRegistry() _registry = RendererRegistry()
def get_renderer(output_format: str) -> Optional[BaseRenderer]: def get_renderer(output_format: str, services=None) -> Optional[BaseRenderer]:
"""Get a renderer instance for the specified format.""" """Get a renderer instance for the specified format."""
return _registry.get_renderer(output_format) return _registry.get_renderer(output_format, services)
def get_supported_formats() -> List[str]: def get_supported_formats() -> List[str]:
"""Get list of all supported formats.""" """Get list of all supported formats."""

View file

@ -12,8 +12,9 @@ logger = logging.getLogger(__name__)
class BaseRenderer(ABC): class BaseRenderer(ABC):
"""Base class for all format renderers.""" """Base class for all format renderers."""
def __init__(self): def __init__(self, services=None):
self.logger = logger self.logger = logger
self.services = services # Add services attribute
@classmethod @classmethod
def get_supported_formats(cls) -> List[str]: def get_supported_formats(cls) -> List[str]:
@ -313,7 +314,6 @@ class BaseRenderer(ABC):
Dict with styling definitions Dict with styling definitions
""" """
# DEBUG: Show which renderer is calling this method # DEBUG: Show which renderer is calling this method
print(f"🔍 BASE TEMPLATE _get_ai_styles called by: {self.__class__.__name__}")
if not ai_service: if not ai_service:
return default_styles return default_styles
@ -361,11 +361,8 @@ class BaseRenderer(ABC):
self.logger.warning(f"AI styling returned invalid JSON: {json_error}") self.logger.warning(f"AI styling returned invalid JSON: {json_error}")
# Use print instead of logger to avoid truncation # Use print instead of logger to avoid truncation
print(f"🔍 FULL AI RESPONSE THAT FAILED TO PARSE:") self.services.utils.debugLogToFile(f"FULL AI RESPONSE THAT FAILED TO PARSE: {result}", "RENDERER")
print("=" * 100) self.services.utils.debugLogToFile(f"RESPONSE LENGTH: {len(result)} characters", "RENDERER")
print(result)
print("=" * 100)
print(f"🔍 RESPONSE LENGTH: {len(result)} characters")
self.logger.warning(f"Raw content that failed to parse: {result}") self.logger.warning(f"Raw content that failed to parse: {result}")
@ -446,10 +443,6 @@ class BaseRenderer(ABC):
schema_json = json.dumps(style_schema, indent=4) schema_json = json.dumps(style_schema, indent=4)
# DEBUG: Show the schema being sent # DEBUG: Show the schema being sent
print(f"🔍 AI STYLE SCHEMA FOR {format_name.upper()}:")
print("=" * 80)
print(schema_json)
print("=" * 80)
return f"""You are a professional document styling expert. Generate a complete JSON styling configuration for {format_name.upper()} documents. return f"""You are a professional document styling expert. Generate a complete JSON styling configuration for {format_name.upper()} documents.

View file

@ -42,7 +42,7 @@ class RendererDocx(BaseRenderer):
async def render(self, extracted_content: Dict[str, Any], title: str, user_prompt: str = None, ai_service=None) -> Tuple[str, str]: async def render(self, extracted_content: Dict[str, Any], title: str, user_prompt: str = None, ai_service=None) -> Tuple[str, str]:
"""Render extracted JSON content to DOCX format using AI-analyzed styling.""" """Render extracted JSON content to DOCX format using AI-analyzed styling."""
print(f"🔍 DOCX RENDER CALLED: title={title}, user_prompt={user_prompt[:50] if user_prompt else 'None'}...") self.services.utils.debugLogToFile(f"DOCX RENDER CALLED: title={title}, user_prompt={user_prompt[:50] if user_prompt else 'None'}...", "DOCX_RENDERER")
try: try:
if not DOCX_AVAILABLE: if not DOCX_AVAILABLE:
# Fallback to HTML if python-docx not available # Fallback to HTML if python-docx not available
@ -68,10 +68,8 @@ class RendererDocx(BaseRenderer):
doc = Document() doc = Document()
# Get AI-generated styling definitions # Get AI-generated styling definitions
print(f"🔍 ABOUT TO CALL AI STYLING: user_prompt={user_prompt[:50] if user_prompt else 'None'}...")
self.logger.info(f"About to call AI styling with user_prompt: {user_prompt[:100] if user_prompt else 'None'}...") self.logger.info(f"About to call AI styling with user_prompt: {user_prompt[:100] if user_prompt else 'None'}...")
styles = await self._get_docx_styles(user_prompt, ai_service) styles = await self._get_docx_styles(user_prompt, ai_service)
print(f"🔍 AI STYLING RESULT: {type(styles)}")
# Apply basic document setup # Apply basic document setup
self._setup_basic_document_styles(doc) self._setup_basic_document_styles(doc)

View file

@ -103,11 +103,11 @@ class RendererPdf(BaseRenderer):
# Process each section # Process each section
sections = json_content.get("sections", []) sections = json_content.get("sections", [])
print(f"🔍 PDF SECTIONS TO PROCESS: {len(sections)} sections") self.services.utils.debugLogToFile(f"PDF SECTIONS TO PROCESS: {len(sections)} sections", "PDF_RENDERER")
for i, section in enumerate(sections): for i, section in enumerate(sections):
print(f"🔍 PDF SECTION {i}: type={section.get('type', 'unknown')}, id={section.get('id', 'unknown')}") self.services.utils.debugLogToFile(f"PDF SECTION {i}: type={section.get('type', 'unknown')}, id={section.get('id', 'unknown')}", "PDF_RENDERER")
section_elements = self._render_json_section(section, styles) section_elements = self._render_json_section(section, styles)
print(f"🔍 PDF SECTION {i} ELEMENTS: {len(section_elements)} elements") self.services.utils.debugLogToFile(f"PDF SECTION {i} ELEMENTS: {len(section_elements)} elements", "PDF_RENDERER")
story.extend(section_elements) story.extend(section_elements)
# Build PDF # Build PDF
@ -139,40 +139,15 @@ class RendererPdf(BaseRenderer):
style_template = self._create_ai_style_template("pdf", user_prompt, style_schema) style_template = self._create_ai_style_template("pdf", user_prompt, style_schema)
# DEBUG: Show which method is being called
print(f"🔍 PDF RENDERER: Calling base template _get_ai_styles")
# Use base template method like DOCX does (this works!) # Use base template method like DOCX does (this works!)
styles = await self._get_ai_styles(ai_service, style_template, self._get_default_pdf_styles()) styles = await self._get_ai_styles(ai_service, style_template, self._get_default_pdf_styles())
# DEBUG: Check what we got from AI styling
print(f"🔍 PDF AI STYLING RESULT: {type(styles)}")
if styles is None: if styles is None:
print(f"🔍 PDF AI STYLING RETURNED NONE!")
return self._get_default_pdf_styles() return self._get_default_pdf_styles()
elif isinstance(styles, dict):
print(f"🔍 PDF AI STYLING KEYS: {list(styles.keys())}")
print(f"🔍 PDF AI STYLING CONTENT:")
for key, value in styles.items():
print(f" {key}: {value}")
# Check specific colors
print(f"🔍 PDF TITLE COLOR FROM AI: {styles.get('title', {}).get('color', 'NOT_FOUND')}")
print(f"🔍 PDF HEADING1 COLOR FROM AI: {styles.get('heading1', {}).get('color', 'NOT_FOUND')}")
print(f"🔍 PDF PARAGRAPH COLOR FROM AI: {styles.get('paragraph', {}).get('color', 'NOT_FOUND')}")
else:
print(f"🔍 PDF AI STYLING VALUE: {styles}")
# Convert colors to PDF format after getting styles # Convert colors to PDF format after getting styles
print(f"🔍 PDF BEFORE COLOR CONVERSION:")
for key, value in styles.items():
print(f" {key}: {value}")
styles = self._convert_colors_format(styles) styles = self._convert_colors_format(styles)
print(f"🔍 PDF AFTER COLOR CONVERSION:")
for key, value in styles.items():
print(f" {key}: {value}")
# Validate and fix contrast issues # Validate and fix contrast issues
return self._validate_pdf_styles_contrast(styles) return self._validate_pdf_styles_contrast(styles)
@ -255,11 +230,8 @@ class RendererPdf(BaseRenderer):
self.logger.warning(f"AI styling returned invalid JSON: {json_error}") self.logger.warning(f"AI styling returned invalid JSON: {json_error}")
# Use print instead of logger to avoid truncation # Use print instead of logger to avoid truncation
print(f"🔍 FULL AI RESPONSE THAT FAILED TO PARSE:") self.services.utils.debugLogToFile(f"FULL AI RESPONSE THAT FAILED TO PARSE: {result}", "PDF_RENDERER")
print("=" * 100) self.services.utils.debugLogToFile(f"RESPONSE LENGTH: {len(result)} characters", "PDF_RENDERER")
print(result)
print("=" * 100)
print(f"🔍 RESPONSE LENGTH: {len(result)} characters")
self.logger.warning(f"Raw content that failed to parse: {result}") self.logger.warning(f"Raw content that failed to parse: {result}")
@ -399,8 +371,8 @@ class RendererPdf(BaseRenderer):
# DEBUG: Show what color and spacing is being used for title # DEBUG: Show what color and spacing is being used for title
title_color = title_style_def.get("color", "#1F4E79") title_color = title_style_def.get("color", "#1F4E79")
title_space_after = title_style_def.get("space_after", 30) title_space_after = title_style_def.get("space_after", 30)
print(f"🔍 PDF TITLE COLOR: {title_color} -> {self._hex_to_color(title_color)}") self.services.utils.debugLogToFile(f"PDF TITLE COLOR: {title_color} -> {self._hex_to_color(title_color)}", "PDF_RENDERER")
print(f"🔍 PDF TITLE SPACE_AFTER: {title_space_after}") self.services.utils.debugLogToFile(f"PDF TITLE SPACE_AFTER: {title_space_after}", "PDF_RENDERER")
return ParagraphStyle( return ParagraphStyle(
'CustomTitle', 'CustomTitle',
@ -441,12 +413,35 @@ class RendererPdf(BaseRenderer):
def _get_alignment(self, align: str) -> int: def _get_alignment(self, align: str) -> int:
"""Convert alignment string to reportlab alignment constant.""" """Convert alignment string to reportlab alignment constant."""
if not align or not isinstance(align, str):
return TA_LEFT
align_map = { align_map = {
"center": TA_CENTER, "center": TA_CENTER,
"left": TA_LEFT, "left": TA_LEFT,
"justify": TA_JUSTIFY "justify": TA_JUSTIFY,
"right": TA_LEFT, # ReportLab doesn't have TA_RIGHT, use LEFT as fallback
"0": TA_LEFT, # Handle numeric strings
"1": TA_CENTER,
"2": TA_JUSTIFY
} }
return align_map.get(align.lower(), TA_LEFT) return align_map.get(align.lower().strip(), TA_LEFT)
def _get_table_alignment(self, align: str) -> str:
"""Convert alignment string to ReportLab table alignment string."""
if not align or not isinstance(align, str):
return 'LEFT'
align_map = {
"center": 'CENTER',
"left": 'LEFT',
"justify": 'LEFT', # Tables don't support justify, use LEFT
"right": 'RIGHT',
"0": 'LEFT', # Handle numeric strings
"1": 'CENTER',
"2": 'LEFT' # Tables don't support justify, use LEFT
}
return align_map.get(align.lower().strip(), 'LEFT')
def _hex_to_color(self, hex_color: str) -> colors.Color: def _hex_to_color(self, hex_color: str) -> colors.Color:
"""Convert hex color to reportlab color.""" """Convert hex color to reportlab color."""
@ -518,7 +513,7 @@ class RendererPdf(BaseRenderer):
table_style = [ table_style = [
('BACKGROUND', (0, 0), (-1, 0), self._hex_to_color(table_header_style.get("background", "#4F4F4F"))), ('BACKGROUND', (0, 0), (-1, 0), self._hex_to_color(table_header_style.get("background", "#4F4F4F"))),
('TEXTCOLOR', (0, 0), (-1, 0), self._hex_to_color(table_header_style.get("text_color", "#FFFFFF"))), ('TEXTCOLOR', (0, 0), (-1, 0), self._hex_to_color(table_header_style.get("text_color", "#FFFFFF"))),
('ALIGN', (0, 0), (-1, -1), self._get_alignment(table_cell_style.get("align", "left"))), ('ALIGN', (0, 0), (-1, -1), self._get_table_alignment(table_cell_style.get("align", "left"))),
('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold' if table_header_style.get("bold", True) else 'Helvetica'), ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold' if table_header_style.get("bold", True) else 'Helvetica'),
('FONTSIZE', (0, 0), (-1, 0), table_header_style.get("font_size", 12)), ('FONTSIZE', (0, 0), (-1, 0), table_header_style.get("font_size", 12)),
('BOTTOMPADDING', (0, 0), (-1, 0), 12), ('BOTTOMPADDING', (0, 0), (-1, 0), 12),

View file

@ -202,8 +202,8 @@ class RendererXlsx(BaseRenderer):
"""Generate Excel content from structured JSON document using AI-generated styling.""" """Generate Excel content from structured JSON document using AI-generated styling."""
try: try:
# Debug output # Debug output
print(f"🔍 EXCEL JSON CONTENT TYPE: {type(json_content)}") self.services.utils.debugLogToFile(f"EXCEL JSON CONTENT TYPE: {type(json_content)}", "EXCEL_RENDERER")
print(f"🔍 EXCEL JSON CONTENT KEYS: {list(json_content.keys()) if isinstance(json_content, dict) else 'Not a dict'}") self.services.utils.debugLogToFile(f"EXCEL JSON CONTENT KEYS: {list(json_content.keys()) if isinstance(json_content, dict) else 'Not a dict'}", "EXCEL_RENDERER")
# Get AI-generated styling definitions # Get AI-generated styling definitions
styles = await self._get_excel_styles(user_prompt, ai_service) styles = await self._get_excel_styles(user_prompt, ai_service)
@ -223,7 +223,7 @@ class RendererXlsx(BaseRenderer):
# Create sheets based on content # Create sheets based on content
sheets = self._create_excel_sheets(wb, json_content, styles) sheets = self._create_excel_sheets(wb, json_content, styles)
print(f"🔍 EXCEL SHEETS CREATED: {list(sheets.keys()) if sheets else 'None'}") self.services.utils.debugLogToFile(f"EXCEL SHEETS CREATED: {list(sheets.keys()) if sheets else 'None'}", "EXCEL_RENDERER")
# Populate sheets with content # Populate sheets with content
self._populate_excel_sheets(sheets, json_content, styles) self._populate_excel_sheets(sheets, json_content, styles)
@ -235,12 +235,12 @@ class RendererXlsx(BaseRenderer):
# Convert to base64 # Convert to base64
excel_bytes = buffer.getvalue() excel_bytes = buffer.getvalue()
print(f"🔍 EXCEL BYTES LENGTH: {len(excel_bytes)}") self.services.utils.debugLogToFile(f"EXCEL BYTES LENGTH: {len(excel_bytes)}", "EXCEL_RENDERER")
try: try:
excel_base64 = base64.b64encode(excel_bytes).decode('utf-8') excel_base64 = base64.b64encode(excel_bytes).decode('utf-8')
print(f"🔍 EXCEL BASE64 LENGTH: {len(excel_base64)}") self.services.utils.debugLogToFile(f"EXCEL BASE64 LENGTH: {len(excel_base64)}", "EXCEL_RENDERER")
except Exception as b64_error: except Exception as b64_error:
print(f"🔍 BASE64 ENCODING ERROR: {b64_error}") self.services.utils.debugLogToFile(f"BASE64 ENCODING ERROR: {b64_error}", "EXCEL_RENDERER")
raise raise
return excel_base64 return excel_base64
@ -285,10 +285,6 @@ class RendererXlsx(BaseRenderer):
import json import json
import re import re
# Debug output
print(f"🔍 AI STYLING RESPONSE TYPE: {type(response)}")
print(f"🔍 AI STYLING RESPONSE LENGTH: {len(response.content) if response and hasattr(response, 'content') and response.content else 0}")
# Clean and parse JSON # Clean and parse JSON
result = response.content.strip() if response and response.content else "" result = response.content.strip() if response and response.content else ""
@ -301,23 +297,20 @@ class RendererXlsx(BaseRenderer):
json_match = re.search(r'```json\s*\n(.*?)\n```', result, re.DOTALL) json_match = re.search(r'```json\s*\n(.*?)\n```', result, re.DOTALL)
if json_match: if json_match:
result = json_match.group(1).strip() result = json_match.group(1).strip()
print(f"🔍 EXTRACTED JSON FROM MARKDOWN: {result[:100]}...") self.services.utils.debugLogToFile(f"EXTRACTED JSON FROM MARKDOWN: {result[:100]}...", "EXCEL_RENDERER")
elif result.startswith('```json'): elif result.startswith('```json'):
result = re.sub(r'^```json\s*', '', result) result = re.sub(r'^```json\s*', '', result)
result = re.sub(r'\s*```$', '', result) result = re.sub(r'\s*```$', '', result)
print(f"🔍 CLEANED JSON FROM MARKDOWN: {result[:100]}...") self.services.utils.debugLogToFile(f"CLEANED JSON FROM MARKDOWN: {result[:100]}...", "EXCEL_RENDERER")
elif result.startswith('```'): elif result.startswith('```'):
result = re.sub(r'^```\s*', '', result) result = re.sub(r'^```\s*', '', result)
result = re.sub(r'\s*```$', '', result) result = re.sub(r'\s*```$', '', result)
print(f"🔍 CLEANED JSON FROM GENERIC MARKDOWN: {result[:100]}...") self.services.utils.debugLogToFile(f"CLEANED JSON FROM GENERIC MARKDOWN: {result[:100]}...", "EXCEL_RENDERER")
# Try to parse JSON # Try to parse JSON
try: try:
styles = json.loads(result) styles = json.loads(result)
print(f"🔍 AI STYLING PARSED KEYS: {list(styles.keys()) if isinstance(styles, dict) else 'Not a dict'}")
except json.JSONDecodeError as json_error: except json.JSONDecodeError as json_error:
print(f"🔍 AI STYLING JSON ERROR: {json_error}")
print(f"🔍 AI STYLING RAW RESULT: {result[:200]}...")
self.logger.warning(f"AI styling returned invalid JSON: {json_error}, using defaults") self.logger.warning(f"AI styling returned invalid JSON: {json_error}, using defaults")
return default_styles return default_styles
@ -352,23 +345,19 @@ class RendererXlsx(BaseRenderer):
def _convert_colors_format(self, styles: Dict[str, Any]) -> Dict[str, Any]: def _convert_colors_format(self, styles: Dict[str, Any]) -> Dict[str, Any]:
"""Convert hex colors to aRGB format for Excel compatibility.""" """Convert hex colors to aRGB format for Excel compatibility."""
try: try:
print(f"🔍 CONVERTING COLORS IN STYLES: {styles}") self.services.utils.debugLogToFile(f"CONVERTING COLORS IN STYLES: {styles}", "EXCEL_RENDERER")
for style_name, style_config in styles.items(): for style_name, style_config in styles.items():
if isinstance(style_config, dict): if isinstance(style_config, dict):
for prop, value in style_config.items(): for prop, value in style_config.items():
if isinstance(value, str) and value.startswith('#') and len(value) == 7: if isinstance(value, str) and value.startswith('#') and len(value) == 7:
# Convert #RRGGBB to #AARRGGBB (add FF alpha channel) # Convert #RRGGBB to #AARRGGBB (add FF alpha channel)
old_value = value
styles[style_name][prop] = f"FF{value[1:]}" styles[style_name][prop] = f"FF{value[1:]}"
print(f"🔍 CONVERTED COLOR: {old_value}{styles[style_name][prop]}")
elif isinstance(value, str) and value.startswith('#') and len(value) == 9: elif isinstance(value, str) and value.startswith('#') and len(value) == 9:
print(f"🔍 COLOR ALREADY aRGB: {value}") pass # Already aRGB format
elif isinstance(value, str) and value.startswith('#'): elif isinstance(value, str) and value.startswith('#'):
print(f"🔍 UNEXPECTED COLOR FORMAT: {value} (length: {len(value)})") pass # Unexpected format, keep as is
print(f"🔍 FINAL CONVERTED STYLES: {styles}")
return styles return styles
except Exception as e: except Exception as e:
print(f"🔍 COLOR CONVERSION ERROR: {e}")
return styles return styles
def _validate_excel_styles_contrast(self, styles: Dict[str, Any]) -> Dict[str, Any]: def _validate_excel_styles_contrast(self, styles: Dict[str, Any]) -> Dict[str, Any]:
@ -426,7 +415,7 @@ class RendererXlsx(BaseRenderer):
# Get sheet names from AI styles or generate based on content # Get sheet names from AI styles or generate based on content
sheet_names = styles.get("sheet_names", self._generate_sheet_names_from_content(json_content)) sheet_names = styles.get("sheet_names", self._generate_sheet_names_from_content(json_content))
print(f"🔍 EXCEL SHEET NAMES: {sheet_names}") self.services.utils.debugLogToFile(f"EXCEL SHEET NAMES: {sheet_names}", "EXCEL_RENDERER")
# Create sheets # Create sheets
for i, sheet_name in enumerate(sheet_names): for i, sheet_name in enumerate(sheet_names):
@ -562,15 +551,11 @@ class RendererXlsx(BaseRenderer):
# Safety check for title style # Safety check for title style
title_style = styles.get("title", {"font_size": 16, "bold": True, "color": "#FF1F4E79", "align": "center"}) title_style = styles.get("title", {"font_size": 16, "bold": True, "color": "#FF1F4E79", "align": "center"})
print(f"🔍 EXCEL TITLE STYLE: {title_style}")
print(f"🔍 EXCEL TITLE COLOR: {title_style['color']} (type: {type(title_style['color'])}, length: {len(title_style['color']) if isinstance(title_style['color'], str) else 'not string'})")
try: try:
safe_color = self._get_safe_color(title_style["color"]) safe_color = self._get_safe_color(title_style["color"])
sheet['A1'].font = Font(size=title_style["font_size"], bold=title_style["bold"], color=safe_color) sheet['A1'].font = Font(size=title_style["font_size"], bold=title_style["bold"], color=safe_color)
sheet['A1'].alignment = Alignment(horizontal=title_style["align"]) sheet['A1'].alignment = Alignment(horizontal=title_style["align"])
print(f"🔍 EXCEL TITLE FONT CREATED SUCCESSFULLY with color: {safe_color}")
except Exception as font_error: except Exception as font_error:
print(f"🔍 EXCEL TITLE FONT ERROR: {font_error}")
# Try with a safe color # Try with a safe color
sheet['A1'].font = Font(size=title_style["font_size"], bold=title_style["bold"], color="FF000000") sheet['A1'].font = Font(size=title_style["font_size"], bold=title_style["bold"], color="FF000000")
sheet['A1'].alignment = Alignment(horizontal=title_style["align"]) sheet['A1'].alignment = Alignment(horizontal=title_style["align"])

View file

@ -21,7 +21,8 @@ async def buildExtractionPrompt(
renderer: _RendererLike, renderer: _RendererLike,
userPrompt: str, userPrompt: str,
title: str, title: str,
aiService=None aiService=None,
services=None
) -> str: ) -> str:
""" """
Build the final extraction prompt by combining: Build the final extraction prompt by combining:
@ -35,7 +36,7 @@ async def buildExtractionPrompt(
""" """
# Parse user prompt to separate extraction intent from generation format using AI # Parse user prompt to separate extraction intent from generation format using AI
extractionIntent = await _parseExtractionIntent(userPrompt, outputFormat, aiService) extractionIntent = await _parseExtractionIntent(userPrompt, outputFormat, aiService, services)
# Import JSON schema for structured output # Import JSON schema for structured output
from .subJsonSchema import get_document_subJsonSchema from .subJsonSchema import get_document_subJsonSchema
@ -95,6 +96,14 @@ Content Types to Extract:
3. Headings: Extract with appropriate levels 3. Headings: Extract with appropriate levels
4. Paragraphs: Extract as structured text 4. Paragraphs: Extract as structured text
5. Code: Extract code blocks with language identification 5. Code: Extract code blocks with language identification
6. Images: Analyze images and describe all visible content including text, tables, logos, graphics, layout, and visual elements
Image Analysis Requirements:
- If you cannot analyze an image for any reason, explain why in the JSON response
- Describe everything you see in the image
- Include all text content, tables, logos, graphics, layout, and visual elements
- If the image is too small, corrupted, or unclear, explain this
- Always provide feedback - never return empty responses
Return only the JSON structure with actual data from the documents. Do not include any text before or after the JSON. Return only the JSON structure with actual data from the documents. Do not include any text before or after the JSON.
""".strip() """.strip()
@ -103,7 +112,7 @@ Return only the JSON structure with actual data from the documents. Do not inclu
finalPrompt = genericIntro finalPrompt = genericIntro
# Debug output # Debug output
print(f"🔍 EXTRACTION INTENT: {extractionIntent}") services.utils.debugLogToFile(f"EXTRACTION INTENT: Processed", "PROMPT_BUILDER")
# Save full extraction prompt to debug file # Save full extraction prompt to debug file
try: try:
@ -125,7 +134,8 @@ async def buildGenerationPrompt(
outputFormat: str, outputFormat: str,
userPrompt: str, userPrompt: str,
title: str, title: str,
aiService=None aiService=None,
services=None
) -> str: ) -> str:
""" """
Use AI to build the generation prompt based on user intent and format requirements. Use AI to build the generation prompt based on user intent and format requirements.
@ -140,7 +150,7 @@ async def buildGenerationPrompt(
safeUserPrompt = userPrompt.replace('"', '\\"').replace("'", "\\'").replace('\n', ' ').replace('\r', ' ') safeUserPrompt = userPrompt.replace('"', '\\"').replace("'", "\\'").replace('\n', ' ').replace('\r', ' ')
# Debug output # Debug output
print(f"🔍 GENERATION PROMPT REQUEST: buildGenerationPrompt called with outputFormat='{outputFormat}', title='{title}'") services.utils.debugLogToFile(f"GENERATION PROMPT REQUEST: buildGenerationPrompt called with outputFormat='{outputFormat}', title='{title}'", "PROMPT_BUILDER")
# AI call to generate the appropriate generation prompt # AI call to generate the appropriate generation prompt
generationPromptRequest = f""" generationPromptRequest = f"""
@ -165,7 +175,7 @@ Return only the generation prompt, starting with "Generate a {outputFormat} docu
""" """
# Call AI service to generate the prompt # Call AI service to generate the prompt
print(f"🔍 GENERATION PROMPT REQUEST: Calling AI for generation prompt...") services.utils.debugLogToFile("GENERATION PROMPT REQUEST: Calling AI for generation prompt...", "PROMPT_BUILDER")
# Import and set proper options for AI call # Import and set proper options for AI call
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationType from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationType
@ -175,7 +185,6 @@ Return only the generation prompt, starting with "Generate a {outputFormat} docu
request = AiCallRequest(prompt=generationPromptRequest, context="", options=request_options) request = AiCallRequest(prompt=generationPromptRequest, context="", options=request_options)
response = await aiService.aiObjects.call(request) response = await aiService.aiObjects.call(request)
result = response.content if response else "" result = response.content if response else ""
print(f"🔍 GENERATION PROMPT AI RESPONSE: '{result}'")
# Replace the placeholder that the AI created with actual format rules # Replace the placeholder that the AI created with actual format rules
if result: if result:
@ -183,7 +192,7 @@ Return only the generation prompt, starting with "Generate a {outputFormat} docu
result = result.replace("PLACEHOLDER_FOR_FORMAT_RULES", formatRules) result = result.replace("PLACEHOLDER_FOR_FORMAT_RULES", formatRules)
# Debug output # Debug output
print(f"🔍 GENERATION PROMPT FINAL: {result if result else 'None'}") services.utils.debugLogToFile(f"GENERATION PROMPT: Generated successfully", "PROMPT_BUILDER")
# Save full generation prompt and AI response to debug file # Save full generation prompt and AI response to debug file
try: try:
@ -203,7 +212,7 @@ Return only the generation prompt, starting with "Generate a {outputFormat} docu
except Exception as e: except Exception as e:
# Fallback on any error - preserve user prompt for language instructions # Fallback on any error - preserve user prompt for language instructions
print(f"🔍 DEBUG: AI generation prompt failed: {str(e)}") services.utils.debugLogToFile(f"DEBUG: AI generation prompt failed: {str(e)}", "PROMPT_BUILDER")
return f"Generate a comprehensive {outputFormat} document titled '{title}' based on the extracted content. User requirements: {userPrompt}" return f"Generate a comprehensive {outputFormat} document titled '{title}' based on the extracted content. User requirements: {userPrompt}"
@ -222,7 +231,7 @@ def _getFormatRules(outputFormat: str) -> str:
""".strip() """.strip()
async def _parseExtractionIntent(userPrompt: str, outputFormat: str, aiService=None) -> str: async def _parseExtractionIntent(userPrompt: str, outputFormat: str, aiService=None, services=None) -> str:
""" """
Use AI to extract the core content intention from the user prompt. Use AI to extract the core content intention from the user prompt.
Focus on WHAT the user wants to extract, not HOW to format it. Focus on WHAT the user wants to extract, not HOW to format it.
@ -250,7 +259,7 @@ Do not include formatting instructions, file types, or output methods.
""" """
# Call AI service to extract intention # Call AI service to extract intention
print(f"🔍 DEBUG: Calling AI for extraction intent...") services.utils.debugLogToFile("DEBUG: Calling AI for extraction intent...", "PROMPT_BUILDER")
# Import and set proper options for AI call # Import and set proper options for AI call
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationType from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationType
@ -260,13 +269,13 @@ Do not include formatting instructions, file types, or output methods.
request = AiCallRequest(prompt=extractionPrompt, context="", options=request_options) request = AiCallRequest(prompt=extractionPrompt, context="", options=request_options)
response = await aiService.aiObjects.call(request) response = await aiService.aiObjects.call(request)
result = response.content if response else "" result = response.content if response else ""
print(f"🔍 DEBUG: AI extraction intent result: '{result}'") services.utils.debugLogToFile(f"DEBUG: Extraction intent processed", "PROMPT_BUILDER")
return result if result else f"Extract all relevant content from the document according to the user's requirements: {userPrompt}" return result if result else f"Extract all relevant content from the document according to the user's requirements: {userPrompt}"
except Exception as e: except Exception as e:
# Fallback on any error - preserve user prompt for language instructions # Fallback on any error - preserve user prompt for language instructions
print(f"🔍 DEBUG: AI extraction intent failed: {str(e)}") services.utils.debugLogToFile(f"DEBUG: AI extraction intent failed: {str(e)}", "PROMPT_BUILDER")
return f"Extract all relevant content from the document according to the user's requirements: {userPrompt}" return f"Extract all relevant content from the document according to the user's requirements: {userPrompt}"

View file

@ -32,7 +32,7 @@ class NeutralizationService:
serviceCenter: Service center instance for accessing other services serviceCenter: Service center instance for accessing other services
NamesToParse: List of names to parse and replace (case-insensitive) NamesToParse: List of names to parse and replace (case-insensitive)
""" """
self.serviceCenter = serviceCenter self.services = serviceCenter
self.interfaceDbApp = serviceCenter.interfaceDbApp self.interfaceDbApp = serviceCenter.interfaceDbApp
# Initialize anonymization processors # Initialize anonymization processors

View file

@ -21,7 +21,7 @@ class SharepointService:
Use setAccessTokenFromConnection() method to configure the access token before making API calls. Use setAccessTokenFromConnection() method to configure the access token before making API calls.
""" """
self.serviceCenter = serviceCenter self.services = serviceCenter
self.access_token = None self.access_token = None
self.base_url = "https://graph.microsoft.com/v1.0" self.base_url = "https://graph.microsoft.com/v1.0"

View file

@ -16,7 +16,7 @@ class TicketService:
Args: Args:
serviceCenter: Service center instance for accessing other services serviceCenter: Service center instance for accessing other services
""" """
self.serviceCenter = serviceCenter self.services = serviceCenter
async def _createTicketInterfaceByType( async def _createTicketInterfaceByType(
self, self,

View file

@ -4,6 +4,7 @@ Provides centralized access to configuration, events, and other utilities.
""" """
import logging import logging
import os
from typing import Any, Optional, Dict, Callable from typing import Any, Optional, Dict, Callable
from modules.shared.configuration import APP_CONFIG from modules.shared.configuration import APP_CONFIG
from modules.shared.eventManagement import eventManager from modules.shared.eventManagement import eventManager
@ -139,4 +140,43 @@ class UtilsService:
return TokenManager().getFreshToken(connectionId) return TokenManager().getFreshToken(connectionId)
except Exception as e: except Exception as e:
logger.error(f"Error getting fresh token for connection {connectionId}: {str(e)}") logger.error(f"Error getting fresh token for connection {connectionId}: {str(e)}")
return None return None
def debugLogToFile(self, message: str, context: str = "DEBUG") -> None:
    """
    Append a debug message to the workflow debug log file, if enabled.

    Logging is gated on the APP_DEBUG_CHAT_WORKFLOW_ENABLED config flag;
    when disabled this method returns immediately and touches no files.

    Args:
        message: Debug message to log.
        context: Context identifier (e.g. "EXCEL_RENDERER") written in
            brackets before the message.

    Returns:
        None. Any failure (unwritable directory, bad config, etc.) is
        swallowed silently so debug logging can never break, or recurse
        into, the code being debugged.
    """
    try:
        # Gate on the config flag; default is off.
        debug_enabled = self.configGet("APP_DEBUG_CHAT_WORKFLOW_ENABLED", False)
        if not debug_enabled:
            return
        # Resolve the debug directory from config.
        debug_dir = self.configGet("APP_DEBUG_CHAT_WORKFLOW_DIR", "./test-chat")
        if not os.path.isabs(debug_dir):
            # Relative paths are anchored at the gateway directory, i.e.
            # four levels above this module file — TODO confirm this depth
            # still matches the package layout if the module moves.
            gateway_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
            debug_dir = os.path.join(gateway_dir, debug_dir)
        # Ensure the target directory exists (idempotent).
        os.makedirs(debug_dir, exist_ok=True)
        debug_file = os.path.join(debug_dir, "debug_workflow.log")
        # Format: "[<utc timestamp>] [<context>] <message>"
        timestamp = self.getUtcTimestamp()
        debug_entry = f"[{timestamp}] [{context}] {message}\n"
        # Append so concurrent/сonsecutive calls accumulate in one log.
        with open(debug_file, "a", encoding="utf-8") as f:
            f.write(debug_entry)
    except Exception:
        # Deliberately swallow everything: raising (or logging) here could
        # recurse back into the debug path or mask the real error under test.
        pass

View file

@ -16,7 +16,7 @@ class WorkflowService:
"""Service class containing methods for document processing, chat operations, and workflow management""" """Service class containing methods for document processing, chat operations, and workflow management"""
def __init__(self, serviceCenter): def __init__(self, serviceCenter):
self.serviceCenter = serviceCenter self.services = serviceCenter
self.user = serviceCenter.user self.user = serviceCenter.user
self.workflow = serviceCenter.workflow self.workflow = serviceCenter.workflow
self.interfaceDbChat = serviceCenter.interfaceDbChat self.interfaceDbChat = serviceCenter.interfaceDbChat
@ -79,7 +79,7 @@ class WorkflowService:
"""Get ChatDocuments from a list of document references using all three formats.""" """Get ChatDocuments from a list of document references using all three formats."""
try: try:
# Get the current workflow from services (same pattern as setWorkflowContext) # Get the current workflow from services (same pattern as setWorkflowContext)
workflow = getattr(self.serviceCenter, 'currentWorkflow', None) or self.workflow workflow = getattr(self.services, 'currentWorkflow', None) or self.workflow
if not workflow: if not workflow:
logger.error("No workflow available for document list resolution") logger.error("No workflow available for document list resolution")
return [] return []
@ -241,7 +241,8 @@ class WorkflowService:
token_status = f"error: {str(e)}" token_status = f"error: {str(e)}"
# Build enhanced reference with state information # Build enhanced reference with state information
base_ref = f"connection:{connection.authority.value}:{connection.externalUsername}:{connection.id}" # Format: connection:msft:<username> (without UUID)
base_ref = f"connection:{connection.authority.value}:{connection.externalUsername}"
state_info = f" [status:{connection.status.value}, token:{token_status}]" state_info = f" [status:{connection.status.value}, token:{token_status}]"
logger.debug(f"getConnectionReferenceFromUserConnection: Built reference: {base_ref + state_info}") logger.debug(f"getConnectionReferenceFromUserConnection: Built reference: {base_ref + state_info}")
@ -264,26 +265,25 @@ class WorkflowService:
return None return None
def getUserConnectionFromConnectionReference(self, connectionReference: str) -> Optional[UserConnection]: def getUserConnectionFromConnectionReference(self, connectionReference: str) -> Optional[UserConnection]:
"""Get UserConnection from reference string (handles both old and enhanced formats)""" """Get UserConnection from reference string (handles new format without UUID)"""
try: try:
# Parse reference format: connection:{authority}:{username}:{id} [status:..., token:...] # Parse reference format: connection:{authority}:{username} [status:..., token:...]
# Remove state information if present # Remove state information if present
base_reference = connectionReference.split(' [')[0] base_reference = connectionReference.split(' [')[0]
parts = base_reference.split(':') parts = base_reference.split(':')
if len(parts) != 4 or parts[0] != "connection": if len(parts) != 3 or parts[0] != "connection":
return None return None
authority = parts[1] authority = parts[1]
username = parts[2] username = parts[2]
conn_id = parts[3]
# Get user connections through AppObjects interface # Get user connections through AppObjects interface
user_connections = self.interfaceDbApp.getUserConnections(self.user.id) user_connections = self.interfaceDbApp.getUserConnections(self.user.id)
# Find matching connection # Find matching connection by authority and username (no UUID needed)
for conn in user_connections: for conn in user_connections:
if str(conn.id) == conn_id and conn.authority.value == authority and conn.externalUsername == username: if conn.authority.value == authority and conn.externalUsername == username:
return conn return conn
return None return None
@ -419,7 +419,7 @@ class WorkflowService:
"""Set current workflow context for document generation and routing""" """Set current workflow context for document generation and routing"""
try: try:
# Get the current workflow from services # Get the current workflow from services
workflow = getattr(self.serviceCenter, 'currentWorkflow', None) or self.workflow workflow = getattr(self.services, 'currentWorkflow', None) or self.workflow
if not workflow: if not workflow:
logger.error("No workflow available for context setting") logger.error("No workflow available for context setting")
return return
@ -530,7 +530,7 @@ class WorkflowService:
"""Get document count for task planning (matching old handlingTasks.py logic)""" """Get document count for task planning (matching old handlingTasks.py logic)"""
try: try:
# Get the current workflow from services # Get the current workflow from services
workflow = getattr(self.serviceCenter, 'currentWorkflow', None) or self.workflow workflow = getattr(self.services, 'currentWorkflow', None) or self.workflow
if not workflow: if not workflow:
return "No documents available" return "No documents available"
@ -552,7 +552,7 @@ class WorkflowService:
"""Get workflow history context for task planning (matching old handlingTasks.py logic)""" """Get workflow history context for task planning (matching old handlingTasks.py logic)"""
try: try:
# Get the current workflow from services # Get the current workflow from services
workflow = getattr(self.serviceCenter, 'currentWorkflow', None) or self.workflow workflow = getattr(self.services, 'currentWorkflow', None) or self.workflow
if not workflow: if not workflow:
return "No previous round context available" return "No previous round context available"
@ -832,14 +832,14 @@ class WorkflowService:
"""Get connection reference list (matching old handlingTasks.py logic)""" """Get connection reference list (matching old handlingTasks.py logic)"""
try: try:
# Get connections from the database using the same logic as the old system # Get connections from the database using the same logic as the old system
if hasattr(self.serviceCenter, 'interfaceDbApp') and hasattr(self.serviceCenter, 'user'): if hasattr(self.services, 'interfaceDbApp') and hasattr(self.services, 'user'):
userId = self.serviceCenter.user.id userId = self.services.user.id
connections = self.serviceCenter.interfaceDbApp.getUserConnections(userId) connections = self.services.interfaceDbApp.getUserConnections(userId)
if connections: if connections:
# Format connections as reference strings using the same pattern as the old system # Format connections as reference strings using the same pattern as the old system
connectionRefs = [] connectionRefs = []
for conn in connections: for conn in connections:
# Create reference string in format: connection:{authority}:{username}:{id} [status:..., token:...] # Create reference string in format: connection:{authority}:{username} [status:..., token:...]
# This matches the format expected by getUserConnectionFromConnectionReference() # This matches the format expected by getUserConnectionFromConnectionReference()
ref = self.getConnectionReferenceFromUserConnection(conn) ref = self.getConnectionReferenceFromUserConnection(conn)
connectionRefs.append(ref) connectionRefs.append(ref)

View file

@ -64,7 +64,7 @@ REPLY: Return ONLY a JSON object with the following structure (no comments, no e
EXAMPLE how to assign references from AVAILABLE_DOCUMENTS_INDEX and AVAILABLE_CONNECTIONS_INDEX: EXAMPLE how to assign references from AVAILABLE_DOCUMENTS_INDEX and AVAILABLE_CONNECTIONS_INDEX:
"requiredInputDocuments": ["docList:msg_47a7a578-e8f2-4ba8-ac66-0dbff40605e0:round8_task1_action1_results","docItem:5d8b7aee-b546-4487-b6a8-835c86f7b186:AI_Generated_Document_20251006-104256.docx"], "requiredInputDocuments": ["docList:msg_47a7a578-e8f2-4ba8-ac66-0dbff40605e0:round8_task1_action1_results","docItem:5d8b7aee-b546-4487-b6a8-835c86f7b186:AI_Generated_Document_20251006-104256.docx"],
"requiredConnection": "connection:msft:p.motsch@valueon.ch:1ae8b8e5-128b-49b8-b1cb-7c632669eeae", "requiredConnection": "connection:msft:p.motsch@valueon.ch",
RULES: RULES:
1. Use EXACT action names from AVAILABLE_METHODS 1. Use EXACT action names from AVAILABLE_METHODS

View file

@ -41,6 +41,7 @@ markdown
## Web Scraping & HTTP ## Web Scraping & HTTP
beautifulsoup4==4.12.2 # Required for HTML/XML parsing beautifulsoup4==4.12.2 # Required for HTML/XML parsing
requests==2.31.0 requests==2.31.0
requests-oauthlib==1.3.1 # Required for Google OAuth2Session
chardet>=5.0.0 # Für Zeichensatzerkennung bei Webinhalten chardet>=5.0.0 # Für Zeichensatzerkennung bei Webinhalten
aiohttp>=3.8.0 # Required for SharePoint operations (async HTTP) aiohttp>=3.8.0 # Required for SharePoint operations (async HTTP)
selenium>=4.15.0 # Required for web automation and JavaScript-heavy pages selenium>=4.15.0 # Required for web automation and JavaScript-heavy pages

View file

@ -20,9 +20,15 @@ from modules.services.serviceAi.mainServiceAi import AiService
from modules.services.serviceGeneration.mainServiceGeneration import GenerationService from modules.services.serviceGeneration.mainServiceGeneration import GenerationService
# Set up logging # Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
# Set all module loggers to DEBUG level
logging.getLogger('modules.services.serviceAi.mainServiceAi').setLevel(logging.DEBUG)
logging.getLogger('modules.services.serviceGeneration.mainServiceGeneration').setLevel(logging.DEBUG)
logging.getLogger('modules.services.serviceGeneration.subPromptBuilder').setLevel(logging.DEBUG)
logging.getLogger('modules.services.serviceExtraction.mainServiceExtraction').setLevel(logging.DEBUG)
async def process_documents_and_generate_summary(): async def process_documents_and_generate_summary():
"""Process documents using the main AI service with intelligent chunk integration.""" """Process documents using the main AI service with intelligent chunk integration."""
@ -86,9 +92,50 @@ async def process_documents_and_generate_summary():
db_interface_module.getInterface = lambda: TestDbInterface(file_data_map) db_interface_module.getInterface = lambda: TestDbInterface(file_data_map)
logger.info("🔧 Database interface mocked successfully") logger.info("🔧 Database interface mocked successfully")
# Create a mock service center with utils
class MockServiceCenter:
    """Minimal stand-in for the real service center used by the integration
    test: exposes only the ``utils`` attribute the services call for
    debug logging."""
    def __init__(self):
        # MockUtils provides a debugLogToFile/configGet pair that mirrors
        # the real UtilsService API.
        self.utils = MockUtils()
class MockUtils:
    """Test double for UtilsService: mirrors its debugLogToFile/configGet
    API so services under test can log debug output during the run."""

    def debugLogToFile(self, message, label):
        # Mirror the message to the test logger and to stdout so it is
        # visible in the console during the test run.
        logger.debug(f"[{label}] {message}")
        print(f"DEBUG [{label}]: {message}")  # Also print to console for visibility
        # Only write to debug file if debug logging is enabled (matching real implementation)
        debug_enabled = self.configGet("APP_DEBUG_CHAT_WORKFLOW_ENABLED", False)
        if debug_enabled:
            try:
                import os
                from datetime import datetime, UTC
                debug_dir = self.configGet("APP_DEBUG_CHAT_WORKFLOW_DIR", "./test-chat")
                if not os.path.isabs(debug_dir):
                    # If relative path, make it relative to the gateway directory
                    # (four levels above this test file — same rule as the
                    # real UtilsService.debugLogToFile).
                    gateway_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
                    debug_dir = os.path.join(gateway_dir, debug_dir)
                os.makedirs(debug_dir, exist_ok=True)
                debug_file = os.path.join(debug_dir, "debug_workflow.log")
                # Millisecond-precision UTC timestamp, matching the real log format.
                timestamp = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
                debug_entry = f"[{timestamp}] [{label}] {message}\n"
                with open(debug_file, "a", encoding="utf-8") as f:
                    f.write(debug_entry)
            except Exception:
                pass  # Don't fail on debug logging errors

    def configGet(self, key, default):
        # Return debug settings: the mock always enables file logging and
        # points it at ./test-chat; everything else falls through to default.
        if key == "APP_DEBUG_CHAT_WORKFLOW_ENABLED":
            return True
        elif key == "APP_DEBUG_CHAT_WORKFLOW_DIR":
            return "./test-chat"
        return default
mock_service_center = MockServiceCenter()
# Initialize the main AI service - let it handle everything # Initialize the main AI service - let it handle everything
logger.info("🔧 Initializing main AI service...") logger.info("🔧 Initializing main AI service...")
ai_service = await AiService.create() ai_service = await AiService.create(mock_service_center)
# Create test documents - the AI service will handle file access internally # Create test documents - the AI service will handle file access internally
documents = [] documents = []
@ -152,9 +199,9 @@ async def process_documents_and_generate_summary():
# Run a single end-to-end test to avoid the loop issue # Run a single end-to-end test to avoid the loop issue
logger.info("🧪 Running single end-to-end test...") logger.info("🧪 Running single end-to-end test...")
# userPrompt = "Analyze these documents and create a comprehensive DOCX summary document including: 1) Document types and purposes, 2) Key information and main points, 3) Important details and numbers, 4) Notable sections, 5) Overall assessment and recommendations." userPrompt = "Analyze these documents and create a comprehensive summary for all input documents, each input document in a separate chapter summarized in 10-20 sentences."
userPrompt = "Analyze these documents and create a fitting image for the content" # userPrompt = "Analyze these documents and create a fitting image for the content"
# userPrompt = "Extract the table from file and produce 2 lists in excel. one list with all entries, one list only with entries that are yellow highlighted." # userPrompt = "Extract the table from file and produce 2 lists in excel. one list with all entries, one list only with entries that are yellow highlighted."
@ -168,7 +215,7 @@ async def process_documents_and_generate_summary():
prompt=userPrompt, prompt=userPrompt,
documents=documents, documents=documents,
options=ai_options, options=ai_options,
outputFormat="txt", outputFormat="docx",
title="Formulaire" title="Formulaire"
) )
@ -299,16 +346,30 @@ async def process_documents_and_generate_summary():
logger.info(f"✅ Document saved as text: {output_path} ({len(doc_data)} characters)") logger.info(f"✅ Document saved as text: {output_path} ({len(doc_data)} characters)")
elif file_ext in ['.png', '.jpg', '.jpeg']: elif file_ext in ['.png', '.jpg', '.jpeg']:
# Image formats - decode from base64 # Image formats - decode from base64
doc_bytes = base64.b64decode(doc_data) try:
with open(output_path, 'wb') as f: doc_bytes = base64.b64decode(doc_data)
f.write(doc_bytes) with open(output_path, 'wb') as f:
logger.info(f"✅ Image saved: {output_path} ({len(doc_bytes)} bytes)") f.write(doc_bytes)
logger.info(f"✅ Image saved: {output_path} ({len(doc_bytes)} bytes)")
except Exception as e:
logger.warning(f"⚠️ Failed to decode image as base64: {e}")
# Save as text if base64 decoding fails
with open(output_path, 'w', encoding='utf-8') as f:
f.write(doc_data)
logger.info(f"✅ Image saved as text (fallback): {output_path}")
else: else:
# Other binary formats - decode from base64 # Other binary formats - decode from base64
doc_bytes = base64.b64decode(doc_data) try:
with open(output_path, 'wb') as f: doc_bytes = base64.b64decode(doc_data)
f.write(doc_bytes) with open(output_path, 'wb') as f:
logger.info(f"✅ Document saved as binary: {output_path} ({len(doc_bytes)} bytes)") f.write(doc_bytes)
logger.info(f"✅ Document saved as binary: {output_path} ({len(doc_bytes)} bytes)")
except Exception as e:
logger.warning(f"⚠️ Failed to decode document as base64: {e}")
# Save as text if base64 decoding fails
with open(output_path, 'w', encoding='utf-8') as f:
f.write(doc_data)
logger.info(f"✅ Document saved as text (fallback): {output_path}")
# Also save raw content as text # Also save raw content as text
content = response.get('content', '') content = response.get('content', '')
@ -420,6 +481,23 @@ async def process_documents_and_generate_summary():
logger.info(f"✅ Comprehensive test report saved: {report_path}") logger.info(f"✅ Comprehensive test report saved: {report_path}")
# Show debug file locations
debug_files = []
try:
debug_dir = Path("test-chat")
if debug_dir.exists():
debug_files.extend(list(debug_dir.glob("*.log")))
debug_files.extend(list(debug_dir.glob("ai/*.txt")))
if debug_files:
logger.info("📁 Debug files created:")
for debug_file in debug_files:
logger.info(f" - {debug_file}")
else:
logger.info("📁 No debug files found in test-chat directory")
except Exception as e:
logger.warning(f"Could not list debug files: {e}")
# Restore original database interface # Restore original database interface
db_interface_module.getInterface = original_get_interface db_interface_module.getInterface = original_get_interface

View file

@ -0,0 +1,422 @@
#!/usr/bin/env python3
"""
Tool for encrypting all *_SECRET variables in all environment files.
This tool automatically processes all three environment files (dev, int, prod)
and encrypts any unencrypted *_SECRET variables using the appropriate encryption
keys for each environment.
Usage:
# Encrypt all secrets in all environment files
python tool_security_encrypt_all_env_files.py
# Dry run - show what would be changed without making changes
python tool_security_encrypt_all_env_files.py --dry-run
# Skip backup creation
python tool_security_encrypt_all_env_files.py --no-backup
# Process only specific environment files
python tool_security_encrypt_all_env_files.py --files env_dev.env env_prod.env
"""
import sys
import os
import argparse
import shutil
from pathlib import Path
from datetime import datetime
from typing import List, Dict, Any
# Add the modules directory to the Python path
current_dir = Path(__file__).parent
modules_dir = current_dir / 'modules'
if modules_dir.exists():
sys.path.insert(0, str(modules_dir))
else:
print(f"Error: Modules directory not found: {modules_dir}")
print(f"Make sure you're running this script from the gateway directory")
sys.exit(1)
# Import encryption functions
try:
from modules.shared.configuration import encrypt_value
except ImportError as e:
print(f"Error: Could not import encryption functions from shared.configuration: {e}")
print(f"Make sure you're running this script from the gateway directory")
print(f"Modules directory: {modules_dir}")
sys.exit(1)
def get_env_type_from_file(file_path: Path) -> str:
    """
    Read the APP_ENV_TYPE from the environment file.

    Args:
        file_path: Path to the environment file

    Returns:
        str: The environment type (dev, int, prod) or 'dev' as default when
            the file is missing, unreadable, or does not define the key.
    """
    if not file_path.exists():
        return 'dev'
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                # Only non-comment key/value lines can define the env type.
                if not line or line.startswith('#') or '=' not in line:
                    continue
                key, value = line.split('=', 1)
                # Exact key match: the previous prefix check also matched
                # longer keys such as APP_ENV_TYPE_EXTRA.
                if key.strip() == 'APP_ENV_TYPE':
                    return value.strip().lower()
    except Exception as e:
        # Best effort: fall back to 'dev' on any read error, matching the
        # tool's behavior for missing files.
        print(f"Warning: Could not read APP_ENV_TYPE from {file_path}: {e}")
    return 'dev'
def is_any_encrypted_value(value: str) -> bool:
    """
    Check if a value has any encryption prefix (DEV_ENC:, INT_ENC:, PROD_ENC:, etc.).

    Args:
        value: The value to check

    Returns:
        bool: True if the value has any encryption prefix, False otherwise
    """
    if not value or not isinstance(value, str):
        return False
    # str.startswith accepts a tuple of alternatives — one call covers
    # every environment-specific prefix.
    known_prefixes = (
        'DEV_ENC:',
        'INT_ENC:',
        'PROD_ENC:',
        'TEST_ENC:',
        'STAGING_ENC:',
    )
    return value.startswith(known_prefixes)
def find_secret_keys_in_file(file_path: Path) -> list:
    """
    Find all *_SECRET keys in an environment file that are not encrypted.

    Scans line by line; a key counts when its name ends in ``_SECRET`` and
    its value does not already carry a known ``*_ENC:`` prefix.  A value
    that opens a JSON object (``{``) is collected across subsequent lines
    until the braces balance, and the joined text is returned as one value.

    Args:
        file_path: Path to the environment file

    Returns:
        list: List of tuples (line_number, key, value, full_line) where
            line_number is 1-based and full_line is the stripped line the
            key appeared on.  Empty when the file is missing or unreadable.
    """
    secret_keys = []
    if not file_path.exists():
        return secret_keys
    # Get the environment type from the file itself
    # NOTE(review): file_env_type is never used below — dead assignment.
    file_env_type = get_env_type_from_file(file_path)
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()
        # Manual index loop: multiline JSON values advance `i` past the
        # lines they consume, so enumerate/for cannot be used here.
        i = 0
        while i < len(lines):
            line = lines[i].strip()
            # Skip empty lines and comments
            if not line or line.startswith('#'):
                i += 1
                continue
            # Check if line contains a key-value pair
            if '=' in line:
                key, value = line.split('=', 1)
                key = key.strip()
                value = value.strip()
                # Check if it's a secret key and not already encrypted with ANY prefix
                if key.endswith('_SECRET') and value and not is_any_encrypted_value(value):
                    # Check if value starts with { (JSON object)
                    if value.startswith('{'):
                        # Collect all lines until we find the closing }
                        json_lines = [value]
                        # 1-based line number of the key line (i is 0-based).
                        start_line = i + 1
                        i += 1
                        # Track brace balance; zero means the object closed.
                        brace_count = value.count('{') - value.count('}')
                        while i < len(lines) and brace_count > 0:
                            json_lines.append(lines[i].rstrip('\n'))
                            brace_count += lines[i].count('{') - lines[i].count('}')
                            i += 1
                        # Join all lines and create the full JSON value
                        full_json_value = '\n'.join(json_lines)
                        secret_keys.append((start_line, key, full_json_value, line))
                        i -= 1  # Adjust for the loop increment
                    else:
                        # Single line value
                        secret_keys.append((i + 1, key, value, line))
                # Check if it's a secret key with multiline JSON (value is just "{")
                # NOTE(review): unreachable — a value of '{' is truthy, starts
                # with '{', and is not encrypted, so the branch above already
                # handles it; this elif is dead code.
                elif key.endswith('_SECRET') and value == '{' and not is_any_encrypted_value(value):
                    # Collect all lines until we find the closing }
                    json_lines = [value]
                    start_line = i + 1
                    i += 1
                    brace_count = 1  # We already have one opening brace
                    while i < len(lines) and brace_count > 0:
                        json_lines.append(lines[i].rstrip('\n'))
                        brace_count += lines[i].count('{') - lines[i].count('}')
                        i += 1
                    # Join all lines and create the full JSON value
                    full_json_value = '\n'.join(json_lines)
                    secret_keys.append((start_line, key, full_json_value, line))
                    i -= 1  # Adjust for the loop increment
            i += 1
    except Exception as e:
        # Best effort: report the error and return whatever was parsed so far.
        print(f"Error reading {file_path}: {e}")
    return secret_keys
def backup_file(file_path: Path) -> Path:
    """
    Create a timestamped backup copy of the file before modification.

    The backup keeps the complete original file name (including extension)
    and appends ``.<timestamp>.backup`` — e.g. ``env_dev.env`` becomes
    ``env_dev.env.20250101_120000.backup``.  The previous ``with_suffix``
    approach replaced the original extension, so backups of ``a.env`` and
    ``a.txt`` would collide and the original name was not recoverable.

    Args:
        file_path: Path to the file to backup

    Returns:
        Path: Path to the backup file

    Raises:
        OSError: If the file cannot be copied.
    """
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    # Append to the full name instead of replacing the last suffix.
    backup_path = file_path.with_name(f"{file_path.name}.{timestamp}.backup")
    # copy2 preserves file metadata (mtime, permissions) with the content.
    shutil.copy2(file_path, backup_path)
    return backup_path
def encrypt_all_secrets_in_file(file_path: Path, dry_run: bool = False, create_backup: bool = True) -> Dict[str, Any]:
    """
    Encrypt all non-encrypted secrets in a file.

    Finds every unencrypted ``*_SECRET`` value, encrypts it with the
    environment type declared by the file's own APP_ENV_TYPE, and rewrites
    the file in place (unless ``dry_run``).  Failures are accumulated in the
    result dict rather than raised.

    Args:
        file_path: Path to the environment file
        dry_run: If True, only show what would be changed
        create_backup: If True, create a backup before modifying

    Returns:
        dict: Results of the encryption process with keys 'file',
            'env_type', 'secrets_found', 'secrets_encrypted', 'errors'
            (list of str), and 'backup_created' (str path or None).
    """
    # Get the environment type from the file itself
    file_env_type = get_env_type_from_file(file_path)
    results = {
        'file': str(file_path),
        'env_type': file_env_type,
        'secrets_found': 0,
        'secrets_encrypted': 0,
        'errors': [],
        'backup_created': None
    }
    # Find all secret keys
    secret_keys = find_secret_keys_in_file(file_path)
    results['secrets_found'] = len(secret_keys)
    if not secret_keys:
        print(f"  ✅ No unencrypted secrets found - all values already have encryption prefixes")
        return results
    print(f"  Found {len(secret_keys)} non-encrypted secrets")
    if dry_run:
        # Preview only: show a truncated value per secret and make no changes.
        print("  [DRY RUN] Would encrypt the following secrets:")
        for line_num, key, value, full_line in secret_keys:
            print(f"    Line {line_num}: {key} = {value[:50]}{'...' if len(value) > 50 else ''}")
        return results
    # Create backup if requested
    # A failed backup is recorded as an error but does NOT abort encryption.
    if create_backup:
        try:
            backup_path = backup_file(file_path)
            results['backup_created'] = str(backup_path)
            print(f"  📋 Backup created: {backup_path.name}")
        except Exception as e:
            results['errors'].append(f"Failed to create backup: {e}")
            print(f"  ⚠️ Warning: Could not create backup: {e}")
    # Read the file content
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()
    except Exception as e:
        results['errors'].append(f"Failed to read file: {e}")
        return results
    # Process each secret key
    for line_num, key, value, full_line in secret_keys:
        try:
            print(f"  🔐 Encrypting {key}...")
            # Encrypt the value using the environment type from the file
            encrypted_value = encrypt_value(value, file_env_type)
            # Replace the line in the file content
            # line_num is the 1-based line number reported by
            # find_secret_keys_in_file, hence the -1 for list indexing.
            new_line = f"{key} = {encrypted_value}\n"
            lines[line_num - 1] = new_line
            # If this was a multiline JSON, we need to remove the remaining lines
            if value.startswith('{') and '\n' in value:
                # Count how many lines the original JSON spanned
                json_lines = value.split('\n')
                lines_to_remove = len(json_lines) - 1  # -1 because we already replaced the first line
                # Remove the remaining lines
                # Blanked entries ("") contribute nothing when written back
                # via writelines, effectively deleting those lines.
                for i in range(line_num, line_num + lines_to_remove):
                    if i < len(lines):
                        lines[i] = ""
            results['secrets_encrypted'] += 1
            print(f"    ✓ Encrypted successfully")
        except Exception as e:
            error_msg = f"Failed to encrypt {key}: {e}"
            results['errors'].append(error_msg)
            print(f"    ✗ {error_msg}")
    # Write the modified content back to the file
    # Skipped entirely when nothing was encrypted, leaving the file untouched.
    if results['secrets_encrypted'] > 0:
        try:
            with open(file_path, 'w', encoding='utf-8') as f:
                f.writelines(lines)
            print(f"  💾 File updated successfully")
        except Exception as e:
            results['errors'].append(f"Failed to write file: {e}")
            print(f"  ✗ Failed to write file: {e}")
    return results
# NOTE(review): the annotation should be Optional[List[str]] since None is the
# default; kept as-is because `typing.Optional` is not imported in this file.
def process_all_env_files(env_files: List[str] = None, dry_run: bool = False, create_backup: bool = True) -> Dict[str, Any]:
    """
    Process all environment files and encrypt unencrypted secrets.

    Delegates per-file work to encrypt_all_secrets_in_file and prints a
    consolidated summary (counts, backups, errors) to stdout.

    Args:
        env_files: List of specific files to process (if None, processes all three default files)
        dry_run: If True, only show what would be changed
        create_backup: If True, create backups before modifying

    Returns:
        dict: Summary with keys 'total_files', 'total_secrets_found',
            'total_secrets_encrypted', 'total_errors', and 'files'
            (the per-file result dicts).
    """
    # Default environment files if none specified
    if env_files is None:
        env_files = ['env_dev.env', 'env_int.env', 'env_prod.env']
    # Convert to Path objects and check if they exist
    # Missing files are warned about and skipped, not treated as errors.
    env_paths = []
    for env_file in env_files:
        env_path = Path(env_file)
        if not env_path.exists():
            print(f"⚠️ Warning: Environment file not found: {env_file}")
            continue
        env_paths.append(env_path)
    if not env_paths:
        print("❌ No valid environment files found to process")
        return {'total_files': 0, 'total_secrets_found': 0, 'total_secrets_encrypted': 0, 'total_errors': 0, 'files': []}
    print("🔐 PowerOn Batch Secret Encryption Tool")
    print("=" * 60)
    print("⚠️ IMPORTANT: The tool will read APP_ENV_TYPE from each file itself")
    print("⚠️ Each file will be processed with its own environment-specific encryption")
    print()
    if dry_run:
        print("🔍 DRY RUN MODE - No changes will be made")
        print()
    # Process each file
    all_results = []
    total_secrets_found = 0
    total_secrets_encrypted = 0
    total_errors = 0
    for env_path in env_paths:
        print(f"\n📁 Processing {env_path.name}:")
        results = encrypt_all_secrets_in_file(env_path, dry_run, create_backup)
        all_results.append(results)
        total_secrets_found += results['secrets_found']
        total_secrets_encrypted += results['secrets_encrypted']
        total_errors += len(results['errors'])
    # Summary
    print("\n" + "=" * 60)
    print("📊 SUMMARY")
    print("=" * 60)
    print(f"Files processed: {len(env_paths)}")
    print(f"Total secrets found: {total_secrets_found}")
    if not dry_run:
        print(f"Total secrets encrypted: {total_secrets_encrypted}")
        print(f"Total errors: {total_errors}")
        if total_errors == 0 and total_secrets_encrypted > 0:
            print("\n🎉 All secrets encrypted successfully!")
        elif total_errors > 0:
            print(f"\n⚠️ Completed with {total_errors} errors")
        else:
            print("\n✅ No secrets needed encryption")
    else:
        print(f"Secrets that would be encrypted: {total_secrets_found}")
    # Show backup information
    backups_created = [r['backup_created'] for r in all_results if r['backup_created']]
    if backups_created:
        print(f"\n📋 Backups created: {len(backups_created)}")
        for backup in backups_created:
            print(f"  - {Path(backup).name}")
    # Show errors if any
    all_errors = []
    for results in all_results:
        all_errors.extend(results['errors'])
    if all_errors:
        print(f"\n❌ Errors encountered:")
        for error in all_errors:
            print(f"  - {error}")
    return {
        'total_files': len(env_paths),
        'total_secrets_found': total_secrets_found,
        'total_secrets_encrypted': total_secrets_encrypted,
        'total_errors': total_errors,
        'files': all_results
    }
def main():
    """Command-line entry point for the batch secret-encryption tool.

    Returns:
        int: 0 on success, 1 when any error occurred.
    """
    parser = argparse.ArgumentParser(
        description='Encrypt all *_SECRET variables in all environment files'
    )
    parser.add_argument(
        '--files', '-f', nargs='+',
        help='Specific environment files to process (default: all three env files)'
    )
    parser.add_argument(
        '--dry-run', action='store_true',
        help='Show what would be changed without making changes'
    )
    parser.add_argument(
        '--no-backup', action='store_true',
        help='Skip creating backup files'
    )
    args = parser.parse_args()

    try:
        summary = process_all_env_files(
            env_files=args.files,
            dry_run=args.dry_run,
            create_backup=not args.no_backup,
        )
        # Non-zero exit code whenever at least one file reported errors.
        return 1 if summary['total_errors'] > 0 else 0
    except Exception as e:
        print(f"Error: {e}")
        return 1


if __name__ == '__main__':
    sys.exit(main())