ai models and calls: standardized and harmonized all model operation types

This commit is contained in:
ValueOn AG 2025-10-24 19:03:53 +02:00
parent 64131f65ce
commit 89337418f6
30 changed files with 1121 additions and 716 deletions

View file

@ -50,23 +50,11 @@ APP_LOGGING_BACKUP_COUNT = 5
Service_MSFT_REDIRECT_URI = http://localhost:8000/api/msft/auth/callback Service_MSFT_REDIRECT_URI = http://localhost:8000/api/msft/auth/callback
Service_GOOGLE_REDIRECT_URI = http://localhost:8000/api/google/auth/callback Service_GOOGLE_REDIRECT_URI = http://localhost:8000/api/google/auth/callback
# OpenAI configuration # AI configuration
Connector_AiOpenai_API_URL = https://api.openai.com/v1/chat/completions
Connector_AiOpenai_API_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEajBuZmtYTVdqLTBpQm9KZ2pCXzRCV3VhZzlYTEhKb1FqWXNrV3lyb25uZUN1WVVQUEY3dGYtejludV9MNGlKeVREanZGOGloV09mY2ttQ3k5SjBFOGFac2ZQTkNKNUZWVnRINVQyeWhsR2wyYnVrRDNzV2NqSHB0ajQ4UWtGeGZtbmR0Q3VvS0hDZlphVmpSc2Z6RG5nPT0= Connector_AiOpenai_API_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEajBuZmtYTVdqLTBpQm9KZ2pCXzRCV3VhZzlYTEhKb1FqWXNrV3lyb25uZUN1WVVQUEY3dGYtejludV9MNGlKeVREanZGOGloV09mY2ttQ3k5SjBFOGFac2ZQTkNKNUZWVnRINVQyeWhsR2wyYnVrRDNzV2NqSHB0ajQ4UWtGeGZtbmR0Q3VvS0hDZlphVmpSc2Z6RG5nPT0=
Connector_AiOpenai_MODEL_NAME = gpt-4o
Connector_AiOpenai_TEMPERATURE = 0.2
# Anthropic configuration
Connector_AiAnthropic_API_URL = https://api.anthropic.com/v1/messages
Connector_AiAnthropic_API_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpENmFBWG16STFQUVZxNzZZRzRLYTA4X3lRanF1VkF4cU45OExNMzlsQmdISGFxTUxud1dXODBKcFhMVG9KNjdWVnlTTFFROVc3NDlsdlNHLUJXeG41NDBHaXhHR0VHVWl5UW9RNkVWbmlhakRKVW5pM0R4VHk0LUw0TV9LdkljNHdBLXJua21NQkl2b3l4UkVkMGN1YjBrMmJEeWtMay1jbmxrYWJNbUV0aktCXzU1djR2d2RSQXZORTNwcG92ZUVvVGMtQzQzTTVncEZTRGRtZUFIZWQ0dz09 Connector_AiAnthropic_API_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpENmFBWG16STFQUVZxNzZZRzRLYTA4X3lRanF1VkF4cU45OExNMzlsQmdISGFxTUxud1dXODBKcFhMVG9KNjdWVnlTTFFROVc3NDlsdlNHLUJXeG41NDBHaXhHR0VHVWl5UW9RNkVWbmlhakRKVW5pM0R4VHk0LUw0TV9LdkljNHdBLXJua21NQkl2b3l4UkVkMGN1YjBrMmJEeWtMay1jbmxrYWJNbUV0aktCXzU1djR2d2RSQXZORTNwcG92ZUVvVGMtQzQzTTVncEZTRGRtZUFIZWQ0dz09
Connector_AiAnthropic_MODEL_NAME = claude-3-5-sonnet-20241022
Connector_AiAnthropic_TEMPERATURE = 0.2
# Perplexity AI configuration
Connector_AiPerplexity_API_URL = https://api.perplexity.ai/chat/completions
Connector_AiPerplexity_API_SECRET = DEV_ENC:Z0FBQUFBQm82Mzk2Q1MwZ0dNcUVBcUtuRDJIcTZkMXVvYnpjM3JEMzJiT1NKSHljX282ZDIyZTJYc09VSTdVNXAtOWU2UXp5S193NTk5dHJsWlFjRjhWektFOG1DVGY4ZUhHTXMzS0RPN1lNcF9nSlVWbW5BZ1hkZDVTejl6bVZNRFVvX29xamJidWRFMmtjQmkyRUQ2RUh6UTN1aWNPSUJBPT0= Connector_AiPerplexity_API_SECRET = DEV_ENC:Z0FBQUFBQm82Mzk2Q1MwZ0dNcUVBcUtuRDJIcTZkMXVvYnpjM3JEMzJiT1NKSHljX282ZDIyZTJYc09VSTdVNXAtOWU2UXp5S193NTk5dHJsWlFjRjhWektFOG1DVGY4ZUhHTXMzS0RPN1lNcF9nSlVWbW5BZ1hkZDVTejl6bVZNRFVvX29xamJidWRFMmtjQmkyRUQ2RUh6UTN1aWNPSUJBPT0=
Connector_AiPerplexity_MODEL_NAME = sonar Connector_AiTavily_API_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEQTdnUHMwd2pIaXNtMmtCTFREd0pyQXRKb1F5eGtHSnkyOGZiUnlBOFc0b3Vzcndrc3ViRm1nMDJIOEZKYWxqdWNkZGh5N0Z4R0JlQmxXSG5pVnJUR2VYckZhMWNMZ1FNeXJ3enJLVlpiblhOZTNleUg3ZzZyUzRZanFSeDlVMkI=
Connector_AiPerplexity_TEMPERATURE = 0.2
# Agent Mail configuration # Agent Mail configuration
Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
@ -77,9 +65,6 @@ Service_MSFT_TENANT_ID = common
Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
Service_GOOGLE_CLIENT_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpETDJhbGVQMHlFQzNPVFI1ZzBMa3pNMGlQUHhaQm10eVl1bFlSeTBybzlTOWE2MURXQ0hkRlo0NlNGbHQxWEl1OVkxQnVKYlhhOXR1cUF4T3k0WDdscktkY1oyYllRTmdDTWpfbUdwWGtSd1JvNlYxeTBJdEtaaS1vYnItcW0yaFM= Service_GOOGLE_CLIENT_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpETDJhbGVQMHlFQzNPVFI1ZzBMa3pNMGlQUHhaQm10eVl1bFlSeTBybzlTOWE2MURXQ0hkRlo0NlNGbHQxWEl1OVkxQnVKYlhhOXR1cUF4T3k0WDdscktkY1oyYllRTmdDTWpfbUdwWGtSd1JvNlYxeTBJdEtaaS1vYnItcW0yaFM=
# Tavily Web Search configuration
Connector_WebTavily_API_KEY_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEQTdnUHMwd2pIaXNtMmtCTFREd0pyQXRKb1F5eGtHSnkyOGZiUnlBOFc0b3Vzcndrc3ViRm1nMDJIOEZKYWxqdWNkZGh5N0Z4R0JlQmxXSG5pVnJUR2VYckZhMWNMZ1FNeXJ3enJLVlpiblhOZTNleUg3ZzZyUzRZanFSeDlVMkI=
# Google Cloud Speech Services configuration # Google Cloud Speech Services configuration
Connector_GoogleSpeech_API_KEY_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpETk5FWWM3Q0JKMzhIYTlyMkhuNjA4NlF4dk82U2NScHhTVGY3UG83NkhfX3RrcWVtWWcyLXRjU1dTT21zWEl6YWRMMUFndXpsUnJOeHh3QThsNDZKRXROTzdXRUdsT0JZajZJNVlfb0gtMXkwWm9DOERPVnpjU0pyUEZfOGJsUnprT3ltMVVhalUyUm9hMUFtZEtHUnJqOGZ4dEZjZm5SWVVTckVCWnY1UkdVSHVmUlgwbnAyc0xDQW84R3ViSko5OHVCVWZRUVNiaG1pVFB6X3EwS0FPd2dUYjhiSmRjcXh2WEZiXzI4SFZqT21tbDduUWRyVWdFZXpmcVM5ZDR0VWtzZnF5UER6cGwwS2JlLV9CSTZ0Z0IyQ1h0YW9TcmhRTXZEckp4bWhmTkt6UTNYMk4zVkpnbUJmaDIxZnoyR2dWTEYwTUFEV0w2eUdUUGpoZk9XRkt4RVF1Z1NPdUpBeTcyWV9PY1Ffd2s0ZEdVekxGekhoeEl4TmNqaXYtbUJuSVdycFducERWdWtZajZnX011Q2w4eE9VMTBqQ1ZxRmdScWhXY1E3WWhzX1JZcHhxam9FbDVPN3Q1MWtrMUZuTUg3LVFQVHp1T1hpQWNDMzEzekVJWk9ybl91YUVjSkFob1VaMi1ONEtuMnRSOEg1S3QybUMwbVZDejItajBLTjM2Zy1hNzZQMW5LLVVDVGdFWm5BZUxNeEFnUkZzU3dxV0lCUlc0LWo4b05GczVpOGZSV2ZxbFBwUml6OU5tYjdnTks3Y3hrVEZVTHlmc1NPdFh4WE5pWldEZklOQUxBbjBpMTlkX3FFQVJ6c2NSZGdzTThycE92VW82enZKamhiRGFnU25aZGlHZHhZd2lUUmhuTVptNjhoWVlJQkxIOEkzbzJNMjZCZFJyM25tdXBnQ2ZWaHV3b2p6UWJpdk9xUEhBc1dyTlNmeF9wbm5yYUhHV01UZnVXWDFlNzBkdXlWUWhvcmJpSmljbmE3LUpUZEg4VzRwZ2JVSjdYUm1sODViQXVxUzdGTmZFbVpiN2V1YW5XV3U4b2VRWmxldGVGVHZsSldoekhVLU9wZ2V0cGZIYkNqM2pXVGctQVAyUm4xTHhpd1VVLXFhcnVEV21Rby1hbTlqTl84TjVveHdYTExUVkhHQ0ltaTB2WXJnY1NQVE5PbWg3ejgySElYc1JSTlQ3NDlFUWR6STZVUjVqaXFRN200NF9LY1ljQ0R2UldlWUtKY1NQVnJ4QXRyYTBGSWVuenhyM0Z0cWtndTd1eG8xRzY5a2dNZ1hkQm5MV3BHVzA2N1QwUkd6WlRGYTZQOUhnVWQ2S0Y5U0s1dXFNVXh5Q2pLWVUxSUQ2MlR1ak52NmRIZ2hlYTk1SGZGWS1RV3hWVU9rR3d1Rk9MLS11REZXbzhqMHpsSm1HYW1jMUNLT29YOHZsRWNaLTVvOFpmT3l3MHVwaERTT0dNLWFjcGRYZ25qT2szTkVFUnRFR3JWYS1aNXFIRnMyalozTlQzNFF2NXJLVHVPVF9zdTF6ZjlkbzJ4RFc2ZENmNFFxZDZzTzhfMUl0bW96V0lPZkh1dXFYZlEteFBlSG84Si1FNS1TTi1OMkFnX2pOYW8xY3MxMVJnVC02MDUyaXZfMEVHWDQtVlRpcENmV0h3V0dCWEFRS2prQXdNRlQ5dnRFVHU0Q1dNTmh0SlBCaU55bFMydWM1TTFFLW96ODBnV3dNZHFZTWZhRURYSHlrdzF3RlRuWDBoQUhSOUJWemtRM3pxcDJFbGJoaTJ3ZktRTlJxbXltaHBoZXVJVDlxS3cxNWo2c0ZBV0NzaUstRWdsMW1xLXFkanZGYUFiU0tSLXFQa0tkcDFoMV9kak41ZjQ0R214UmtOR1ZBanRuemY3Mmw1SkZ5aDZodGIzT3N2aV85MW9kcld6c0g0ZDgtTWo3
b3Y3VjJCRnR2U2tMVm9rUXNVRnVHbzZXVTZ6RmI2RkNmajBfMWVnODVFbnpkT0oyci15czJHU0p1cUowTGZJMzVnd3hIRjQyTVhKOGRkcFRKdVpyQ3Yzd01Jb1lSajFmV0paeEV0cjk1SmpmdWpDVFJMUmMtUFctOGhaTmlKQXNRVlVUNlhJemxudHZCR056SVlBb3NOTEYxRTRLaFlVd2d3TWtxVlB6ZEtQLTkxOGMyY3N0a2pYRFUweDBNaGhja2xSSklPOUZla1dKTWRNbG8tUGdSNEV5cW90OWlOZFlIUExBd3U2b2hyS1owbXVMM3p0Qm41cUtzWUxYNzB1N3JpUTNBSGdsT0NuamNTb1lIbXR4MG1sakNPVkxBUXRLVE1xX0YxWDhOcERIY1lTQVFqS01CaXZKNllFaXlIR0JsM1pKMmV1OUo3TGI1WkRaVnYxUTl1LTM0SU1qN1V1b0RCT0x0VHNLTmNLZnk1S0MxYnBBcm03WnVua0xqaEhGUzhOU253ZkppRzdudXBSVlMxeFVOSWxtZ1o2RVBSQUhEUEFuQ1hxSVZMME4yWUtaU3VyRGo3RkUyRUNjT0pNcE1BdE1ZRzdXVl8ydUtXZjdMdHdEVW4teHUtTi1HSGliLUxud21TX0NtcGVkRFBHNkZ1WTlNczR4OUJfUVluc1BoV09oWS1scUdsNnB5d1U5M1huX3k4QzAyNldtb2hybktYN2xKZ1NTNWFsaWwzV3pCRVhkaGR5eTNlV1d6ZzFfaFZTT0E4UjRpQ3pKdEZxUlJ6UFZXM3laUndyWEk2NlBXLUpoajVhZzVwQXpWVzUtVjVNZFBwdWdQa3AxZC1KdGdqNnhibjN4dmFYb2cxcEVwc1g5R09zRUdINUZtOE5QRjVUU0dpZy1QVl9odnFtVDNuWFZLSURtMXlSMlhRNTBWSVFJbEdOOWpfVWV0SmdRWDdlUXZZWE8xRUxDN1I0aEN6MHYwNzM1cmpJS0ZpMnBYWkxfb3FsbEV1VnlqWGxqdVJ6SHlwSjAzRlMycTBaQ295NXNnZERpUnJQcjhrUUd3bkI4bDVzRmxQblhkaFJPTTdISnVUQmhET3BOMTM4bjVvUEc2VmZhb2lrR1FyTUl2RWNEeGg0U0dsNnV6eU5zOUxiNDY5SXBxR0hBS00wOTgyWTFnWkQyaEtLVUloT3ZxZGh0RWVGRmJzenFsaUtfZENQM0JzdkVVeTdXR3hUSmJST1NBMUI1NkVFWncwNW5JZVVLX1p1RXdqVnFfQWpvQ08yQjZhN1NkTkpTSnUxOVRXZXE0WFEtZWxhZW1NNXYtQ2sya0VGLURmS01lMkctNVY3c2ZhN0ZGRFgwWHlabTFkeS1hcUZ1dDZ3cnpPQ3hha2IzVE11M0pqbklmU0diczBqTFBNZC1QZGp6VzNTSnJVSjJoWkJUQjVORG4tYUJmMEJtSUNUdVpEaGt6OTM3TjFOdVhXUHItZjRtZ25nU3NhZC1sVTVXNTRDTmxZbnlfeHNsdkpuMXhUYnE1MnpVQ0ZOclRWM1M4eHdXTzRXbFRZZVQtTS1iRVdXVWZMSGotcWg3MUxUYTFnSEEtanBCRHlZRUNIdGdpUFhsYjdYUndCZnRITzhMZVJ1dHFoVlVNb0duVjlxd0U4OGRuQVV3MG90R0hiYW5MWkxWVklzbWFRNzBfSUNrdzc5bVdtTXg0dExEYnRCaDI3c1I4TWFwLXZKR0wxSjRZYjZIV3ZqZjNqTWhFT0RGSDVMc1A1UzY2bDBiMGFSUy1fNVRQRzRJWDVydUpqb1ZfSHNVbldVeUN2YlAxSW5WVDdxVzJ1WHpLeUdmb0xWMDNHN05oQzY3YnhvUUdhS2xaOHNidkVvbTZtSHFlblhOYmwyR3NQdVJDRUdxREhWdF9ZcXhwUWxHc2hyLW5vUGhIUVhJNUNhY0hFU0ptVnI0TFVhZDE1TFBBUEstSkRoZWJ5MHJhUmZrR1ZrRlFtRGpxS1pO
MmFMQjBsdjluY3FiYUU4eGJVVXlZVEpuNWdHVVhJMGtwaTdZR2NDbXd2eHpOQ09SeTV6N1BaVUpsR1pQVDBZcElJUUt6VnVpQmxSYnE4Y1BCWV9IRWdVV0p3enBGVHItdnBGN3NyNWFBWmkySnByWThsbDliSlExQmp3LVlBaDIyZXp6UnR6cU9rTzJmTDBlSVpON0tiWllMdm1oME1zTFl2S2ZYYllhQlY2VHNZRGtHUDY4U1lIVExLZTU4VzZxSTZrZHl1ZTBDc0g4SjI4WGYyZHV1bm9wQ3R2Z09ld1ZmUkN5alJGeHZKSHl1bWhQVXpNMzdjblpLcUhfSm02Qlh5S1FVN3lIcHl0NnlRPT0= Connector_GoogleSpeech_API_KEY_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpETk5FWWM3Q0JKMzhIYTlyMkhuNjA4NlF4dk82U2NScHhTVGY3UG83NkhfX3RrcWVtWWcyLXRjU1dTT21zWEl6YWRMMUFndXpsUnJOeHh3QThsNDZKRXROTzdXRUdsT0JZajZJNVlfb0gtMXkwWm9DOERPVnpjU0pyUEZfOGJsUnprT3ltMVVhalUyUm9hMUFtZEtHUnJqOGZ4dEZjZm5SWVVTckVCWnY1UkdVSHVmUlgwbnAyc0xDQW84R3ViSko5OHVCVWZRUVNiaG1pVFB6X3EwS0FPd2dUYjhiSmRjcXh2WEZiXzI4SFZqT21tbDduUWRyVWdFZXpmcVM5ZDR0VWtzZnF5UER6cGwwS2JlLV9CSTZ0Z0IyQ1h0YW9TcmhRTXZEckp4bWhmTkt6UTNYMk4zVkpnbUJmaDIxZnoyR2dWTEYwTUFEV0w2eUdUUGpoZk9XRkt4RVF1Z1NPdUpBeTcyWV9PY1Ffd2s0ZEdVekxGekhoeEl4TmNqaXYtbUJuSVdycFducERWdWtZajZnX011Q2w4eE9VMTBqQ1ZxRmdScWhXY1E3WWhzX1JZcHhxam9FbDVPN3Q1MWtrMUZuTUg3LVFQVHp1T1hpQWNDMzEzekVJWk9ybl91YUVjSkFob1VaMi1ONEtuMnRSOEg1S3QybUMwbVZDejItajBLTjM2Zy1hNzZQMW5LLVVDVGdFWm5BZUxNeEFnUkZzU3dxV0lCUlc0LWo4b05GczVpOGZSV2ZxbFBwUml6OU5tYjdnTks3Y3hrVEZVTHlmc1NPdFh4WE5pWldEZklOQUxBbjBpMTlkX3FFQVJ6c2NSZGdzTThycE92VW82enZKamhiRGFnU25aZGlHZHhZd2lUUmhuTVptNjhoWVlJQkxIOEkzbzJNMjZCZFJyM25tdXBnQ2ZWaHV3b2p6UWJpdk9xUEhBc1dyTlNmeF9wbm5yYUhHV01UZnVXWDFlNzBkdXlWUWhvcmJpSmljbmE3LUpUZEg4VzRwZ2JVSjdYUm1sODViQXVxUzdGTmZFbVpiN2V1YW5XV3U4b2VRWmxldGVGVHZsSldoekhVLU9wZ2V0cGZIYkNqM2pXVGctQVAyUm4xTHhpd1VVLXFhcnVEV21Rby1hbTlqTl84TjVveHdYTExUVkhHQ0ltaTB2WXJnY1NQVE5PbWg3ejgySElYc1JSTlQ3NDlFUWR6STZVUjVqaXFRN200NF9LY1ljQ0R2UldlWUtKY1NQVnJ4QXRyYTBGSWVuenhyM0Z0cWtndTd1eG8xRzY5a2dNZ1hkQm5MV3BHVzA2N1QwUkd6WlRGYTZQOUhnVWQ2S0Y5U0s1dXFNVXh5Q2pLWVUxSUQ2MlR1ak52NmRIZ2hlYTk1SGZGWS1RV3hWVU9rR3d1Rk9MLS11REZXbzhqMHpsSm1HYW1jMUNLT29YOHZsRWNaLTVvOFpmT3l3MHVwaERTT0dNLWFjcGRYZ25qT2szTkVFUnRFR3JWYS1aNXFIRnMyalozTlQzNFF2NXJLVHVPVF9zdTF6ZjlkbzJ4RFc2ZENmNFFxZDZzTzhfMUl0bW96V0l
PZkh1dXFYZlEteFBlSG84Si1FNS1TTi1OMkFnX2pOYW8xY3MxMVJnVC02MDUyaXZfMEVHWDQtVlRpcENmV0h3V0dCWEFRS2prQXdNRlQ5dnRFVHU0Q1dNTmh0SlBCaU55bFMydWM1TTFFLW96ODBnV3dNZHFZTWZhRURYSHlrdzF3RlRuWDBoQUhSOUJWemtRM3pxcDJFbGJoaTJ3ZktRTlJxbXltaHBoZXVJVDlxS3cxNWo2c0ZBV0NzaUstRWdsMW1xLXFkanZGYUFiU0tSLXFQa0tkcDFoMV9kak41ZjQ0R214UmtOR1ZBanRuemY3Mmw1SkZ5aDZodGIzT3N2aV85MW9kcld6c0g0ZDgtTWo3b3Y3VjJCRnR2U2tMVm9rUXNVRnVHbzZXVTZ6RmI2RkNmajBfMWVnODVFbnpkT0oyci15czJHU0p1cUowTGZJMzVnd3hIRjQyTVhKOGRkcFRKdVpyQ3Yzd01Jb1lSajFmV0paeEV0cjk1SmpmdWpDVFJMUmMtUFctOGhaTmlKQXNRVlVUNlhJemxudHZCR056SVlBb3NOTEYxRTRLaFlVd2d3TWtxVlB6ZEtQLTkxOGMyY3N0a2pYRFUweDBNaGhja2xSSklPOUZla1dKTWRNbG8tUGdSNEV5cW90OWlOZFlIUExBd3U2b2hyS1owbXVMM3p0Qm41cUtzWUxYNzB1N3JpUTNBSGdsT0NuamNTb1lIbXR4MG1sakNPVkxBUXRLVE1xX0YxWDhOcERIY1lTQVFqS01CaXZKNllFaXlIR0JsM1pKMmV1OUo3TGI1WkRaVnYxUTl1LTM0SU1qN1V1b0RCT0x0VHNLTmNLZnk1S0MxYnBBcm03WnVua0xqaEhGUzhOU253ZkppRzdudXBSVlMxeFVOSWxtZ1o2RVBSQUhEUEFuQ1hxSVZMME4yWUtaU3VyRGo3RkUyRUNjT0pNcE1BdE1ZRzdXVl8ydUtXZjdMdHdEVW4teHUtTi1HSGliLUxud21TX0NtcGVkRFBHNkZ1WTlNczR4OUJfUVluc1BoV09oWS1scUdsNnB5d1U5M1huX3k4QzAyNldtb2hybktYN2xKZ1NTNWFsaWwzV3pCRVhkaGR5eTNlV1d6ZzFfaFZTT0E4UjRpQ3pKdEZxUlJ6UFZXM3laUndyWEk2NlBXLUpoajVhZzVwQXpWVzUtVjVNZFBwdWdQa3AxZC1KdGdqNnhibjN4dmFYb2cxcEVwc1g5R09zRUdINUZtOE5QRjVUU0dpZy1QVl9odnFtVDNuWFZLSURtMXlSMlhRNTBWSVFJbEdOOWpfVWV0SmdRWDdlUXZZWE8xRUxDN1I0aEN6MHYwNzM1cmpJS0ZpMnBYWkxfb3FsbEV1VnlqWGxqdVJ6SHlwSjAzRlMycTBaQ295NXNnZERpUnJQcjhrUUd3bkI4bDVzRmxQblhkaFJPTTdISnVUQmhET3BOMTM4bjVvUEc2VmZhb2lrR1FyTUl2RWNEeGg0U0dsNnV6eU5zOUxiNDY5SXBxR0hBS00wOTgyWTFnWkQyaEtLVUloT3ZxZGh0RWVGRmJzenFsaUtfZENQM0JzdkVVeTdXR3hUSmJST1NBMUI1NkVFWncwNW5JZVVLX1p1RXdqVnFfQWpvQ08yQjZhN1NkTkpTSnUxOVRXZXE0WFEtZWxhZW1NNXYtQ2sya0VGLURmS01lMkctNVY3c2ZhN0ZGRFgwWHlabTFkeS1hcUZ1dDZ3cnpPQ3hha2IzVE11M0pqbklmU0diczBqTFBNZC1QZGp6VzNTSnJVSjJoWkJUQjVORG4tYUJmMEJtSUNUdVpEaGt6OTM3TjFOdVhXUHItZjRtZ25nU3NhZC1sVTVXNTRDTmxZbnlfeHNsdkpuMXhUYnE1MnpVQ0ZOclRWM1M4eHdXTzRXbFRZZVQtTS1iRVdXVWZMSGotcWg3MUxUYTFnSEEtanBCRHlZRUNIdGdpUFhsYjdYUndCZnRITzhMZVJ
1dHFoVlVNb0duVjlxd0U4OGRuQVV3MG90R0hiYW5MWkxWVklzbWFRNzBfSUNrdzc5bVdtTXg0dExEYnRCaDI3c1I4TWFwLXZKR0wxSjRZYjZIV3ZqZjNqTWhFT0RGSDVMc1A1UzY2bDBiMGFSUy1fNVRQRzRJWDVydUpqb1ZfSHNVbldVeUN2YlAxSW5WVDdxVzJ1WHpLeUdmb0xWMDNHN05oQzY3YnhvUUdhS2xaOHNidkVvbTZtSHFlblhOYmwyR3NQdVJDRUdxREhWdF9ZcXhwUWxHc2hyLW5vUGhIUVhJNUNhY0hFU0ptVnI0TFVhZDE1TFBBUEstSkRoZWJ5MHJhUmZrR1ZrRlFtRGpxS1pOMmFMQjBsdjluY3FiYUU4eGJVVXlZVEpuNWdHVVhJMGtwaTdZR2NDbXd2eHpOQ09SeTV6N1BaVUpsR1pQVDBZcElJUUt6VnVpQmxSYnE4Y1BCWV9IRWdVV0p3enBGVHItdnBGN3NyNWFBWmkySnByWThsbDliSlExQmp3LVlBaDIyZXp6UnR6cU9rTzJmTDBlSVpON0tiWllMdm1oME1zTFl2S2ZYYllhQlY2VHNZRGtHUDY4U1lIVExLZTU4VzZxSTZrZHl1ZTBDc0g4SjI4WGYyZHV1bm9wQ3R2Z09ld1ZmUkN5alJGeHZKSHl1bWhQVXpNMzdjblpLcUhfSm02Qlh5S1FVN3lIcHl0NnlRPT0=

View file

@ -50,23 +50,11 @@ APP_LOGGING_BACKUP_COUNT = 5
Service_MSFT_REDIRECT_URI = https://gateway-int.poweron-center.net/api/msft/auth/callback Service_MSFT_REDIRECT_URI = https://gateway-int.poweron-center.net/api/msft/auth/callback
Service_GOOGLE_REDIRECT_URI = https://gateway-int.poweron-center.net/api/google/auth/callback Service_GOOGLE_REDIRECT_URI = https://gateway-int.poweron-center.net/api/google/auth/callback
# OpenAI configuration # AI configuration
Connector_AiOpenai_API_URL = https://api.openai.com/v1/chat/completions
Connector_AiOpenai_API_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjSDBNYkptSkQxTUotYVVpZVNZc0dxNGNwSEtkOEE0T3RZWjROTEhSRlRXdlZmQUxxZ0w3Y0xOV2JNV19LNF9yTUZiU1pUNG15U2VDUDdSVlI4VlpnR3JXVFFtcXBaTEZiaUtSclVFd0lCZG1rWVhra1dfWTVQOTBEYUU0MjByYVNEMTFmeXNOcmpUT216MmJKdlVPeW5nPT0= Connector_AiOpenai_API_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjSDBNYkptSkQxTUotYVVpZVNZc0dxNGNwSEtkOEE0T3RZWjROTEhSRlRXdlZmQUxxZ0w3Y0xOV2JNV19LNF9yTUZiU1pUNG15U2VDUDdSVlI4VlpnR3JXVFFtcXBaTEZiaUtSclVFd0lCZG1rWVhra1dfWTVQOTBEYUU0MjByYVNEMTFmeXNOcmpUT216MmJKdlVPeW5nPT0=
Connector_AiOpenai_MODEL_NAME = gpt-4o
Connector_AiOpenai_TEMPERATURE = 0.2
# Anthropic configuration
Connector_AiAnthropic_API_URL = https://api.anthropic.com/v1/messages
Connector_AiAnthropic_API_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjT1ZlRWVJdVZMT3ljSFJDcFdxRFBRVkZhS204NnN5RDBlQ0tpenhTM0FFVktuWW9mWHNwRWx2dHB0eDBSZ0JFQnZKWlp6c01pVGREWHd1eGpERnU0Q2xhaks1clQ1ZXVsdnd2ZzhpNXNQS1BhY3FjSkdkVEhHalNaRGR4emhpakZncnpDQUVxOHVXQzVUWmtQc0FsYmFwTF9TSG5FOUFtWk5Ick1NcHFvY2s1T1c2WXlRUFFJZnh6TWhuaVpMYmppcDR0QUx0a0R6RXlwbGRYb1R4dzJkUT09 Connector_AiAnthropic_API_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjT1ZlRWVJdVZMT3ljSFJDcFdxRFBRVkZhS204NnN5RDBlQ0tpenhTM0FFVktuWW9mWHNwRWx2dHB0eDBSZ0JFQnZKWlp6c01pVGREWHd1eGpERnU0Q2xhaks1clQ1ZXVsdnd2ZzhpNXNQS1BhY3FjSkdkVEhHalNaRGR4emhpakZncnpDQUVxOHVXQzVUWmtQc0FsYmFwTF9TSG5FOUFtWk5Ick1NcHFvY2s1T1c2WXlRUFFJZnh6TWhuaVpMYmppcDR0QUx0a0R6RXlwbGRYb1R4dzJkUT09
Connector_AiAnthropic_MODEL_NAME = claude-3-5-sonnet-20241022
Connector_AiAnthropic_TEMPERATURE = 0.2
# Perplexity AI configuration
Connector_AiPerplexity_API_URL = https://api.perplexity.ai/chat/completions
Connector_AiPerplexity_API_SECRET = INT_ENC:Z0FBQUFBQm82Mzk2UWZJdUFhSW8yc3RKc0tKRXphd0xWMkZOVlFpSGZ4SGhFWnk0cTF5VjlKQVZjdS1QSWdkS0pUSWw4OFU5MjUxdTVQel9aeWVIZTZ5TXRuVmFkZG0zWEdTOGdHMHpsTzI0TGlWYURKU1Q0VVpKTlhxUk5FTmN6SUJScDZ3ZldIaUJZcWpaQVRiSEpyQm9tRTNDWk9KTnZBPT0= Connector_AiPerplexity_API_SECRET = INT_ENC:Z0FBQUFBQm82Mzk2UWZJdUFhSW8yc3RKc0tKRXphd0xWMkZOVlFpSGZ4SGhFWnk0cTF5VjlKQVZjdS1QSWdkS0pUSWw4OFU5MjUxdTVQel9aeWVIZTZ5TXRuVmFkZG0zWEdTOGdHMHpsTzI0TGlWYURKU1Q0VVpKTlhxUk5FTmN6SUJScDZ3ZldIaUJZcWpaQVRiSEpyQm9tRTNDWk9KTnZBPT0=
Connector_AiPerplexity_MODEL_NAME = sonar Connector_AiTavily_API_SECRET = INT_ENC:Z0FBQUFBQm8xSVRkdkJMTDY0akhXNzZDWHVYSEt1cDZoOWEzSktneHZEV2JndTNmWlNSMV9KbFNIZmQzeVlrNE5qUEIwcUlBSGM1a0hOZ3J6djIyOVhnZzI3M1dIUkdicl9FVXF3RGktMmlEYmhnaHJfWTdGUkktSXVUSGdQMC1vSEV6VE8zR2F1SVk=
Connector_AiPerplexity_TEMPERATURE = 0.2
# Agent Mail configuration # Agent Mail configuration
Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
@ -77,9 +65,6 @@ Service_MSFT_TENANT_ID = common
Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
Service_GOOGLE_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjNThGeVRNd3hacThtRnE0bzlDa0JPUWQyaEd6QjlFckdsMGZjRlRfUks2bXV3aDdVRTF3LVRlZVY5WjVzSXV4ZGNnX002RDl3dkNYdGFzZkxVUW01My1wTHRCanVCLUozZEx4TlduQlB5MnpvNTR2SGlvbFl1YkhzTEtsSi1SOEo= Service_GOOGLE_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjNThGeVRNd3hacThtRnE0bzlDa0JPUWQyaEd6QjlFckdsMGZjRlRfUks2bXV3aDdVRTF3LVRlZVY5WjVzSXV4ZGNnX002RDl3dkNYdGFzZkxVUW01My1wTHRCanVCLUozZEx4TlduQlB5MnpvNTR2SGlvbFl1YkhzTEtsSi1SOEo=
# Tavily Web Search configuration
Connector_WebTavily_API_KEY_SECRET = INT_ENC:Z0FBQUFBQm8xSVRkdkJMTDY0akhXNzZDWHVYSEt1cDZoOWEzSktneHZEV2JndTNmWlNSMV9KbFNIZmQzeVlrNE5qUEIwcUlBSGM1a0hOZ3J6djIyOVhnZzI3M1dIUkdicl9FVXF3RGktMmlEYmhnaHJfWTdGUkktSXVUSGdQMC1vSEV6VE8zR2F1SVk=
# Google Cloud Speech Services configuration # Google Cloud Speech Services configuration
Connector_GoogleSpeech_API_KEY_SECRET = INT_ENC:Z0FBQUFBQm8xSVRkNmVXZ1pWcHcydTF2MXF0ZGJoWHBydF85bTczTktiaEJ3Wk1vMW1mZVhDSG1yd0ZxR2ZuSGJTX0N3MWptWXFJTkNTWjh1SUVVTXI4UDVzcGdLMkU5SHJ2TUpkRlRoRWdnSldtYjNTQkh4UDJHY2xmdTdZQ1ZiMTZZcGZxS3RzaHdjV3dtVkZUcEpJcWx0b2xuQVR6ZmpoVFZPY1hNMTV2SnhDaC1IZEh4UUpLTy1ILXA4RG1zamJTbUJ4X0t2M2NkdzJPbEJxSmFpRzV3WC0wZThoVzlxcmpHZ3ZkLVlVY3REZk1vV19WQ05BOWN6cnJ4MWNYYnNiQ0FQSUVnUlpfM3BhMnlsVlZUOG5wM3pzM1lSN1UzWlZKUXRLczlHbjI1LTFvSUJ4SlVXMy1BNk43bE5Hb0RfTTVlWk9oZnFIaVg0SW5pbm9EcXRTTzU1RFlYY3dTcnpKWWNyNjN5T1BGZ0FmX253cEFncmhvZVRuM05KYzhkOEhFMFJsc2NBSEwzZVZ1R0JMOGxsekVwUE55alZaRXFrdzNWWVNGWXNmbnhKeWhQSFo2VXBTUlRPeHdvdVdncEFuOWgydEtsSUFneUN6cGVaTnBSdjNCdVJseGJFdmlMc203UFhLVlYyTENkaGg2dVN6Z2xwT1ZmTmN5bVZGUkM3ZWcyVkt2ckFUVVd3WFFwYnJjNVRobEh2SkVJbXRwUUpEOFJKQ1NUc0Q4NHNqUFhPSDh5cTV6MEcwSDEwRUJCQ2JiTTJlOE5nd3pMMkJaQ1dVYjMwZVVWWnlETmp2dkZ3aXEtQ29WNkxZTFkzYUkxdTlQUU1OTnhWWU12YU9MVnJQa1d2ZjRtUlhneTNubEMxTmp1eUNPOThSMlB3Y1F0T2tCdFNsNFlKalZPV25yR2QycVBUb096RmZ1V0FTaGsxLV9FWDBmenBIOXpMdGpLcUc0TWRoY2hlMFhYTzlET1ZRekw0ZHNwUVBQdVJBX2h6Q2ZzWVZJWTNybTJiekp3WmhmWF9SUFBXQzlqUjctcVlHWWVMZWVQallzR0JGTVF0WmtnWlg1aTM1bFprNVExZXY5dnNvWF93UjhwbkJ3RzNXaVJ2d2RRU3JJVlBvaVh4eTlBRUtqWkJia3dJQVVBV2Nqdm9FUTRUVW1TaHp2ZUwxT0N2ZndxQ2Nka1RYWXF0LWxIWFE0dTFQcVhncFFPM0hFdUUtYlFnemx3WkF4bjA1aDFULUdrZlVZbEJtRGRCdjJyVkdJSXozd0I0dF9zbWhOeHFqRDA4T1NVaWR5cjBwSVgwbllPU294NjZGTnM1bFhIdGpNQUxFOENWd3FCbGpSRFRmRXotQnU0N2lCVEU5RGF6Qi10S2U2NGdadDlrRjZtVE5oZkw5ZWFjXzhCTmxXQzNFTFgxRXVYY3J3YkxnbnlBSm9PY3h4MlM1NVFQbVNDRW5Ld1dvNWMxSmdoTXJuaE1pT2VFeXYwWXBHZ29MZDVlN2lwUUNIeGNCVVdQVi1rRXdJMWFncUlPTXR0MmZVQ1l0d09mZTdzWGFBWUJMUFd3b0RSOU8zeER2UWpNdzAxS0ZJWnB5S3FJdU9wUDJnTTNwMWw3VFVqVXQ3ZGZnU1RkUktkc0NhUHJ0SGFxZ0lVWDEzYjNtU2JfMGNWM1Y0dHlCTzNESEdENC1jUWF5MVppRzR1QlBNSUJySjFfRi1ENHEwcmJ4S3hQUFpXVHA0TG9DZWdoUlo5WnNSM1lCZm1KbEs2ak1yUUU4Wk9JcVJGUkJwc0NvUkMyTjhoTWxtZmVQeDREZVRKZkhYN2duLVNTeGZzdFdBVnhEandJSXB5QjM0azF0ckI3Tk1wSzFhNGVOUVRrNjU0cG9JQ29pN09xOFkwR1lMTlktaGp4TktxdTVtTnNEcldsV2pEZm5nQWpJc2hxY0hjQnVSWUR5VVdaUXBH
WUloTzFZUC1oNzJ4UjZ1dnpLcDJxWEZtQlNIMWkzZ0hXWXdKeC1iLXdZWVJhcU04VFlpMU5pd2ZIdTdCdkVWVFVBdmJuRk16bEFFQTh4alBrcTV2RzliT2hGdTVPOXlRMjFuZktiRTZIamQ1VFVqS0hRTXhxcU1mdkgyQ1NjQmZfcjl4c3NJd0RIeDVMZUFBbHJqdEJxWWl3aWdGUEQxR3ZnMkNGdVB4RUxkZi1xOVlFQXh1NjRfbkFEaEJ5TVZlUGFrWVhSTVRPeGxqNlJDTHNsRWRrei1pYjhnUmZrb3BvWkQ2QXBzYjFHNXZoWU1LSExhLWtlYlJTZlJmYUM5Y1Rhb1pkMVYyWTByM3NTS0VXMG1ybm1BTVN2QXRYaXZqX2dKSkZrajZSS2cyVlNOQnd5Y29zMlVyaWlNbTJEb3FuUFFtbWNTNVpZTktUenFZSl91cVFXZjRkQUZyYmtPczU2S1RKQ19ONGFOTHlwX2hOOEE1UHZEVjhnT0xxRjMxTEE4SHhRbmlmTkZwVXJBdlJDbU5oZS05SzI4QVhEWDZaN2ZiSlFwUGRXSnB5TE9MZV9ia3pYcmZVa1dicG5FMHRXUFZXMWJQVDAwOEdDQzJmZEl0ZDhUOEFpZXZWWXl5Q2xwSmFienNCMldlb2NKb2ZRYV9KbUdHRzNUcjU1VUFhMzk1a2J6dDVuNTl6NTdpM0hGa3k0UWVtbF9pdDVsQVp2cndDLUU5dnNYOF9CLS0ySXhBSFdCSnpqV010bllBb3U0cEZZYVF5R2tSNFM5NlRhdS1fb1NqbDBKMkw0V2N0VEZhNExtQlR3ckZ3cVlCeHVXdXJ6X0s4cEtsaG5rVUxCN2RRbHQxTmcyVFBqYUxyOHJzeFBXVUJaRHpXbUoxdHZzMFBzQk1UTUFvX1pGNFNMNDFvZWdTdEUtMUNKMXNIeVlvQk1CeEdpZVdmN0tsSDVZZHJXSGt5c2o2MHdwSTZIMVBhRzM1eU43Q2FtcVNidExxczNJeUx5U2RuUG5EeHpCTlg2SV9WNk1ET3BRNXFuc0pNWlVvZUYtY21oRGtJSmwxQ09QbHBUV3BuS3B5NE9RVkhfellqZjJUQ0diSV94QlhQWmdaaC1TRWxsMUVWSXB0aE1McFZDZDNwQUVKZ2t5cXRTXzlRZVJwN0pZSnJSV21XMlh0TzFRVEl0c2I4QjBxOGRCYkNxek04a011X1lrb2poQ3h2LUhKTGJiUlhneHp5QWFBcE5nMElkNTVzM3JGOWtUQ19wNVBTaVVHUHFDNFJnNXJaWDNBSkMwbi1WbTdtSnFySkhNQl9ZQjZrR2xDcXhTRExhMmNHcGlyWjR3ZU9SSjRZd1l4ZjVPeHNiYk53SW5SYnZPTzNkd1lnZmFseV9tQ3BxM3lNYVBHT0J0elJnMTByZ3VHemxta0tVQzZZRllmQ2VLZ1ZCNDhUUTc3LWNCZXBMekFwWW1fQkQ1NktzNGFMYUdYTU0xbXprY1FONUNlUHNMY3h2NFJMMmhNa3VNdzF4TVFWQk9odnJUMjFJMVd3Z2N6Sms5aEM2SWlWZFViZ0JWTEpUWWM5NmIzOS1oQmRqdkt1NUUycFlVcUxERUZGbnZqTUxIYnJmMDBHZDEzbnJsWEEzSUo3UmNPUDg1dnRUU1FzcWtjTWZwUG9zM0JTY3RqMDdST2UxcXFTM0d0bGkwdFhnMk5LaUlxNWx3V1pLaVlLUFJXZzBzVl9Ia1V1OHdYUEFWOU50UndycGtCdzM0Q0NQamp2VTNqbFBLaGhsbUk5dUI5MjU5OHVySk1oY0drUWtXUloyVVRvOWJmbUVYRzFVeWNQczh2NXJCeVppRlZiWDNJaDhOSmRmX2lURTNVS3NXQXFZT1QtUmdvMWJoVWYxU3lqUUJhbzEyX3I3TXhwbm9wc1FoQ1ZUTlNBRjMyQTBTY2tzbHZ3RFUtTjVxQ0o1QXRTVks2WENwMGZCRGstNU1jN3FhUFJCQThyaFhh
MVRsbnlSRXNGRmt3Yk01X21ldmV3bTItWm1JaGpZQWZROEFtT1d1UUtPQlhYVVFqT2NxLUxQenJHX3JfMEdscDRiMXcyZ1ZmU3NFMzVoelZJaDlvT0ZoRGQ2bmtlM0M5ZHlCd2ZMbnRZRkZUWHVBUEx4czNfTmtMckh5eXZrZFBzOEItOGRYOEhsMzBhZ0xlOWFjZzgteVBsdnpPT1pYdUxnbFNXYnhKaVB6QUxVdUJCOFpvU2x2c1FHZV94MDBOVWJhYkxISkswc0U5UmdPWFJLXzZNYklHTjN1QzRKaldKdEVHb0pOU284N3c2LXZGMGVleEZ5NGZ6OGV1dm1tM0J0aTQ3VFlNOEJrdEh3PT0= Connector_GoogleSpeech_API_KEY_SECRET = INT_ENC:Z0FBQUFBQm8xSVRkNmVXZ1pWcHcydTF2MXF0ZGJoWHBydF85bTczTktiaEJ3Wk1vMW1mZVhDSG1yd0ZxR2ZuSGJTX0N3MWptWXFJTkNTWjh1SUVVTXI4UDVzcGdLMkU5SHJ2TUpkRlRoRWdnSldtYjNTQkh4UDJHY2xmdTdZQ1ZiMTZZcGZxS3RzaHdjV3dtVkZUcEpJcWx0b2xuQVR6ZmpoVFZPY1hNMTV2SnhDaC1IZEh4UUpLTy1ILXA4RG1zamJTbUJ4X0t2M2NkdzJPbEJxSmFpRzV3WC0wZThoVzlxcmpHZ3ZkLVlVY3REZk1vV19WQ05BOWN6cnJ4MWNYYnNiQ0FQSUVnUlpfM3BhMnlsVlZUOG5wM3pzM1lSN1UzWlZKUXRLczlHbjI1LTFvSUJ4SlVXMy1BNk43bE5Hb0RfTTVlWk9oZnFIaVg0SW5pbm9EcXRTTzU1RFlYY3dTcnpKWWNyNjN5T1BGZ0FmX253cEFncmhvZVRuM05KYzhkOEhFMFJsc2NBSEwzZVZ1R0JMOGxsekVwUE55alZaRXFrdzNWWVNGWXNmbnhKeWhQSFo2VXBTUlRPeHdvdVdncEFuOWgydEtsSUFneUN6cGVaTnBSdjNCdVJseGJFdmlMc203UFhLVlYyTENkaGg2dVN6Z2xwT1ZmTmN5bVZGUkM3ZWcyVkt2ckFUVVd3WFFwYnJjNVRobEh2SkVJbXRwUUpEOFJKQ1NUc0Q4NHNqUFhPSDh5cTV6MEcwSDEwRUJCQ2JiTTJlOE5nd3pMMkJaQ1dVYjMwZVVWWnlETmp2dkZ3aXEtQ29WNkxZTFkzYUkxdTlQUU1OTnhWWU12YU9MVnJQa1d2ZjRtUlhneTNubEMxTmp1eUNPOThSMlB3Y1F0T2tCdFNsNFlKalZPV25yR2QycVBUb096RmZ1V0FTaGsxLV9FWDBmenBIOXpMdGpLcUc0TWRoY2hlMFhYTzlET1ZRekw0ZHNwUVBQdVJBX2h6Q2ZzWVZJWTNybTJiekp3WmhmWF9SUFBXQzlqUjctcVlHWWVMZWVQallzR0JGTVF0WmtnWlg1aTM1bFprNVExZXY5dnNvWF93UjhwbkJ3RzNXaVJ2d2RRU3JJVlBvaVh4eTlBRUtqWkJia3dJQVVBV2Nqdm9FUTRUVW1TaHp2ZUwxT0N2ZndxQ2Nka1RYWXF0LWxIWFE0dTFQcVhncFFPM0hFdUUtYlFnemx3WkF4bjA1aDFULUdrZlVZbEJtRGRCdjJyVkdJSXozd0I0dF9zbWhOeHFqRDA4T1NVaWR5cjBwSVgwbllPU294NjZGTnM1bFhIdGpNQUxFOENWd3FCbGpSRFRmRXotQnU0N2lCVEU5RGF6Qi10S2U2NGdadDlrRjZtVE5oZkw5ZWFjXzhCTmxXQzNFTFgxRXVYY3J3YkxnbnlBSm9PY3h4MlM1NVFQbVNDRW5Ld1dvNWMxSmdoTXJuaE1pT2VFeXYwWXBHZ29MZDVlN2lwUUNIeGNCVVdQVi1rRXdJMWFncUlPTXR0MmZVQ1l0d09mZTdzWGFBWUJMUFd3b0RSOU8zeER2UWpNdzAxS0Z
JWnB5S3FJdU9wUDJnTTNwMWw3VFVqVXQ3ZGZnU1RkUktkc0NhUHJ0SGFxZ0lVWDEzYjNtU2JfMGNWM1Y0dHlCTzNESEdENC1jUWF5MVppRzR1QlBNSUJySjFfRi1ENHEwcmJ4S3hQUFpXVHA0TG9DZWdoUlo5WnNSM1lCZm1KbEs2ak1yUUU4Wk9JcVJGUkJwc0NvUkMyTjhoTWxtZmVQeDREZVRKZkhYN2duLVNTeGZzdFdBVnhEandJSXB5QjM0azF0ckI3Tk1wSzFhNGVOUVRrNjU0cG9JQ29pN09xOFkwR1lMTlktaGp4TktxdTVtTnNEcldsV2pEZm5nQWpJc2hxY0hjQnVSWUR5VVdaUXBHWUloTzFZUC1oNzJ4UjZ1dnpLcDJxWEZtQlNIMWkzZ0hXWXdKeC1iLXdZWVJhcU04VFlpMU5pd2ZIdTdCdkVWVFVBdmJuRk16bEFFQTh4alBrcTV2RzliT2hGdTVPOXlRMjFuZktiRTZIamQ1VFVqS0hRTXhxcU1mdkgyQ1NjQmZfcjl4c3NJd0RIeDVMZUFBbHJqdEJxWWl3aWdGUEQxR3ZnMkNGdVB4RUxkZi1xOVlFQXh1NjRfbkFEaEJ5TVZlUGFrWVhSTVRPeGxqNlJDTHNsRWRrei1pYjhnUmZrb3BvWkQ2QXBzYjFHNXZoWU1LSExhLWtlYlJTZlJmYUM5Y1Rhb1pkMVYyWTByM3NTS0VXMG1ybm1BTVN2QXRYaXZqX2dKSkZrajZSS2cyVlNOQnd5Y29zMlVyaWlNbTJEb3FuUFFtbWNTNVpZTktUenFZSl91cVFXZjRkQUZyYmtPczU2S1RKQ19ONGFOTHlwX2hOOEE1UHZEVjhnT0xxRjMxTEE4SHhRbmlmTkZwVXJBdlJDbU5oZS05SzI4QVhEWDZaN2ZiSlFwUGRXSnB5TE9MZV9ia3pYcmZVa1dicG5FMHRXUFZXMWJQVDAwOEdDQzJmZEl0ZDhUOEFpZXZWWXl5Q2xwSmFienNCMldlb2NKb2ZRYV9KbUdHRzNUcjU1VUFhMzk1a2J6dDVuNTl6NTdpM0hGa3k0UWVtbF9pdDVsQVp2cndDLUU5dnNYOF9CLS0ySXhBSFdCSnpqV010bllBb3U0cEZZYVF5R2tSNFM5NlRhdS1fb1NqbDBKMkw0V2N0VEZhNExtQlR3ckZ3cVlCeHVXdXJ6X0s4cEtsaG5rVUxCN2RRbHQxTmcyVFBqYUxyOHJzeFBXVUJaRHpXbUoxdHZzMFBzQk1UTUFvX1pGNFNMNDFvZWdTdEUtMUNKMXNIeVlvQk1CeEdpZVdmN0tsSDVZZHJXSGt5c2o2MHdwSTZIMVBhRzM1eU43Q2FtcVNidExxczNJeUx5U2RuUG5EeHpCTlg2SV9WNk1ET3BRNXFuc0pNWlVvZUYtY21oRGtJSmwxQ09QbHBUV3BuS3B5NE9RVkhfellqZjJUQ0diSV94QlhQWmdaaC1TRWxsMUVWSXB0aE1McFZDZDNwQUVKZ2t5cXRTXzlRZVJwN0pZSnJSV21XMlh0TzFRVEl0c2I4QjBxOGRCYkNxek04a011X1lrb2poQ3h2LUhKTGJiUlhneHp5QWFBcE5nMElkNTVzM3JGOWtUQ19wNVBTaVVHUHFDNFJnNXJaWDNBSkMwbi1WbTdtSnFySkhNQl9ZQjZrR2xDcXhTRExhMmNHcGlyWjR3ZU9SSjRZd1l4ZjVPeHNiYk53SW5SYnZPTzNkd1lnZmFseV9tQ3BxM3lNYVBHT0J0elJnMTByZ3VHemxta0tVQzZZRllmQ2VLZ1ZCNDhUUTc3LWNCZXBMekFwWW1fQkQ1NktzNGFMYUdYTU0xbXprY1FONUNlUHNMY3h2NFJMMmhNa3VNdzF4TVFWQk9odnJUMjFJMVd3Z2N6Sms5aEM2SWlWZFViZ0JWTEpUWWM5NmIzOS1oQmRqdkt1NUUycFlVcUxERUZGbnZqTUxIYnJmMDBHZDEzbnJ
sWEEzSUo3UmNPUDg1dnRUU1FzcWtjTWZwUG9zM0JTY3RqMDdST2UxcXFTM0d0bGkwdFhnMk5LaUlxNWx3V1pLaVlLUFJXZzBzVl9Ia1V1OHdYUEFWOU50UndycGtCdzM0Q0NQamp2VTNqbFBLaGhsbUk5dUI5MjU5OHVySk1oY0drUWtXUloyVVRvOWJmbUVYRzFVeWNQczh2NXJCeVppRlZiWDNJaDhOSmRmX2lURTNVS3NXQXFZT1QtUmdvMWJoVWYxU3lqUUJhbzEyX3I3TXhwbm9wc1FoQ1ZUTlNBRjMyQTBTY2tzbHZ3RFUtTjVxQ0o1QXRTVks2WENwMGZCRGstNU1jN3FhUFJCQThyaFhhMVRsbnlSRXNGRmt3Yk01X21ldmV3bTItWm1JaGpZQWZROEFtT1d1UUtPQlhYVVFqT2NxLUxQenJHX3JfMEdscDRiMXcyZ1ZmU3NFMzVoelZJaDlvT0ZoRGQ2bmtlM0M5ZHlCd2ZMbnRZRkZUWHVBUEx4czNfTmtMckh5eXZrZFBzOEItOGRYOEhsMzBhZ0xlOWFjZzgteVBsdnpPT1pYdUxnbFNXYnhKaVB6QUxVdUJCOFpvU2x2c1FHZV94MDBOVWJhYkxISkswc0U5UmdPWFJLXzZNYklHTjN1QzRKaldKdEVHb0pOU284N3c2LXZGMGVleEZ5NGZ6OGV1dm1tM0J0aTQ3VFlNOEJrdEh3PT0=

View file

@ -50,23 +50,11 @@ APP_LOGGING_BACKUP_COUNT = 5
Service_MSFT_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/msft/auth/callback Service_MSFT_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/msft/auth/callback
Service_GOOGLE_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/google/auth/callback Service_GOOGLE_REDIRECT_URI = https://gateway-prod.poweron-center.net/api/google/auth/callback
# OpenAI configuration # AI configuration
Connector_AiOpenai_API_URL = https://api.openai.com/v1/chat/completions
Connector_AiOpenai_API_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pU05XM2hMaExPMnpYeFpwRVhyYl9JZmRITmlmRDlWOUJSSWE4NTFLZUptSkJhNlEycHBLZmh3WFA2ZmU5VmxHZks1UUNVOUZnckZNdXZ2MTY2dFg1Nl8yWDRrcTRlT0tHYkhyRGZINTEzU25iYVFRMzJGeUZIdlc4LU9GbmpQYmtmU3lJT2VVZ1UzLVd3R25ZQ092SUVnPT0= Connector_AiOpenai_API_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pU05XM2hMaExPMnpYeFpwRVhyYl9JZmRITmlmRDlWOUJSSWE4NTFLZUptSkJhNlEycHBLZmh3WFA2ZmU5VmxHZks1UUNVOUZnckZNdXZ2MTY2dFg1Nl8yWDRrcTRlT0tHYkhyRGZINTEzU25iYVFRMzJGeUZIdlc4LU9GbmpQYmtmU3lJT2VVZ1UzLVd3R25ZQ092SUVnPT0=
Connector_AiOpenai_MODEL_NAME = gpt-4o
Connector_AiOpenai_TEMPERATURE = 0.2
# Anthropic configuration
Connector_AiAnthropic_API_URL = https://api.anthropic.com/v1/messages
Connector_AiAnthropic_API_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pNTA1RkZ3UllCOXVsNVZzbkw2Rkl1TWxCZ0wwWEVXUm9ReUhBcVl1cGFUdW9FRVh4elVxR0x3NVRxZkc4SkxHVFdzSU1YNG5Rb0FqSHJhdElwWm1iLWdubTVDcUl3UkVjVHNoU0xLa0ZTSFlfTlJUVXg4cVVwUWdlVDBTSFU5SnBzS0ZnVjlQcmtiNzV2UTNMck1IakZ0OWlubUtlWDZnMk4yX2JsZ1U4Wm1yT29fM2d2NVBNOWNBbWtTRWNyQ2tZNjhwSVF6bG5SU3dTenR2MzA3Z19NUT09 Connector_AiAnthropic_API_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pNTA1RkZ3UllCOXVsNVZzbkw2Rkl1TWxCZ0wwWEVXUm9ReUhBcVl1cGFUdW9FRVh4elVxR0x3NVRxZkc4SkxHVFdzSU1YNG5Rb0FqSHJhdElwWm1iLWdubTVDcUl3UkVjVHNoU0xLa0ZTSFlfTlJUVXg4cVVwUWdlVDBTSFU5SnBzS0ZnVjlQcmtiNzV2UTNMck1IakZ0OWlubUtlWDZnMk4yX2JsZ1U4Wm1yT29fM2d2NVBNOWNBbWtTRWNyQ2tZNjhwSVF6bG5SU3dTenR2MzA3Z19NUT09
Connector_AiAnthropic_MODEL_NAME = claude-3-5-sonnet-20241022
Connector_AiAnthropic_TEMPERATURE = 0.2
# Perplexity AI configuration
Connector_AiPerplexity_API_URL = https://api.perplexity.ai/chat/completions
Connector_AiPerplexity_API_SECRET = PROD_ENC:Z0FBQUFBQm82Mzk2Q1FGRkJEUkI4LXlQbHYzT2RkdVJEcmM4WGdZTWpJTEhoeUF1NW5LUVpJdDBYN3k1WFN4a2FQSWJSQmd0U0xJbzZDTmFFN05FcXl0Z3V1OEpsZjYydV94TXVjVjVXRTRYSWdLMkd5XzZIbFV6emRCZHpuOUpQeThadE5xcDNDVGV1RHJrUEN0c1BBYXctZFNWcFRuVXhRPT0= Connector_AiPerplexity_API_SECRET = PROD_ENC:Z0FBQUFBQm82Mzk2Q1FGRkJEUkI4LXlQbHYzT2RkdVJEcmM4WGdZTWpJTEhoeUF1NW5LUVpJdDBYN3k1WFN4a2FQSWJSQmd0U0xJbzZDTmFFN05FcXl0Z3V1OEpsZjYydV94TXVjVjVXRTRYSWdLMkd5XzZIbFV6emRCZHpuOUpQeThadE5xcDNDVGV1RHJrUEN0c1BBYXctZFNWcFRuVXhRPT0=
Connector_AiPerplexity_MODEL_NAME = sonar Connector_AiTavily_API_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pMjhJNS1CZFJubUlkN3ZrTUoxR0Y1QzJFWEJSMk0wQkI0UndqOW1UelVieWhGaTVBcHoxRXo1VjRzVVRROHFIeHMyS3Q5cDZCeUlEMzE1ZlhVTmNveFk5VmFQMm80NTRyVW1TZHVsR3dUN0RtMnd4LW1VWlpqOXJPeXZBTmg4OEM=
Connector_AiPerplexity_TEMPERATURE = 0.2
# Agent Mail configuration # Agent Mail configuration
Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
@ -77,9 +65,6 @@ Service_MSFT_TENANT_ID = common
Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com Service_GOOGLE_CLIENT_ID = 354925410565-aqs2b2qaiqmm73qpjnel6al8eid78uvg.apps.googleusercontent.com
Service_GOOGLE_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pV2JEV0lNUXhwa1VTUGh2RWcyYnJHSFQyTmdBOEhwRkJWc3MwOFZlcHJGUmlGOVVFbG1XalNyUXVuaExESy1xeFNIQlRiSFVIWTB6Rm1fNFg0OHZZSkF4ZlBIcFZDMjZHcFRERXJ0WlVFclhHa29Za1BqWGxsM05NZGFRc1BLZnE= Service_GOOGLE_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pV2JEV0lNUXhwa1VTUGh2RWcyYnJHSFQyTmdBOEhwRkJWc3MwOFZlcHJGUmlGOVVFbG1XalNyUXVuaExESy1xeFNIQlRiSFVIWTB6Rm1fNFg0OHZZSkF4ZlBIcFZDMjZHcFRERXJ0WlVFclhHa29Za1BqWGxsM05NZGFRc1BLZnE=
# Tavily Web Search configuration
Connector_WebTavily_API_KEY_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pMjhJNS1CZFJubUlkN3ZrTUoxR0Y1QzJFWEJSMk0wQkI0UndqOW1UelVieWhGaTVBcHoxRXo1VjRzVVRROHFIeHMyS3Q5cDZCeUlEMzE1ZlhVTmNveFk5VmFQMm80NTRyVW1TZHVsR3dUN0RtMnd4LW1VWlpqOXJPeXZBTmg4OEM=
# Google Cloud Speech Services configuration # Google Cloud Speech Services configuration
Connector_GoogleSpeech_API_KEY_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pNjlJdmFMeERXUUQzR0duRUY4cGRZRzdwQlpnVFAzSzQ5cHZNRnVUZ0xWd3dQMHR3QjVsdF92NmdUQlJGRk1RcG1RYWZzcE9RbEhjQmR5Yk5Ud3ZKTW5jbmpEVGJ2ZkxVeVJpcUxaT2lNREFXaks5WHg5aVlHcXlUZldMdnZGYklHWjlJOWJ6Wm5RSkNmdm5feENjS1E0QUVXTTE5SW5sNFBEeTJ1RjRmVm9SQUNIYmF2U1U2dklsbTVlWFpCcHMwTFF1SUg5NmNfcWhQRFlpeWt0U19HMXNuUHd2RFdrVl9XdUFaY0hWdVBPYWlybU1CdGlCN1A0RzZBbi1IUVJ1TWMxTE9Ea09sTURhcDFZb1JIUW1zUFJybW15MDcxOUtfVXA2N0xwMnFrczA1YTJaN05pRHhOYWNzMjVmUHdhbVdlemF3TEIzN0pJaVo3bGJBMXJnZmNYTXVJVDdmYkRXWTlBT2F2NmN4eTlteUI1SlJTOXc2WWFWUTBCZTJBVHRLVDhEVjBFeHE0Nmk1YkxYd3N3RXgtVUdGdlZFSmk4dHM0QjFmbktsQTctbmJMT0MtMDlKS1pUR0pELXBxckhULUUycjlBZmVJQjFrM0xEUm50U2ZabExtVjZ1WWZ1WnlobUZIOVlndjNydUZfczJUWVVRZURTd1lYazllaER4VU10cXUyVS1ZNG9Ha2hnbTAzOEpGMklFSWpWeVV5eFB2UlVWYmJJakZnOVM2R2lJSXRSM3VzVEZZNUVpNmVjRzdXRUJsT2hzcjhZWERFeGV5c1dFQVM3dkhGY2Q3ckNBRDZCcVdhZnZkdzM3QVNpODZYWE81TEIyZGUycldkSVRvbm5hR3Jib2UzOEtXdUpHQ2FyWDQtMDdQbC1ycEdfUzdXd0U2dHFIVjhoRDJ0YkNsWUpva1dzOGNPdXRpZjVwUldtT3FVN3RrZUhTN3JfX1M3LU9PaXZELWkzRmtMbjgxZGZ6ZjVJNW9RZW1nM2hqUXo4Z2I5Z2tSVTVMdUNLblRxOGQ1Y3F4SGZIbWo4YkFBV3FIbjB6LUxGNHdsQWgxQUM4bzVrblBObFFfVWNaQ3QwejQ1eGFlSXVIcXlyVEZEdzVKNV9pd2o4RW1UVjlqb3VMWnF0V1JTcWF1R0RjdUNjM2lLUHRqZDl2WWtXUnhmbVdxeHA3REFHTkdkMjM4LTllajBWQnd3RHlFSVdiUThfQnduOVFJdmR6OUVGN1lOYjBqclhadHozX21kRzlUT2EtWVBkYWFRSjRGdW80dmlEUTVrVjhWbjJYNGtCeGNtNzRHQXJsRlZyWjBYdHltVDM2MV9IT0RFT2dLLTVBREtsS09HdUxrODRLcEQ1TmRoVDh6WmgybGc5MzgtbmJSYThQd3FFaUcxbmg3eE95RkJVX2hHM20wT1k2c21qd24wSkFWNGROaklQeHZrc21PdTVsdHVxR0pxd3Ztb1NQVHEtd25URHRNa1pqa3BLdVdkTnNFeDNManJST0dOb1RWM2hqekxFTlFSZkd6TlZBY1VQT1NFOVlDQzlPQWVlVXQ4MW0wdGkzd0Myam1lSWE2aEtVVTVNc3N3dENpa1BWRl9ZQ3daYllONWRmRUF0THpleFRmdWRqTFM2aldmLUFuZzFGdkFQNHR6d21SdzRGQ0Q4cU8yV0xGUTVUY01TZlYxSzZ4cmtfUGZvVDhmYmNBX1pibTVTcl9lenJoME9KSnBucUxPRU1PRXBmLWFENEgwRWZOU0RvRDlvQk9ueVp0dXJrUVgtQUk5VldVbV9MS19PYmlua3liWl80Z2hMcFRnTXBkZDA3enIxRWFzaU56TEZKa0hPQUtNY0dCY1pnQ2V3Zml6ZFczWFBESUlLd3BSVEs5ZXlGLUpINDRsd1NBVjBkR1dvbE8wLWZBeEhFQ0hvY3E5UGJsTDdteGd
SRjBIZTRobXpsd29PMmhKQkxXY3Znd2FMdWtZU1VkQlVRZXlSZ3FaVnNqcXpwR3N3SktOTDA3aUZIcE9TR1VDcXdaTDhQX2E5VDlwckoyX0xlNmFQcnoydEkwc0s1S08yaVlsM0pwYktUVWl3LU5hQzF2UVZNSm9ZR3QyQWdrUXB2a25QNzhkVEFOYmZ0b1BmTXRCMmVQZTAtYzdOeUlBYlNINlZNZW1nUTFfSV92UlJiWGt6Qms1c1hBc3kzZkVRMzEwNVJDOS1JeVg4YWtVeUJyOTZPQ0FnSUs1Z25sMlY0S1V1c0dIWEpuX2pMQmZ4Z29SY1U0bVZscXNWcjJwRy1UZEFYSXBzQURGblRTelBybU5BeDF6N3hZLXZwSHBkMmlzbHZWN2JkU3hRcE0zQ0hna3QwYWlJX3hBdGcxUHdGRE55cndUNHRvbXU5VTRMRmZDRjhvXzIwajI1Y0RCcmR2OV94cS1XYkNwalNHS2lObHlkNGZBbklycnZMSlJYVnlfakRXb1ZfWUo2MGxzYUNIektYeENGTkUzMUJXRE9WRHRrY2o5UFJHckZza2RQbjNPUkstbG9GZG4yNmxKeEdtbHo4WDZFc0lvT01wZkxuN29ycXl3X1hTN1prRGdvWG9hRFYwNzBwVVpuMW0wQlZYbGZxZjFQUHp2XzBQT3Fqa3lzejVKZmJDMG0wRzhqWV9HY1dxaXB2VFNQUzV2LUJSOXRFRUllak83cUI3RGUtYVBJakF1YUVOV0otT1BxUHJqS0NLdFVHc0tsT2RGcWd6UTU4Yi1kc0JZS1VPT1NXSlc3TDM5ZDVEZlRDOURZU1hMT0YxZ25ndVBUaG1VcGsxWFZSS1RxT1ZZTU1vclZjVU5iYmZMd0VBTXlvdTE0YjdoclZ6ZnNKMmE2Yy1ORmNCMnJNX3dwcVJSN2RSd2d6aENLRXQyTjhkcDlLTFVZMHBydFowNTJoZm1mVHNRVHI1YjhTNnl1Vll4dFZhenZfa0dybk9KYVh6LUluSUo0djUzRFNEdzBoVGt5UU9tMlg5UnBLbk9WaEhoU2txY2tUSXJmemlmNEExb3Q1blI5bE9adHluWVI3NXZQNUtXdmpra05aNy15dTBXdlVqcXhteFVqSXFxNnlQR2FGeVNONkx3NVpQUk1FNk5yTUY4T1hQV1FCdm9PYzdFTGl4QXZkODltSlprbGJ6cWREcEM1VlNwN3V5aWdWYXNkekk4X3U0cjJjZ1k2X190cmNnMlpMQVlLdExxM3pFNkZudVFKci1CalE1U3kzdmotQ01LV0ZzWnp0VUxRblhkdlN6VG1MWHNQdGlrNmF4RnFtd0c3UXNqZFVRZTRFMGl1NFU5T2k3VEpjZXA1U052VkJtdUhDWEpTaDRGQnM0SDQwY2IxdDVNbUtELTQ0R0s0OHpfTHdFOHZ0VmRMTC1FUVpPSkJ4QXRWNnl5MURUdjVyUk53emRwbDBxUnloUmlheXhKY3RBUG1mX3JxM2w0VlZvcE40b2ROeG15NS01RFlvUHdoYllLNVhCZUNEd0dwQnFCLVdZU0RhVEFzR2gxTVpub3FGRnl4VDNiSVZrTnpMQUlxeGJGQzh5WlNZR2NKbklHRVRTaVJ2REduN0hXaGo5MHFGb1FOa0U5TUFwQ09zOXVWMnRRNVlJWmZpaTUxLWFIeWR0UEFtaVNDX1k5Q1p3Y2V4ckVXQVBRYzV1eGwwMWd0SE15WUxiYzUyLTUzTGlyTUhZUDFlRTFjcFpieWQwU0pxRWJXSE53Nkd5aHp5T28wZVd6Z1phLTQ4TmgxU3hvNHpySzExUk5WZlFFS3VpOXNHMDdZU0gzSGxYUlU4WmgwNUlPdlhQcUI0cGtITmQ4SlByczN0THUxNHc0a21vUEp6S1hLNnFRNmFfdlpmUWpJQ1VNYXVEOW1abzlsd2RoRG5pVXRVbjBKV2RFTGFEa3ZYTHByOTJjalc1b3hTWkFmS2RPdVlTUTVkRkp
STnZsMWtnYWZEUm1SR3lBemdON2xiN3pkZlNfX2NSYU5wWHNybHh4V0lnNHJjQ2NON1hiRHMycUdmNC1kay13bUE0OTBPN0xmNDA1NlQxVmRySEJvM1VUN2Y2Sl9KX2pZVHRPWEdfR2RYNUoxY01Va3pXb2VBd3lZb3BSXzU5NVJfWlhEYXFSVDJrUnFHWG42RVZJUVQ2RlJWUEkyQnRnREI3eHNiRERiQ3FUczJsRTBDZ3pUUGZPcjExZUFKc21QUWxVYVBmV2hPZXRGd3lJX3ZTczhCVG1jWFVwanhIZHlyTTdiR2c5cTBVSXBRV1U4ZExtWWdub1pTSHU0cU5aYWJVWmExbXI0MjE3WUVnPT0= Connector_GoogleSpeech_API_KEY_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pNjlJdmFMeERXUUQzR0duRUY4cGRZRzdwQlpnVFAzSzQ5cHZNRnVUZ0xWd3dQMHR3QjVsdF92NmdUQlJGRk1RcG1RYWZzcE9RbEhjQmR5Yk5Ud3ZKTW5jbmpEVGJ2ZkxVeVJpcUxaT2lNREFXaks5WHg5aVlHcXlUZldMdnZGYklHWjlJOWJ6Wm5RSkNmdm5feENjS1E0QUVXTTE5SW5sNFBEeTJ1RjRmVm9SQUNIYmF2U1U2dklsbTVlWFpCcHMwTFF1SUg5NmNfcWhQRFlpeWt0U19HMXNuUHd2RFdrVl9XdUFaY0hWdVBPYWlybU1CdGlCN1A0RzZBbi1IUVJ1TWMxTE9Ea09sTURhcDFZb1JIUW1zUFJybW15MDcxOUtfVXA2N0xwMnFrczA1YTJaN05pRHhOYWNzMjVmUHdhbVdlemF3TEIzN0pJaVo3bGJBMXJnZmNYTXVJVDdmYkRXWTlBT2F2NmN4eTlteUI1SlJTOXc2WWFWUTBCZTJBVHRLVDhEVjBFeHE0Nmk1YkxYd3N3RXgtVUdGdlZFSmk4dHM0QjFmbktsQTctbmJMT0MtMDlKS1pUR0pELXBxckhULUUycjlBZmVJQjFrM0xEUm50U2ZabExtVjZ1WWZ1WnlobUZIOVlndjNydUZfczJUWVVRZURTd1lYazllaER4VU10cXUyVS1ZNG9Ha2hnbTAzOEpGMklFSWpWeVV5eFB2UlVWYmJJakZnOVM2R2lJSXRSM3VzVEZZNUVpNmVjRzdXRUJsT2hzcjhZWERFeGV5c1dFQVM3dkhGY2Q3ckNBRDZCcVdhZnZkdzM3QVNpODZYWE81TEIyZGUycldkSVRvbm5hR3Jib2UzOEtXdUpHQ2FyWDQtMDdQbC1ycEdfUzdXd0U2dHFIVjhoRDJ0YkNsWUpva1dzOGNPdXRpZjVwUldtT3FVN3RrZUhTN3JfX1M3LU9PaXZELWkzRmtMbjgxZGZ6ZjVJNW9RZW1nM2hqUXo4Z2I5Z2tSVTVMdUNLblRxOGQ1Y3F4SGZIbWo4YkFBV3FIbjB6LUxGNHdsQWgxQUM4bzVrblBObFFfVWNaQ3QwejQ1eGFlSXVIcXlyVEZEdzVKNV9pd2o4RW1UVjlqb3VMWnF0V1JTcWF1R0RjdUNjM2lLUHRqZDl2WWtXUnhmbVdxeHA3REFHTkdkMjM4LTllajBWQnd3RHlFSVdiUThfQnduOVFJdmR6OUVGN1lOYjBqclhadHozX21kRzlUT2EtWVBkYWFRSjRGdW80dmlEUTVrVjhWbjJYNGtCeGNtNzRHQXJsRlZyWjBYdHltVDM2MV9IT0RFT2dLLTVBREtsS09HdUxrODRLcEQ1TmRoVDh6WmgybGc5MzgtbmJSYThQd3FFaUcxbmg3eE95RkJVX2hHM20wT1k2c21qd24wSkFWNGROaklQeHZrc21PdTVsdHVxR0pxd3Ztb1NQVHEtd25URHRNa1pqa3BLdVdkTnNFeDNManJST0dOb1RWM2hqekxFTlFSZkd6TlZBY1VQT1NFOVlDQzlPQWVlVXQ4MW0wdGkzd0Mya
m1lSWE2aEtVVTVNc3N3dENpa1BWRl9ZQ3daYllONWRmRUF0THpleFRmdWRqTFM2aldmLUFuZzFGdkFQNHR6d21SdzRGQ0Q4cU8yV0xGUTVUY01TZlYxSzZ4cmtfUGZvVDhmYmNBX1pibTVTcl9lenJoME9KSnBucUxPRU1PRXBmLWFENEgwRWZOU0RvRDlvQk9ueVp0dXJrUVgtQUk5VldVbV9MS19PYmlua3liWl80Z2hMcFRnTXBkZDA3enIxRWFzaU56TEZKa0hPQUtNY0dCY1pnQ2V3Zml6ZFczWFBESUlLd3BSVEs5ZXlGLUpINDRsd1NBVjBkR1dvbE8wLWZBeEhFQ0hvY3E5UGJsTDdteGdSRjBIZTRobXpsd29PMmhKQkxXY3Znd2FMdWtZU1VkQlVRZXlSZ3FaVnNqcXpwR3N3SktOTDA3aUZIcE9TR1VDcXdaTDhQX2E5VDlwckoyX0xlNmFQcnoydEkwc0s1S08yaVlsM0pwYktUVWl3LU5hQzF2UVZNSm9ZR3QyQWdrUXB2a25QNzhkVEFOYmZ0b1BmTXRCMmVQZTAtYzdOeUlBYlNINlZNZW1nUTFfSV92UlJiWGt6Qms1c1hBc3kzZkVRMzEwNVJDOS1JeVg4YWtVeUJyOTZPQ0FnSUs1Z25sMlY0S1V1c0dIWEpuX2pMQmZ4Z29SY1U0bVZscXNWcjJwRy1UZEFYSXBzQURGblRTelBybU5BeDF6N3hZLXZwSHBkMmlzbHZWN2JkU3hRcE0zQ0hna3QwYWlJX3hBdGcxUHdGRE55cndUNHRvbXU5VTRMRmZDRjhvXzIwajI1Y0RCcmR2OV94cS1XYkNwalNHS2lObHlkNGZBbklycnZMSlJYVnlfakRXb1ZfWUo2MGxzYUNIektYeENGTkUzMUJXRE9WRHRrY2o5UFJHckZza2RQbjNPUkstbG9GZG4yNmxKeEdtbHo4WDZFc0lvT01wZkxuN29ycXl3X1hTN1prRGdvWG9hRFYwNzBwVVpuMW0wQlZYbGZxZjFQUHp2XzBQT3Fqa3lzejVKZmJDMG0wRzhqWV9HY1dxaXB2VFNQUzV2LUJSOXRFRUllak83cUI3RGUtYVBJakF1YUVOV0otT1BxUHJqS0NLdFVHc0tsT2RGcWd6UTU4Yi1kc0JZS1VPT1NXSlc3TDM5ZDVEZlRDOURZU1hMT0YxZ25ndVBUaG1VcGsxWFZSS1RxT1ZZTU1vclZjVU5iYmZMd0VBTXlvdTE0YjdoclZ6ZnNKMmE2Yy1ORmNCMnJNX3dwcVJSN2RSd2d6aENLRXQyTjhkcDlLTFVZMHBydFowNTJoZm1mVHNRVHI1YjhTNnl1Vll4dFZhenZfa0dybk9KYVh6LUluSUo0djUzRFNEdzBoVGt5UU9tMlg5UnBLbk9WaEhoU2txY2tUSXJmemlmNEExb3Q1blI5bE9adHluWVI3NXZQNUtXdmpra05aNy15dTBXdlVqcXhteFVqSXFxNnlQR2FGeVNONkx3NVpQUk1FNk5yTUY4T1hQV1FCdm9PYzdFTGl4QXZkODltSlprbGJ6cWREcEM1VlNwN3V5aWdWYXNkekk4X3U0cjJjZ1k2X190cmNnMlpMQVlLdExxM3pFNkZudVFKci1CalE1U3kzdmotQ01LV0ZzWnp0VUxRblhkdlN6VG1MWHNQdGlrNmF4RnFtd0c3UXNqZFVRZTRFMGl1NFU5T2k3VEpjZXA1U052VkJtdUhDWEpTaDRGQnM0SDQwY2IxdDVNbUtELTQ0R0s0OHpfTHdFOHZ0VmRMTC1FUVpPSkJ4QXRWNnl5MURUdjVyUk53emRwbDBxUnloUmlheXhKY3RBUG1mX3JxM2w0VlZvcE40b2ROeG15NS01RFlvUHdoYllLNVhCZUNEd0dwQnFCLVdZU0RhVEFzR2gxTVpub3FGRnl4VDNiSVZrTnpMQUlxeGJGQzh5WlNZR2NKbklHRVRTaVJ2REduN
0hXaGo5MHFGb1FOa0U5TUFwQ09zOXVWMnRRNVlJWmZpaTUxLWFIeWR0UEFtaVNDX1k5Q1p3Y2V4ckVXQVBRYzV1eGwwMWd0SE15WUxiYzUyLTUzTGlyTUhZUDFlRTFjcFpieWQwU0pxRWJXSE53Nkd5aHp5T28wZVd6Z1phLTQ4TmgxU3hvNHpySzExUk5WZlFFS3VpOXNHMDdZU0gzSGxYUlU4WmgwNUlPdlhQcUI0cGtITmQ4SlByczN0THUxNHc0a21vUEp6S1hLNnFRNmFfdlpmUWpJQ1VNYXVEOW1abzlsd2RoRG5pVXRVbjBKV2RFTGFEa3ZYTHByOTJjalc1b3hTWkFmS2RPdVlTUTVkRkpSTnZsMWtnYWZEUm1SR3lBemdON2xiN3pkZlNfX2NSYU5wWHNybHh4V0lnNHJjQ2NON1hiRHMycUdmNC1kay13bUE0OTBPN0xmNDA1NlQxVmRySEJvM1VUN2Y2Sl9KX2pZVHRPWEdfR2RYNUoxY01Va3pXb2VBd3lZb3BSXzU5NVJfWlhEYXFSVDJrUnFHWG42RVZJUVQ2RlJWUEkyQnRnREI3eHNiRERiQ3FUczJsRTBDZ3pUUGZPcjExZUFKc21QUWxVYVBmV2hPZXRGd3lJX3ZTczhCVG1jWFVwanhIZHlyTTdiR2c5cTBVSXBRV1U4ZExtWWdub1pTSHU0cU5aYWJVWmExbXI0MjE3WUVnPT0=

View file

@ -66,10 +66,6 @@ class BaseConnectorAi(ABC):
return model return model
return None return None
def getModelsByCapability(self, capability: str) -> List[AiModel]:
"""Get models that support a specific capability."""
models = self.getCachedModels()
return [model for model in models if capability in model.capabilities]
def getModelsByPriority(self, priority: str) -> List[AiModel]: def getModelsByPriority(self, priority: str) -> List[AiModel]:
"""Get models that have a specific priority.""" """Get models that have a specific priority."""

View file

@ -112,10 +112,6 @@ class ModelRegistry:
self.refreshModels() self.refreshModels()
return [model for model in self._models.values() if model.connectorType == connectorType] return [model for model in self._models.values() if model.connectorType == connectorType]
def getModelsByCapability(self, capability: str) -> List[AiModel]:
"""Get models that support a specific capability."""
self.refreshModels()
return [model for model in self._models.values() if capability in model.capabilities]
def getModelsByPriority(self, priority: str) -> List[AiModel]: def getModelsByPriority(self, priority: str) -> List[AiModel]:
"""Get models that have a specific priority.""" """Get models that have a specific priority."""

View file

@ -71,8 +71,13 @@ class ModelSelector:
contextSize = len(context.encode("utf-8")) contextSize = len(context.encode("utf-8"))
totalSize = promptSize + contextSize totalSize = promptSize + contextSize
# Step 1: Filter by operation type (MUST match) # Step 1: Filter by operation type (MUST match) - check if model has this operation type
operationFiltered = [m for m in availableModels if options.operationType in m.operationTypes] operationFiltered = []
for model in availableModels:
# Check if model has the required operation type
hasOperationType = any(ot.operationType == options.operationType for ot in model.operationTypes)
if hasOperationType:
operationFiltered.append(model)
logger.debug(f"After operation type filtering: {len(operationFiltered)} models") logger.debug(f"After operation type filtering: {len(operationFiltered)} models")
# Step 2: Filter by prompt size (MUST be <= 80% of context size) # Step 2: Filter by prompt size (MUST be <= 80% of context size)
@ -100,6 +105,7 @@ class ModelSelector:
def _calculateModelScore(self, model: AiModel, promptSize: int, contextSize: int, totalSize: int, options: AiCallOptions) -> float: def _calculateModelScore(self, model: AiModel, promptSize: int, contextSize: int, totalSize: int, options: AiCallOptions) -> float:
""" """
Calculate a score for a model based on how well it fulfills the criteria. Calculate a score for a model based on how well it fulfills the criteria.
Operation type rating is the PRIMARY sorting criteria (multiplied by 1000).
Args: Args:
model: The model to score model: The model to score
@ -113,7 +119,11 @@ class ModelSelector:
""" """
score = 0.0 score = 0.0
# 1. Prompt + Context size rating # 1. PRIMARY: Operation Type Rating (multiplied by 1000 for primary sorting)
operationTypeRating = self._getOperationTypeRating(model, options.operationType)
score += operationTypeRating * 1000.0 # Primary sorting criteria
# 2. Prompt + Context size rating
if model.contextLength > 0: if model.contextLength > 0:
modelMaxSize = model.contextLength * 0.8 # 80% of model context length modelMaxSize = model.contextLength * 0.8 # 80% of model context length
if totalSize <= modelMaxSize: if totalSize <= modelMaxSize:
@ -126,13 +136,13 @@ class ModelSelector:
# No context length limit # No context length limit
score += 1.0 score += 1.0
# 2. Processing Mode rating # 3. Processing Mode rating
if hasattr(options, 'processingMode') and options.processingMode: if hasattr(options, 'processingMode') and options.processingMode:
score += self._getProcessingModeRating(model.processingMode, options.processingMode) score += self._getProcessingModeRating(model.processingMode, options.processingMode)
else: else:
score += 1.0 # No preference score += 1.0 # No preference
# 3. Priority rating # 4. Priority rating
if hasattr(options, 'priority') and options.priority: if hasattr(options, 'priority') and options.priority:
score += self._getPriorityRating(model, options.priority) score += self._getPriorityRating(model, options.priority)
else: else:
@ -140,6 +150,22 @@ class ModelSelector:
return score return score
def _getOperationTypeRating(self, model: AiModel, operationType: OperationTypeEnum) -> float:
"""
Get the operation type rating for a model.
Args:
model: The model to check
operationType: The operation type to get rating for
Returns:
Rating (1-10) or 0 if model doesn't support this operation type
"""
for ot_rating in model.operationTypes:
if ot_rating.operationType == operationType:
return float(ot_rating.rating)
return 0.0 # Model doesn't support this operation type
def _getProcessingModeRating(self, modelMode: ProcessingModeEnum, requestedMode: ProcessingModeEnum) -> float: def _getProcessingModeRating(self, modelMode: ProcessingModeEnum, requestedMode: ProcessingModeEnum) -> float:
"""Get processing mode rating based on compatibility.""" """Get processing mode rating based on compatibility."""
if modelMode == requestedMode: if modelMode == requestedMode:
@ -215,10 +241,10 @@ class ModelSelector:
logger.info(f" Quality Rating: {model.qualityRating}/10") logger.info(f" Quality Rating: {model.qualityRating}/10")
logger.info(f" Speed Rating: {model.speedRating}/10") logger.info(f" Speed Rating: {model.speedRating}/10")
logger.info(f" Cost: ${model.costPer1kTokensInput:.4f}/1k tokens") logger.info(f" Cost: ${model.costPer1kTokensInput:.4f}/1k tokens")
logger.info(f" Capabilities: {', '.join(model.capabilities)}")
logger.info(f" Priority: {model.priority}") logger.info(f" Priority: {model.priority}")
logger.info(f" Processing Mode: {model.processingMode}") logger.info(f" Processing Mode: {model.processingMode}")
logger.info(f" Operation Types: {', '.join(model.operationTypes)}") operationTypesStr = ', '.join([f"{ot.operationType.value}({ot.rating})" for ot in model.operationTypes])
logger.info(f" Operation Types: {operationTypesStr}")
# Global model selector instance # Global model selector instance

View file

@ -5,7 +5,7 @@ from typing import Dict, Any, List, Union
from fastapi import HTTPException from fastapi import HTTPException
from modules.shared.configuration import APP_CONFIG from modules.shared.configuration import APP_CONFIG
from modules.aicore.aicoreBase import BaseConnectorAi from modules.aicore.aicoreBase import BaseConnectorAi
from modules.datamodels.datamodelAi import AiModel, ModelCapabilitiesEnum, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings
# Configure logger # Configure logger
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -14,9 +14,6 @@ def loadConfigData():
"""Load configuration data for Anthropic connector""" """Load configuration data for Anthropic connector"""
return { return {
"apiKey": APP_CONFIG.get('Connector_AiAnthropic_API_SECRET'), "apiKey": APP_CONFIG.get('Connector_AiAnthropic_API_SECRET'),
"apiUrl": APP_CONFIG.get('Connector_AiAnthropic_API_URL'),
"modelName": APP_CONFIG.get('Connector_AiAnthropic_MODEL_NAME'),
"temperature": float(APP_CONFIG.get('Connector_AiAnthropic_TEMPERATURE')),
} }
class AiAnthropic(BaseConnectorAi): class AiAnthropic(BaseConnectorAi):
@ -27,8 +24,6 @@ class AiAnthropic(BaseConnectorAi):
# Load configuration # Load configuration
self.config = loadConfigData() self.config = loadConfigData()
self.apiKey = self.config["apiKey"] self.apiKey = self.config["apiKey"]
self.apiUrl = self.config["apiUrl"]
self.modelName = self.config["modelName"]
# HttpClient for API calls # HttpClient for API calls
self.httpClient = httpx.AsyncClient( self.httpClient = httpx.AsyncClient(
@ -40,7 +35,7 @@ class AiAnthropic(BaseConnectorAi):
} }
) )
logger.info(f"Anthropic Connector initialized with model: {self.modelName}") logger.info("Anthropic Connector initialized")
def getConnectorType(self) -> str: def getConnectorType(self) -> str:
"""Get the connector type identifier.""" """Get the connector type identifier."""
@ -50,38 +45,49 @@ class AiAnthropic(BaseConnectorAi):
"""Get all available Anthropic models.""" """Get all available Anthropic models."""
return [ return [
AiModel( AiModel(
name="anthropic_callAiBasic", name="claude-3-5-sonnet-20241022",
displayName="Claude 3.5 Sonnet", displayName="Anthropic Claude 3.5 Sonnet",
connectorType="anthropic", connectorType="anthropic",
apiUrl="https://api.anthropic.com/v1/messages",
temperature=0.2,
maxTokens=200000, maxTokens=200000,
contextLength=200000, contextLength=200000,
costPer1kTokensInput=0.015, costPer1kTokensInput=0.015,
costPer1kTokensOutput=0.075, costPer1kTokensOutput=0.075,
speedRating=7, speedRating=6, # Slower due to high-quality processing
qualityRating=10, qualityRating=10, # Best quality available
capabilities=[ModelCapabilitiesEnum.TEXT_GENERATION, ModelCapabilitiesEnum.CHAT, ModelCapabilitiesEnum.REASONING, ModelCapabilitiesEnum.ANALYSIS], # capabilities removed (not used in business logic)
functionCall=self.callAiBasic, functionCall=self.callAiBasic,
priority=PriorityEnum.QUALITY, priority=PriorityEnum.QUALITY,
processingMode=ProcessingModeEnum.DETAILED, processingMode=ProcessingModeEnum.DETAILED,
operationTypes=[OperationTypeEnum.GENERAL, OperationTypeEnum.PLAN, OperationTypeEnum.ANALYSE, OperationTypeEnum.GENERATE], operationTypes=createOperationTypeRatings(
(OperationTypeEnum.PLAN, 9),
(OperationTypeEnum.DATA_ANALYSE, 10),
(OperationTypeEnum.DATA_GENERATE, 9),
(OperationTypeEnum.DATA_EXTRACT, 8)
),
version="claude-3-5-sonnet-20241022", version="claude-3-5-sonnet-20241022",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.015 + (bytesReceived / 4 / 1000) * 0.075 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.015 + (bytesReceived / 4 / 1000) * 0.075
), ),
AiModel( AiModel(
name="anthropic_callAiImage", name="claude-3-5-sonnet-20241022-vision",
displayName="Claude 3.5 Sonnet Vision", displayName="Anthropic Claude 3.5 Sonnet Vision",
connectorType="anthropic", connectorType="anthropic",
apiUrl="https://api.anthropic.com/v1/messages",
temperature=0.2,
maxTokens=200000, maxTokens=200000,
contextLength=200000, contextLength=200000,
costPer1kTokensInput=0.015, costPer1kTokensInput=0.015,
costPer1kTokensOutput=0.075, costPer1kTokensOutput=0.075,
speedRating=7, speedRating=6, # Slower due to high-quality processing
qualityRating=10, qualityRating=10, # Best quality available
capabilities=[ModelCapabilitiesEnum.IMAGE_ANALYSE, ModelCapabilitiesEnum.VISION, ModelCapabilitiesEnum.MULTIMODAL], # capabilities removed (not used in business logic)
functionCall=self.callAiImage, functionCall=self.callAiImage,
priority=PriorityEnum.QUALITY, priority=PriorityEnum.QUALITY,
processingMode=ProcessingModeEnum.DETAILED, processingMode=ProcessingModeEnum.DETAILED,
operationTypes=[OperationTypeEnum.IMAGE_ANALYSE], operationTypes=createOperationTypeRatings(
(OperationTypeEnum.IMAGE_ANALYSE, 10)
),
version="claude-3-5-sonnet-20241022", version="claude-3-5-sonnet-20241022",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.015 + (bytesReceived / 4 / 1000) * 0.075 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.015 + (bytesReceived / 4 / 1000) * 0.075
) )
@ -106,7 +112,7 @@ class AiAnthropic(BaseConnectorAi):
messages = modelCall.messages messages = modelCall.messages
model = modelCall.model model = modelCall.model
options = modelCall.options options = modelCall.options
temperature = options.get("temperature", self.config.get("temperature", 0.2)) temperature = options.get("temperature", model.temperature)
maxTokens = model.maxTokens maxTokens = model.maxTokens
# Transform OpenAI-style messages to Anthropic format: # Transform OpenAI-style messages to Anthropic format:
@ -148,7 +154,7 @@ class AiAnthropic(BaseConnectorAi):
# Create Anthropic API payload # Create Anthropic API payload
payload: Dict[str, Any] = { payload: Dict[str, Any] = {
"model": self.modelName, "model": model.name,
"messages": converted_messages, "messages": converted_messages,
"temperature": temperature, "temperature": temperature,
} }
@ -161,7 +167,7 @@ class AiAnthropic(BaseConnectorAi):
payload["system"] = system_prompt payload["system"] = system_prompt
response = await self.httpClient.post( response = await self.httpClient.post(
self.apiUrl, model.apiUrl,
json=payload json=payload
) )
@ -207,7 +213,7 @@ class AiAnthropic(BaseConnectorAi):
return AiModelResponse( return AiModelResponse(
content=content, content=content,
success=True, success=True,
modelId=self.modelName, modelId=model.name,
metadata={"response_id": anthropicResponse.get("id", "")} metadata={"response_id": anthropicResponse.get("id", "")}
) )
@ -215,19 +221,25 @@ class AiAnthropic(BaseConnectorAi):
logger.error(f"Error calling Anthropic API: {str(e)}") logger.error(f"Error calling Anthropic API: {str(e)}")
raise HTTPException(status_code=500, detail=f"Error calling Anthropic API: {str(e)}") raise HTTPException(status_code=500, detail=f"Error calling Anthropic API: {str(e)}")
async def callAiImage(self, prompt: str, imageData: Union[str, bytes], mimeType: str = None) -> str: async def callAiImage(self, modelCall: AiModelCall) -> AiModelResponse:
""" """
Analyzes an image using Anthropic's vision capabilities. Analyzes an image using Anthropic's vision capabilities using standardized pattern.
Args: Args:
imageData: Either a file path (str) or image data (bytes) modelCall: AiModelCall with messages and image data in options
mimeType: The MIME type of the image (optional, only for binary data)
prompt: The prompt for analysis
Returns: Returns:
The analysis response as text AiModelResponse with analysis content
""" """
try: try:
# Extract parameters from modelCall
messages = modelCall.messages
model = modelCall.model
options = modelCall.options
prompt = messages[0]["content"] if messages else ""
imageData = options.get("imageData")
mimeType = options.get("mimeType")
# Debug logging # Debug logging
logger.info(f"callAiImage called with imageData type: {type(imageData)}, length: {len(imageData) if imageData else 0}, mimeType: {mimeType}") logger.info(f"callAiImage called with imageData type: {type(imageData)}, length: {len(imageData) if imageData else 0}, mimeType: {mimeType}")
@ -272,20 +284,22 @@ class AiAnthropic(BaseConnectorAi):
} }
] ]
# Use the existing callAiBasic function with the Vision model # Create a modelCall for the basic AI function
response = await self.callAiBasic(messages) basicModelCall = AiModelCall(
messages=messages,
model=model
)
# Extract and return content with proper error handling # Use the existing callAiBasic function with the Vision model
try: response = await self.callAiBasic(basicModelCall)
content = response["choices"][0]["message"]["content"]
if content is None or content.strip() == "": # Return the standardized response
return "[AI returned empty response for image analysis]" return response
return content
except (KeyError, IndexError, TypeError) as e:
logger.error(f"Error extracting content from AI response: {str(e)}")
logger.error(f"Response structure: {response}")
return f"[Error extracting AI response: {str(e)}]"
except Exception as e: except Exception as e:
logger.error(f"Error during image analysis: {str(e)}", exc_info=True) logger.error(f"Error during image analysis: {str(e)}", exc_info=True)
return f"[Error during image analysis: {str(e)}]" return AiModelResponse(
content="",
success=False,
error=f"Error during image analysis: {str(e)}"
)

View file

@ -1,7 +1,7 @@
import logging import logging
from typing import Dict, Any, List, Union from typing import Dict, Any, List, Union
from modules.aicore.aicoreBase import BaseConnectorAi from modules.aicore.aicoreBase import BaseConnectorAi
from modules.datamodels.datamodelAi import AiModel, ModelCapabilitiesEnum, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings
# Configure logger # Configure logger
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -21,56 +21,62 @@ class AiInternal(BaseConnectorAi):
"""Get all available internal models.""" """Get all available internal models."""
return [ return [
AiModel( AiModel(
name="internal_extraction", name="internal-extractor",
displayName="Internal Document Extractor", displayName="Internal Document Extractor",
connectorType="internal", connectorType="internal",
apiUrl="internal://extract",
temperature=0.0, # Not applicable for extraction
maxTokens=0, # Not token-based maxTokens=0, # Not token-based
contextLength=0, contextLength=0,
costPer1kTokensInput=0.0, costPer1kTokensInput=0.0,
costPer1kTokensOutput=0.0, costPer1kTokensOutput=0.0,
speedRating=8, speedRating=9, # Very fast for internal operations
qualityRating=8, qualityRating=8, # Good quality
capabilities=[ModelCapabilitiesEnum.CONTENT_EXTRACTION, ModelCapabilitiesEnum.TEXT_EXTRACTION], # capabilities removed (not used in business logic)
functionCall=self.extractDocument, functionCall=self.extractDocument,
priority=PriorityEnum.COST, priority=PriorityEnum.COST,
processingMode=ProcessingModeEnum.BASIC, processingMode=ProcessingModeEnum.BASIC,
operationTypes=[OperationTypeEnum.EXTRACT], operationTypes=createOperationTypeRatings(),
version="internal-extractor-v1", version="internal-extractor-v1",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: 0.001 + (bytesSent + bytesReceived) / (1024 * 1024) * 0.01 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: 0.001 + (bytesSent + bytesReceived) / (1024 * 1024) * 0.01
), ),
AiModel( AiModel(
name="internal_generation", name="internal-generator",
displayName="Internal Document Generator", displayName="Internal Document Generator",
connectorType="internal", connectorType="internal",
apiUrl="internal://generate",
temperature=0.0, # Not applicable for generation
maxTokens=0, # Not token-based maxTokens=0, # Not token-based
contextLength=0, contextLength=0,
costPer1kTokensInput=0.0, costPer1kTokensInput=0.0,
costPer1kTokensOutput=0.0, costPer1kTokensOutput=0.0,
speedRating=7, speedRating=8, # Fast for generation
qualityRating=8, qualityRating=8, # Good quality
capabilities=[ModelCapabilitiesEnum.TEXT_GENERATION, ModelCapabilitiesEnum.ANALYSIS], # capabilities removed (not used in business logic)
functionCall=self.generateDocument, functionCall=self.generateDocument,
priority=PriorityEnum.COST, priority=PriorityEnum.COST,
processingMode=ProcessingModeEnum.BASIC, processingMode=ProcessingModeEnum.BASIC,
operationTypes=[OperationTypeEnum.GENERATE], operationTypes=createOperationTypeRatings(),
version="internal-generator-v1", version="internal-generator-v1",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: 0.002 + (bytesReceived / (1024 * 1024)) * 0.005 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: 0.002 + (bytesReceived / (1024 * 1024)) * 0.005
), ),
AiModel( AiModel(
name="internal_rendering", name="internal-renderer",
displayName="Internal Document Renderer", displayName="Internal Document Renderer",
connectorType="internal", connectorType="internal",
apiUrl="internal://render",
temperature=0.0, # Not applicable for rendering
maxTokens=0, # Not token-based maxTokens=0, # Not token-based
contextLength=0, contextLength=0,
costPer1kTokensInput=0.0, costPer1kTokensInput=0.0,
costPer1kTokensOutput=0.0, costPer1kTokensOutput=0.0,
speedRating=6, speedRating=7, # Good for rendering
qualityRating=9, qualityRating=9, # High quality rendering
capabilities=[ModelCapabilitiesEnum.TEXT_GENERATION, ModelCapabilitiesEnum.ANALYSIS], # capabilities removed (not used in business logic)
functionCall=self.renderDocument, functionCall=self.renderDocument,
priority=PriorityEnum.QUALITY, priority=PriorityEnum.QUALITY,
processingMode=ProcessingModeEnum.DETAILED, processingMode=ProcessingModeEnum.DETAILED,
operationTypes=[OperationTypeEnum.GENERATE], operationTypes=createOperationTypeRatings(),
version="internal-renderer-v1", version="internal-renderer-v1",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: 0.003 + (bytesReceived / (1024 * 1024)) * 0.008 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: 0.003 + (bytesReceived / (1024 * 1024)) * 0.008
) )

View file

@ -5,7 +5,7 @@ from typing import Dict, Any, List, Union
from fastapi import HTTPException from fastapi import HTTPException
from modules.shared.configuration import APP_CONFIG from modules.shared.configuration import APP_CONFIG
from modules.aicore.aicoreBase import BaseConnectorAi from modules.aicore.aicoreBase import BaseConnectorAi
from modules.datamodels.datamodelAi import AiModel, ModelCapabilitiesEnum, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings
# Configure logger # Configure logger
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -18,9 +18,6 @@ def loadConfigData():
"""Load configuration data for OpenAI connector""" """Load configuration data for OpenAI connector"""
return { return {
"apiKey": APP_CONFIG.get('Connector_AiOpenai_API_SECRET'), "apiKey": APP_CONFIG.get('Connector_AiOpenai_API_SECRET'),
"apiUrl": APP_CONFIG.get('Connector_AiOpenai_API_URL'),
"modelName": APP_CONFIG.get('Connector_AiOpenai_MODEL_NAME'),
"temperature": float(APP_CONFIG.get('Connector_AiOpenai_TEMPERATURE')),
} }
class AiOpenai(BaseConnectorAi): class AiOpenai(BaseConnectorAi):
@ -31,8 +28,6 @@ class AiOpenai(BaseConnectorAi):
# Load configuration # Load configuration
self.config = loadConfigData() self.config = loadConfigData()
self.apiKey = self.config["apiKey"] self.apiKey = self.config["apiKey"]
self.apiUrl = self.config["apiUrl"]
self.modelName = self.config["modelName"]
# HttpClient for API calls # HttpClient for API calls
self.httpClient = httpx.AsyncClient( self.httpClient = httpx.AsyncClient(
@ -42,7 +37,7 @@ class AiOpenai(BaseConnectorAi):
"Content-Type": "application/json" "Content-Type": "application/json"
} }
) )
logger.info(f"OpenAI Connector initialized with model: {self.modelName}") logger.info("OpenAI Connector initialized")
def getConnectorType(self) -> str: def getConnectorType(self) -> str:
"""Get the connector type identifier.""" """Get the connector type identifier."""
@ -52,74 +47,95 @@ class AiOpenai(BaseConnectorAi):
"""Get all available OpenAI models.""" """Get all available OpenAI models."""
return [ return [
AiModel( AiModel(
name="openai_callAiBasic", name="gpt-4o",
displayName="GPT-4o", displayName="OpenAI GPT-4o",
connectorType="openai", connectorType="openai",
apiUrl="https://api.openai.com/v1/chat/completions",
temperature=0.2,
maxTokens=128000, maxTokens=128000,
contextLength=128000, contextLength=128000,
costPer1kTokensInput=0.03, costPer1kTokensInput=0.03,
costPer1kTokensOutput=0.06, costPer1kTokensOutput=0.06,
speedRating=8, speedRating=7, # Good speed for complex tasks
qualityRating=9, qualityRating=9, # High quality
capabilities=[ModelCapabilitiesEnum.TEXT_GENERATION, ModelCapabilitiesEnum.CHAT, ModelCapabilitiesEnum.REASONING, ModelCapabilitiesEnum.ANALYSIS], # capabilities removed (not used in business logic)
functionCall=self.callAiBasic, functionCall=self.callAiBasic,
priority=PriorityEnum.BALANCED, priority=PriorityEnum.BALANCED,
processingMode=ProcessingModeEnum.ADVANCED, processingMode=ProcessingModeEnum.ADVANCED,
operationTypes=[OperationTypeEnum.GENERAL, OperationTypeEnum.PLAN, OperationTypeEnum.ANALYSE, OperationTypeEnum.GENERATE], operationTypes=createOperationTypeRatings(
(OperationTypeEnum.PLAN, 8),
(OperationTypeEnum.DATA_ANALYSE, 9),
(OperationTypeEnum.DATA_GENERATE, 9),
(OperationTypeEnum.DATA_EXTRACT, 7)
),
version="gpt-4o", version="gpt-4o",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.03 + (bytesReceived / 4 / 1000) * 0.06 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.03 + (bytesReceived / 4 / 1000) * 0.06
), ),
AiModel( AiModel(
name="openai_callAiBasic_gpt35", name="gpt-3.5-turbo",
displayName="GPT-3.5 Turbo", displayName="OpenAI GPT-3.5 Turbo",
connectorType="openai", connectorType="openai",
apiUrl="https://api.openai.com/v1/chat/completions",
temperature=0.2,
maxTokens=16000, maxTokens=16000,
contextLength=16000, contextLength=16000,
costPer1kTokensInput=0.0015, costPer1kTokensInput=0.0015,
costPer1kTokensOutput=0.002, costPer1kTokensOutput=0.002,
speedRating=9, speedRating=9, # Very fast
qualityRating=7, qualityRating=7, # Good but not premium
capabilities=[ModelCapabilitiesEnum.TEXT_GENERATION, ModelCapabilitiesEnum.CHAT, ModelCapabilitiesEnum.REASONING], # capabilities removed (not used in business logic)
functionCall=self.callAiBasic, functionCall=self.callAiBasic,
priority=PriorityEnum.SPEED, priority=PriorityEnum.SPEED,
processingMode=ProcessingModeEnum.BASIC, processingMode=ProcessingModeEnum.BASIC,
operationTypes=[OperationTypeEnum.GENERAL, OperationTypeEnum.PLAN, OperationTypeEnum.GENERATE], operationTypes=createOperationTypeRatings(
(OperationTypeEnum.PLAN, 7),
(OperationTypeEnum.DATA_ANALYSE, 8),
(OperationTypeEnum.DATA_GENERATE, 8)
),
version="gpt-3.5-turbo", version="gpt-3.5-turbo",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0015 + (bytesReceived / 4 / 1000) * 0.002 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0015 + (bytesReceived / 4 / 1000) * 0.002
), ),
AiModel( AiModel(
name="openai_callAiImage", name="gpt-4o-vision",
displayName="GPT-4o Vision", displayName="OpenAI GPT-4o Vision",
connectorType="openai", connectorType="openai",
apiUrl="https://api.openai.com/v1/chat/completions",
temperature=0.2,
maxTokens=128000, maxTokens=128000,
contextLength=128000, contextLength=128000,
costPer1kTokensInput=0.03, costPer1kTokensInput=0.03,
costPer1kTokensOutput=0.06, costPer1kTokensOutput=0.06,
speedRating=7, speedRating=6, # Slower for vision tasks
qualityRating=9, qualityRating=9, # High quality vision
capabilities=[ModelCapabilitiesEnum.IMAGE_ANALYSE, ModelCapabilitiesEnum.VISION, ModelCapabilitiesEnum.MULTIMODAL], # capabilities removed (not used in business logic)
functionCall=self.callAiImage, functionCall=self.callAiImage,
priority=PriorityEnum.QUALITY, priority=PriorityEnum.QUALITY,
processingMode=ProcessingModeEnum.DETAILED, processingMode=ProcessingModeEnum.DETAILED,
operationTypes=[OperationTypeEnum.IMAGE_ANALYSE], operationTypes=createOperationTypeRatings(
(OperationTypeEnum.IMAGE_ANALYSE, 9)
),
version="gpt-4o", version="gpt-4o",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.03 + (bytesReceived / 4 / 1000) * 0.06 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.03 + (bytesReceived / 4 / 1000) * 0.06
), ),
AiModel( AiModel(
name="openai_generateImage", name="dall-e-3",
displayName="DALL-E 3", displayName="OpenAI DALL-E 3",
connectorType="openai", connectorType="openai",
apiUrl="https://api.openai.com/v1/images/generations",
temperature=0.0, # Image generation doesn't use temperature
maxTokens=0, # Image generation doesn't use tokens maxTokens=0, # Image generation doesn't use tokens
contextLength=0, contextLength=0,
costPer1kTokensInput=0.04, costPer1kTokensInput=0.04,
costPer1kTokensOutput=0.0, costPer1kTokensOutput=0.0,
speedRating=6, speedRating=5, # Slow for image generation
qualityRating=9, qualityRating=9, # High quality art generation
capabilities=[ModelCapabilitiesEnum.IMAGE_GENERATE, ModelCapabilitiesEnum.ART, ModelCapabilitiesEnum.VISUAL_CREATION], # capabilities removed (not used in business logic)
functionCall=self.generateImage, functionCall=self.generateImage,
priority=PriorityEnum.QUALITY, priority=PriorityEnum.QUALITY,
processingMode=ProcessingModeEnum.DETAILED, processingMode=ProcessingModeEnum.DETAILED,
operationTypes=[OperationTypeEnum.IMAGE_GENERATE], operationTypes=createOperationTypeRatings(
(OperationTypeEnum.IMAGE_GENERATE, 10)
),
version="dall-e-3", version="dall-e-3",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.04 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.04
) )
@ -143,18 +159,18 @@ class AiOpenai(BaseConnectorAi):
messages = modelCall.messages messages = modelCall.messages
model = modelCall.model model = modelCall.model
options = modelCall.options options = modelCall.options
temperature = options.get("temperature", self.config.get("temperature", 0.2)) temperature = options.get("temperature", model.temperature)
maxTokens = model.maxTokens maxTokens = model.maxTokens
payload = { payload = {
"model": self.modelName, "model": model.name,
"messages": messages, "messages": messages,
"temperature": temperature, "temperature": temperature,
"max_tokens": maxTokens "max_tokens": maxTokens
} }
response = await self.httpClient.post( response = await self.httpClient.post(
self.apiUrl, model.apiUrl,
json=payload json=payload
) )
@ -184,7 +200,7 @@ class AiOpenai(BaseConnectorAi):
return AiModelResponse( return AiModelResponse(
content=content, content=content,
success=True, success=True,
modelId=self.modelName, modelId=model.name,
metadata={"response_id": responseJson.get("id", "")} metadata={"response_id": responseJson.get("id", "")}
) )
@ -195,19 +211,25 @@ class AiOpenai(BaseConnectorAi):
logger.error(f"Error calling OpenAI API: {str(e)}") logger.error(f"Error calling OpenAI API: {str(e)}")
raise HTTPException(status_code=500, detail=f"Error calling OpenAI API: {str(e)}") raise HTTPException(status_code=500, detail=f"Error calling OpenAI API: {str(e)}")
async def callAiImage(self, prompt: str, imageData: Union[str, bytes], mimeType: str = None) -> str: async def callAiImage(self, modelCall: AiModelCall) -> AiModelResponse:
""" """
Analyzes an image with the OpenAI Vision API. Analyzes an image with the OpenAI Vision API using standardized pattern.
Args: Args:
imageData: base64encoded data modelCall: AiModelCall with messages and image data in options
mimeType: The MIME type of the image (optional, only for binary data)
prompt: The prompt for analysis
Returns: Returns:
The response from the OpenAI Vision API as text AiModelResponse with analysis content
""" """
try: try:
# Extract parameters from modelCall
messages = modelCall.messages
model = modelCall.model
options = modelCall.options
prompt = messages[0]["content"] if messages else ""
imageData = options.get("imageData")
mimeType = options.get("mimeType", "image/jpeg")
logger.debug(f"Starting image analysis with query '{prompt}' for size {len(imageData)}B...") logger.debug(f"Starting image analysis with query '{prompt}' for size {len(imageData)}B...")
# Ensure imageData is a string (base64 encoded) # Ensure imageData is a string (base64 encoded)
@ -219,10 +241,6 @@ class AiOpenai(BaseConnectorAi):
if padding_needed: if padding_needed:
imageData += '=' * (4 - padding_needed) imageData += '=' * (4 - padding_needed)
# Use default MIME type if not provided
if not mimeType:
mimeType = "image/jpeg"
logger.debug(f"Using MIME type: {mimeType}") logger.debug(f"Using MIME type: {mimeType}")
logger.debug(f"Base64 data length: {len(imageData)} characters") logger.debug(f"Base64 data length: {len(imageData)} characters")
@ -248,18 +266,18 @@ class AiOpenai(BaseConnectorAi):
# Override the model for vision tasks # Override the model for vision tasks
visionModel = "gpt-4o" # or "gpt-4-vision-preview" depending on availability visionModel = "gpt-4o" # or "gpt-4-vision-preview" depending on availability
# Use parameters from configuration # Use parameters from model
temperature = self.config.get("temperature", 0.2) temperature = model.temperature
# Don't set maxTokens - let the model use its full context length # Don't set maxTokens - let the model use its full context length
payload = { payload = {
"model": visionModel, "model": model.name,
"messages": messages, "messages": messages,
"temperature": temperature "temperature": temperature
} }
response = await self.httpClient.post( response = await self.httpClient.post(
self.apiUrl, model.apiUrl,
json=payload json=payload
) )
@ -269,29 +287,42 @@ class AiOpenai(BaseConnectorAi):
responseJson = response.json() responseJson = response.json()
content = responseJson["choices"][0]["message"]["content"] content = responseJson["choices"][0]["message"]["content"]
return content
# Return content return AiModelResponse(
return response content=content,
success=True,
modelId=model.name,
metadata={"response_id": responseJson.get("id", "")}
)
except Exception as e: except Exception as e:
logger.error(f"Error during image analysis: {str(e)}", exc_info=True) logger.error(f"Error during image analysis: {str(e)}", exc_info=True)
return f"[Error during image analysis: {str(e)}]" return AiModelResponse(
content="",
success=False,
error=f"Error during image analysis: {str(e)}"
)
async def generateImage(self, prompt: str, size: str = "1024x1024", quality: str = "standard", style: str = "vivid") -> Dict[str, Any]: async def generateImage(self, modelCall: AiModelCall) -> AiModelResponse:
""" """
Generate an image using DALL-E 3. Generate an image using DALL-E 3 using standardized pattern.
Args: Args:
prompt: The text prompt for image generation modelCall: AiModelCall with messages and generation options
size: Image size (1024x1024, 1792x1024, or 1024x1792)
quality: Image quality (standard or hd)
style: Image style (vivid or natural)
Returns: Returns:
Dictionary with success status and image data AiModelResponse with generated image data
""" """
try: try:
# Extract parameters from modelCall
messages = modelCall.messages
model = modelCall.model
options = modelCall.options
prompt = messages[0]["content"] if messages else ""
size = options.get("size", "1024x1024")
quality = options.get("quality", "standard")
style = options.get("style", "vivid")
logger.debug(f"Starting image generation with prompt: '{prompt[:100]}...'") logger.debug(f"Starting image generation with prompt: '{prompt[:100]}...'")
# DALL-E 3 API endpoint # DALL-E 3 API endpoint
@ -336,23 +367,29 @@ class AiOpenai(BaseConnectorAi):
image_data = responseJson["data"][0]["b64_json"] image_data = responseJson["data"][0]["b64_json"]
logger.info(f"Successfully generated image: {len(image_data)} characters") logger.info(f"Successfully generated image: {len(image_data)} characters")
return { return AiModelResponse(
"success": True, content=image_data,
"image_data": image_data, success=True,
"size": size, modelId="dall-e-3",
"quality": quality, metadata={
"style": style "size": size,
} "quality": quality,
"style": style,
"response_id": responseJson.get("id", "")
}
)
else: else:
logger.error("No image data in DALL-E response") logger.error("No image data in DALL-E response")
return { return AiModelResponse(
"success": False, content="",
"error": "No image data in DALL-E response" success=False,
} error="No image data in DALL-E response"
)
except Exception as e: except Exception as e:
logger.error(f"Error during image generation: {str(e)}", exc_info=True) logger.error(f"Error during image generation: {str(e)}", exc_info=True)
return { return AiModelResponse(
"success": False, content="",
"error": f"Error during image generation: {str(e)}" success=False,
} error=f"Error during image generation: {str(e)}"
)

View file

@ -5,7 +5,7 @@ from typing import Dict, Any, List, Union, Optional
from fastapi import HTTPException from fastapi import HTTPException
from modules.shared.configuration import APP_CONFIG from modules.shared.configuration import APP_CONFIG
from modules.aicore.aicoreBase import BaseConnectorAi from modules.aicore.aicoreBase import BaseConnectorAi
from modules.datamodels.datamodelAi import AiModel, ModelCapabilitiesEnum, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelCall, AiModelResponse, createOperationTypeRatings
# Configure logger # Configure logger
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -14,9 +14,6 @@ def loadConfigData():
"""Load configuration data for Perplexity connector""" """Load configuration data for Perplexity connector"""
return { return {
"apiKey": APP_CONFIG.get('Connector_AiPerplexity_API_SECRET'), "apiKey": APP_CONFIG.get('Connector_AiPerplexity_API_SECRET'),
"apiUrl": APP_CONFIG.get('Connector_AiPerplexity_API_URL'),
"modelName": APP_CONFIG.get('Connector_AiPerplexity_MODEL_NAME'),
"temperature": float(APP_CONFIG.get('Connector_AiPerplexity_TEMPERATURE')),
} }
class AiPerplexity(BaseConnectorAi): class AiPerplexity(BaseConnectorAi):
@ -27,8 +24,6 @@ class AiPerplexity(BaseConnectorAi):
# Load configuration # Load configuration
self.config = loadConfigData() self.config = loadConfigData()
self.apiKey = self.config["apiKey"] self.apiKey = self.config["apiKey"]
self.apiUrl = self.config["apiUrl"]
self.modelName = self.config["modelName"]
# HttpClient for API calls # HttpClient for API calls
self.httpClient = httpx.AsyncClient( self.httpClient = httpx.AsyncClient(
@ -40,7 +35,7 @@ class AiPerplexity(BaseConnectorAi):
} }
) )
logger.info(f"Perplexity Connector initialized with model: {self.modelName}") logger.info("Perplexity Connector initialized")
def getConnectorType(self) -> str: def getConnectorType(self) -> str:
"""Get the connector type identifier.""" """Get the connector type identifier."""
@ -50,92 +45,130 @@ class AiPerplexity(BaseConnectorAi):
"""Get all available Perplexity models.""" """Get all available Perplexity models."""
return [ return [
AiModel( AiModel(
name="perplexity_callAiBasic", name="llama-3.1-sonar-large-128k-online",
displayName="Llama 3.1 Sonar Large 128k", displayName="Perplexity Llama 3.1 Sonar Large 128k",
connectorType="perplexity", connectorType="perplexity",
apiUrl="https://api.perplexity.ai/chat/completions",
temperature=0.2,
maxTokens=128000, maxTokens=128000,
contextLength=128000, contextLength=128000,
costPer1kTokensInput=0.005, costPer1kTokensInput=0.005,
costPer1kTokensOutput=0.005, costPer1kTokensOutput=0.005,
speedRating=8, speedRating=8,
qualityRating=8, qualityRating=8,
capabilities=[ModelCapabilitiesEnum.TEXT_GENERATION, ModelCapabilitiesEnum.CHAT, ModelCapabilitiesEnum.REASONING, ModelCapabilitiesEnum.WEB_SEARCH], # capabilities removed (not used in business logic)
functionCall=self.callAiBasic, functionCall=self.callAiBasic,
priority=PriorityEnum.BALANCED, priority=PriorityEnum.BALANCED,
processingMode=ProcessingModeEnum.ADVANCED, processingMode=ProcessingModeEnum.ADVANCED,
operationTypes=[OperationTypeEnum.GENERAL, OperationTypeEnum.PLAN, OperationTypeEnum.ANALYSE, OperationTypeEnum.GENERATE, OperationTypeEnum.WEB_RESEARCH], operationTypes=createOperationTypeRatings(
(OperationTypeEnum.PLAN, 7),
(OperationTypeEnum.DATA_ANALYSE, 8),
(OperationTypeEnum.DATA_GENERATE, 7)
),
version="llama-3.1-sonar-large-128k-online", version="llama-3.1-sonar-large-128k-online",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.005 + (bytesReceived / 4 / 1000) * 0.005 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.005 + (bytesReceived / 4 / 1000) * 0.005
), ),
AiModel( AiModel(
name="perplexity_callAiWithWebSearch", name="sonar-pro",
displayName="Sonar Pro", displayName="Perplexity Sonar Pro",
connectorType="perplexity", connectorType="perplexity",
apiUrl="https://api.perplexity.ai/chat/completions",
temperature=0.2,
maxTokens=128000, maxTokens=128000,
contextLength=128000, contextLength=128000,
costPer1kTokensInput=0.01, costPer1kTokensInput=0.01,
costPer1kTokensOutput=0.01, costPer1kTokensOutput=0.01,
speedRating=7, speedRating=6, # Slower due to AI analysis
qualityRating=9, qualityRating=10, # Best AI analysis quality
capabilities=[ModelCapabilitiesEnum.TEXT_GENERATION, ModelCapabilitiesEnum.WEB_SEARCH, ModelCapabilitiesEnum.RESEARCH], # capabilities removed (not used in business logic)
functionCall=self.callAiWithWebSearch, functionCall=self.callAiWithWebSearch,
priority=PriorityEnum.QUALITY, priority=PriorityEnum.QUALITY,
processingMode=ProcessingModeEnum.DETAILED, processingMode=ProcessingModeEnum.DETAILED,
operationTypes=[OperationTypeEnum.WEB_RESEARCH], operationTypes=createOperationTypeRatings(
(OperationTypeEnum.WEB_RESEARCH, 10),
(OperationTypeEnum.WEB_SEARCH, 9),
(OperationTypeEnum.WEB_CRAWL, 8),
(OperationTypeEnum.WEB_NEWS, 8),
(OperationTypeEnum.WEB_QUESTIONS, 9)
),
version="sonar-pro", version="sonar-pro",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.01 + (bytesReceived / 4 / 1000) * 0.01 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.01 + (bytesReceived / 4 / 1000) * 0.01
), ),
AiModel( AiModel(
name="perplexity_researchTopic", name="mistral-7b-instruct",
displayName="Mistral 7B Instruct", displayName="Perplexity Mistral 7B Instruct",
connectorType="perplexity", connectorType="perplexity",
apiUrl="https://api.perplexity.ai/chat/completions",
temperature=0.2,
maxTokens=32000, maxTokens=32000,
contextLength=32000, contextLength=32000,
costPer1kTokensInput=0.002, costPer1kTokensInput=0.002,
costPer1kTokensOutput=0.002, costPer1kTokensOutput=0.002,
speedRating=8, speedRating=9, # Fast for basic AI tasks
qualityRating=8, qualityRating=7, # Good but not premium quality
capabilities=[ModelCapabilitiesEnum.WEB_SEARCH, ModelCapabilitiesEnum.RESEARCH, ModelCapabilitiesEnum.INFORMATION_GATHERING], # capabilities removed (not used in business logic)
functionCall=self.researchTopic, functionCall=self.researchTopic,
priority=PriorityEnum.COST, priority=PriorityEnum.COST,
processingMode=ProcessingModeEnum.BASIC, processingMode=ProcessingModeEnum.BASIC,
operationTypes=[OperationTypeEnum.WEB_RESEARCH], operationTypes=createOperationTypeRatings(
(OperationTypeEnum.WEB_RESEARCH, 7),
(OperationTypeEnum.WEB_SEARCH, 6),
(OperationTypeEnum.WEB_CRAWL, 5),
(OperationTypeEnum.WEB_NEWS, 5),
(OperationTypeEnum.WEB_QUESTIONS, 6)
),
version="mistral-7b-instruct", version="mistral-7b-instruct",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.002 + (bytesReceived / 4 / 1000) * 0.002 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.002 + (bytesReceived / 4 / 1000) * 0.002
), ),
AiModel( AiModel(
name="perplexity_answerQuestion", name="mistral-7b-instruct-qa",
displayName="Mistral 7B Instruct QA", displayName="Perplexity Mistral 7B Instruct QA",
connectorType="perplexity", connectorType="perplexity",
apiUrl="https://api.perplexity.ai/chat/completions",
temperature=0.2,
maxTokens=32000, maxTokens=32000,
contextLength=32000, contextLength=32000,
costPer1kTokensInput=0.002, costPer1kTokensInput=0.002,
costPer1kTokensOutput=0.002, costPer1kTokensOutput=0.002,
speedRating=8, speedRating=9, # Fast for Q&A tasks
qualityRating=8, qualityRating=7, # Good but not premium quality
capabilities=[ModelCapabilitiesEnum.WEB_SEARCH, ModelCapabilitiesEnum.QUESTION_ANSWERING, ModelCapabilitiesEnum.RESEARCH], # capabilities removed (not used in business logic)
functionCall=self.answerQuestion, functionCall=self.answerQuestion,
priority=PriorityEnum.COST, priority=PriorityEnum.COST,
processingMode=ProcessingModeEnum.BASIC, processingMode=ProcessingModeEnum.BASIC,
operationTypes=[OperationTypeEnum.WEB_RESEARCH], operationTypes=createOperationTypeRatings(
(OperationTypeEnum.WEB_RESEARCH, 6),
(OperationTypeEnum.WEB_SEARCH, 5),
(OperationTypeEnum.WEB_CRAWL, 4),
(OperationTypeEnum.WEB_NEWS, 4),
(OperationTypeEnum.WEB_QUESTIONS, 10)
),
version="mistral-7b-instruct", version="mistral-7b-instruct",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.002 + (bytesReceived / 4 / 1000) * 0.002 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.002 + (bytesReceived / 4 / 1000) * 0.002
), ),
AiModel( AiModel(
name="perplexity_getCurrentNews", name="mistral-7b-instruct-news",
displayName="Mistral 7B Instruct News", displayName="Perplexity Mistral 7B Instruct News",
connectorType="perplexity", connectorType="perplexity",
apiUrl="https://api.perplexity.ai/chat/completions",
temperature=0.2,
maxTokens=32000, maxTokens=32000,
contextLength=32000, contextLength=32000,
costPer1kTokensInput=0.002, costPer1kTokensInput=0.002,
costPer1kTokensOutput=0.002, costPer1kTokensOutput=0.002,
speedRating=8, speedRating=9, # Fast for news tasks
qualityRating=8, qualityRating=7, # Good but not premium quality
capabilities=[ModelCapabilitiesEnum.WEB_SEARCH, ModelCapabilitiesEnum.NEWS, ModelCapabilitiesEnum.CURRENT_EVENTS], # capabilities removed (not used in business logic)
functionCall=self.getCurrentNews, functionCall=self.getCurrentNews,
priority=PriorityEnum.COST, priority=PriorityEnum.COST,
processingMode=ProcessingModeEnum.BASIC, processingMode=ProcessingModeEnum.BASIC,
operationTypes=[OperationTypeEnum.WEB_RESEARCH], operationTypes=createOperationTypeRatings(
(OperationTypeEnum.WEB_RESEARCH, 6),
(OperationTypeEnum.WEB_SEARCH, 5),
(OperationTypeEnum.WEB_CRAWL, 4),
(OperationTypeEnum.WEB_NEWS, 10),
(OperationTypeEnum.WEB_QUESTIONS, 4)
),
version="mistral-7b-instruct", version="mistral-7b-instruct",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.002 + (bytesReceived / 4 / 1000) * 0.002 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.002 + (bytesReceived / 4 / 1000) * 0.002
) )
@ -159,18 +192,18 @@ class AiPerplexity(BaseConnectorAi):
messages = modelCall.messages messages = modelCall.messages
model = modelCall.model model = modelCall.model
options = modelCall.options options = modelCall.options
temperature = options.get("temperature", self.config.get("temperature", 0.2)) temperature = options.get("temperature", model.temperature)
maxTokens = model.maxTokens maxTokens = model.maxTokens
payload = { payload = {
"model": self.modelName, "model": model.name,
"messages": messages, "messages": messages,
"temperature": temperature, "temperature": temperature,
"max_tokens": maxTokens "max_tokens": maxTokens
} }
response = await self.httpClient.post( response = await self.httpClient.post(
self.apiUrl, model.apiUrl,
json=payload json=payload
) )
@ -196,7 +229,7 @@ class AiPerplexity(BaseConnectorAi):
return AiModelResponse( return AiModelResponse(
content=content, content=content,
success=True, success=True,
modelId=self.modelName, modelId=model.name,
metadata={"response_id": responseJson.get("id", "")} metadata={"response_id": responseJson.get("id", "")}
) )
@ -204,47 +237,33 @@ class AiPerplexity(BaseConnectorAi):
logger.error(f"Error calling Perplexity API: {str(e)}") logger.error(f"Error calling Perplexity API: {str(e)}")
raise HTTPException(status_code=500, detail=f"Error calling Perplexity API: {str(e)}") raise HTTPException(status_code=500, detail=f"Error calling Perplexity API: {str(e)}")
async def callAiWithWebSearch(self, query: str, temperature: float = None, maxTokens: int = None) -> str: async def callAiWithWebSearch(self, modelCall: AiModelCall) -> AiModelResponse:
""" """
Calls Perplexity API with web search capabilities for research. Calls Perplexity API with web search capabilities for research using standardized pattern.
Args: Args:
query: The research query or question modelCall: AiModelCall with messages and options
temperature: Temperature for response generation (0.0-1.0)
maxTokens: Maximum number of tokens in the response
Returns: Returns:
The response from Perplexity with web search context AiModelResponse with content and metadata
""" """
try: try:
# Use parameters from configuration if none were overridden # Extract parameters from modelCall
if temperature is None: messages = modelCall.messages
temperature = self.config.get("temperature", 0.2) model = modelCall.model
options = modelCall.options
# Don't set maxTokens from config - let the model use its full context length temperature = options.get("temperature", model.temperature)
# Our continuation system handles stopping early via prompt engineering maxTokens = model.maxTokens
# For web search, we use the configured model name
webSearchModel = self.modelName
payload = { payload = {
"model": webSearchModel, "model": model.name,
"messages": [ "messages": messages,
{ "temperature": temperature,
"role": "user", "max_tokens": maxTokens
"content": query
}
],
"temperature": temperature
} }
# Add max_tokens - use provided value or throw error
if maxTokens is None:
raise ValueError("maxTokens must be provided for Perplexity API calls")
payload["max_tokens"] = maxTokens
response = await self.httpClient.post( response = await self.httpClient.post(
self.apiUrl, model.apiUrl,
json=payload json=payload
) )
@ -265,79 +284,190 @@ class AiPerplexity(BaseConnectorAi):
responseJson = response.json() responseJson = response.json()
content = responseJson["choices"][0]["message"]["content"] content = responseJson["choices"][0]["message"]["content"]
return content
return AiModelResponse(
content=content,
success=True,
modelId=model.name,
metadata={"response_id": responseJson.get("id", "")}
)
except Exception as e: except Exception as e:
logger.error(f"Error calling Perplexity Web Search API: {str(e)}") logger.error(f"Error calling Perplexity Web Search API: {str(e)}")
raise HTTPException(status_code=500, detail=f"Error calling Perplexity Web Search API: {str(e)}") raise HTTPException(status_code=500, detail=f"Error calling Perplexity Web Search API: {str(e)}")
async def researchTopic(self, topic: str, depth: str = "basic") -> str: async def researchTopic(self, modelCall: AiModelCall) -> AiModelResponse:
""" """
Research a topic using Perplexity's web search capabilities. Research a topic using Perplexity's web search capabilities using standardized pattern.
Args: Args:
topic: The topic to research modelCall: AiModelCall with messages and options
depth: Research depth - "basic", "detailed", or "comprehensive"
Returns: Returns:
Comprehensive research results on the topic AiModelResponse with research content
""" """
try: try:
# Create research prompts based on depth # Extract parameters from modelCall
if depth == "basic": messages = modelCall.messages
prompt = f"Provide a basic overview of: {topic}" model = modelCall.model
elif depth == "detailed": options = modelCall.options
prompt = f"Provide a detailed analysis of: {topic}. Include recent developments, key facts, and important information." temperature = options.get("temperature", model.temperature)
else: # comprehensive maxTokens = model.maxTokens
prompt = f"Provide a comprehensive research report on: {topic}. Include recent developments, key facts, statistics, expert opinions, and current trends."
return await self.callAiWithWebSearch(prompt) payload = {
"model": model.name,
"messages": messages,
"temperature": temperature,
"max_tokens": maxTokens
}
response = await self.httpClient.post(
model.apiUrl,
json=payload
)
if response.status_code != 200:
error_detail = f"Perplexity Research API error: {response.status_code} - {response.text}"
logger.error(error_detail)
if response.status_code == 429:
error_message = "Rate limit exceeded for research. Please wait before making another request."
elif response.status_code == 401:
error_message = "Invalid API key for research. Please check your Perplexity API configuration."
elif response.status_code == 400:
error_message = f"Invalid request to Perplexity Research API: {response.text}"
else:
error_message = f"Perplexity Research API error ({response.status_code}): {response.text}"
raise HTTPException(status_code=500, detail=error_message)
responseJson = response.json()
content = responseJson["choices"][0]["message"]["content"]
return AiModelResponse(
content=content,
success=True,
modelId=model.name,
metadata={"response_id": responseJson.get("id", "")}
)
except Exception as e: except Exception as e:
logger.error(f"Error researching topic: {str(e)}") logger.error(f"Error researching topic: {str(e)}")
raise HTTPException(status_code=500, detail=f"Error researching topic: {str(e)}") raise HTTPException(status_code=500, detail=f"Error researching topic: {str(e)}")
async def answerQuestion(self, question: str, context: str = None) -> str: async def answerQuestion(self, modelCall: AiModelCall) -> AiModelResponse:
""" """
Answer a question using web search for current information. Answer a question using web search for current information using standardized pattern.
Args: Args:
question: The question to answer modelCall: AiModelCall with messages and options
context: Optional context to provide
Returns: Returns:
Answer with web search context AiModelResponse with answer content
""" """
try: try:
if context: # Extract parameters from modelCall
prompt = f"Context: {context}\n\nQuestion: {question}\n\nPlease provide a comprehensive answer using current information from the web." messages = modelCall.messages
else: model = modelCall.model
prompt = f"Question: {question}\n\nPlease provide a comprehensive answer using current information from the web." options = modelCall.options
temperature = options.get("temperature", model.temperature)
maxTokens = model.maxTokens
return await self.callAiWithWebSearch(prompt) payload = {
"model": model.name,
"messages": messages,
"temperature": temperature,
"max_tokens": maxTokens
}
response = await self.httpClient.post(
model.apiUrl,
json=payload
)
if response.status_code != 200:
error_detail = f"Perplexity Q&A API error: {response.status_code} - {response.text}"
logger.error(error_detail)
if response.status_code == 429:
error_message = "Rate limit exceeded for Q&A. Please wait before making another request."
elif response.status_code == 401:
error_message = "Invalid API key for Q&A. Please check your Perplexity API configuration."
elif response.status_code == 400:
error_message = f"Invalid request to Perplexity Q&A API: {response.text}"
else:
error_message = f"Perplexity Q&A API error ({response.status_code}): {response.text}"
raise HTTPException(status_code=500, detail=error_message)
responseJson = response.json()
content = responseJson["choices"][0]["message"]["content"]
return AiModelResponse(
content=content,
success=True,
modelId=model.name,
metadata={"response_id": responseJson.get("id", "")}
)
except Exception as e: except Exception as e:
logger.error(f"Error answering question: {str(e)}") logger.error(f"Error answering question: {str(e)}")
raise HTTPException(status_code=500, detail=f"Error answering question: {str(e)}") raise HTTPException(status_code=500, detail=f"Error answering question: {str(e)}")
async def getCurrentNews(self, topic: str = None, limit: int = 5) -> str: async def getCurrentNews(self, modelCall: AiModelCall) -> AiModelResponse:
""" """
Get current news on a specific topic. Get current news on a specific topic using standardized pattern.
Args: Args:
topic: The topic to get news about (optional) modelCall: AiModelCall with messages and options
limit: Number of news items to retrieve
Returns: Returns:
Current news information AiModelResponse with news content
""" """
try: try:
if topic: # Extract parameters from modelCall
prompt = f"Get the latest news about {topic}. Provide {limit} recent news items with sources and dates." messages = modelCall.messages
else: model = modelCall.model
prompt = f"Get the latest news. Provide {limit} recent news items with sources and dates." options = modelCall.options
temperature = options.get("temperature", model.temperature)
maxTokens = model.maxTokens
return await self.callAiWithWebSearch(prompt) payload = {
"model": model.name,
"messages": messages,
"temperature": temperature,
"max_tokens": maxTokens
}
response = await self.httpClient.post(
model.apiUrl,
json=payload
)
if response.status_code != 200:
error_detail = f"Perplexity News API error: {response.status_code} - {response.text}"
logger.error(error_detail)
if response.status_code == 429:
error_message = "Rate limit exceeded for news. Please wait before making another request."
elif response.status_code == 401:
error_message = "Invalid API key for news. Please check your Perplexity API configuration."
elif response.status_code == 400:
error_message = f"Invalid request to Perplexity News API: {response.text}"
else:
error_message = f"Perplexity News API error ({response.status_code}): {response.text}"
raise HTTPException(status_code=500, detail=error_message)
responseJson = response.json()
content = responseJson["choices"][0]["message"]["content"]
return AiModelResponse(
content=content,
success=True,
modelId=model.name,
metadata={"response_id": responseJson.get("id", "")}
)
except Exception as e: except Exception as e:
logger.error(f"Error getting current news: {str(e)}") logger.error(f"Error getting current news: {str(e)}")

View file

@ -3,13 +3,14 @@
import logging import logging
import asyncio import asyncio
import re
from dataclasses import dataclass from dataclasses import dataclass
from typing import Optional, List from typing import Optional, List
from tavily import AsyncTavilyClient from tavily import AsyncTavilyClient
from modules.shared.configuration import APP_CONFIG from modules.shared.configuration import APP_CONFIG
from modules.shared.timezoneUtils import get_utc_timestamp from modules.shared.timezoneUtils import get_utc_timestamp
from modules.aicore.aicoreBase import BaseConnectorAi from modules.aicore.aicoreBase import BaseConnectorAi
from modules.datamodels.datamodelAi import AiModel, ModelCapabilitiesEnum, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelResponse from modules.datamodels.datamodelAi import AiModel, PriorityEnum, ProcessingModeEnum, OperationTypeEnum, AiModelResponse, createOperationTypeRatings
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -69,78 +70,102 @@ class ConnectorWeb(BaseConnectorAi):
"""Get the connector type identifier.""" """Get the connector type identifier."""
return "tavily" return "tavily"
def _extractUrlsFromPrompt(self, prompt: str) -> List[str]:
"""Extract URLs from a text prompt using regex."""
if not prompt:
return []
# URL regex pattern - matches http/https URLs
url_pattern = r'https?://(?:[-\w.])+(?:[:\d]+)?(?:/(?:[\w/_.])*(?:\?(?:[\w&=%.])*)?(?:#(?:[\w.])*)?)?'
urls = re.findall(url_pattern, prompt)
# Remove duplicates while preserving order
seen = set()
unique_urls = []
for url in urls:
if url not in seen:
seen.add(url)
unique_urls.append(url)
return unique_urls
def getModels(self) -> List[AiModel]: def getModels(self) -> List[AiModel]:
"""Get all available Tavily models.""" """Get all available Tavily models."""
return [ return [
AiModel( AiModel(
name="tavily_search", name="tavily-search",
displayName="Tavily Search", displayName="Tavily Search",
connectorType="tavily", connectorType="tavily",
apiUrl="https://api.tavily.com/search",
temperature=0.0, # Web search doesn't use temperature
maxTokens=0, # Web search doesn't use tokens maxTokens=0, # Web search doesn't use tokens
contextLength=0, contextLength=0,
costPer1kTokensInput=0.0, costPer1kTokensInput=0.0,
costPer1kTokensOutput=0.0, costPer1kTokensOutput=0.0,
speedRating=8, speedRating=9, # Very fast for URL discovery
qualityRating=8, qualityRating=9, # Excellent URL discovery quality
capabilities=[ModelCapabilitiesEnum.WEB_SEARCH, ModelCapabilitiesEnum.INFORMATION_RETRIEVAL, ModelCapabilitiesEnum.URL_DISCOVERY], # capabilities removed (not used in business logic)
functionCall=self.search, functionCall=self.search,
priority=PriorityEnum.BALANCED, priority=PriorityEnum.BALANCED,
processingMode=ProcessingModeEnum.BASIC, processingMode=ProcessingModeEnum.BASIC,
operationTypes=[OperationTypeEnum.WEB_RESEARCH], operationTypes=createOperationTypeRatings(
(OperationTypeEnum.WEB_SEARCH, 10),
(OperationTypeEnum.WEB_RESEARCH, 3),
(OperationTypeEnum.WEB_CRAWL, 2),
(OperationTypeEnum.WEB_NEWS, 3),
(OperationTypeEnum.WEB_QUESTIONS, 2)
),
version="tavily-search", version="tavily-search",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived, searchDepth="basic", numRequests=1: numRequests * (1 if searchDepth == "basic" else 2) * 0.008 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived, searchDepth="basic", numRequests=1: numRequests * (1 if searchDepth == "basic" else 2) * 0.008
), ),
AiModel( AiModel(
name="tavily_extract", name="tavily-extract",
displayName="Tavily Extract", displayName="Tavily Extract",
connectorType="tavily", connectorType="tavily",
maxTokens=0, # Web extraction doesn't use tokens apiUrl="https://api.tavily.com/extract",
contextLength=0, temperature=0.0, # Web crawling doesn't use temperature
costPer1kTokensInput=0.0,
costPer1kTokensOutput=0.0,
speedRating=6,
qualityRating=8,
capabilities=[ModelCapabilitiesEnum.WEB_CRAWLING, ModelCapabilitiesEnum.CONTENT_EXTRACTION, ModelCapabilitiesEnum.TEXT_EXTRACTION],
functionCall=self.crawl,
priority=PriorityEnum.BALANCED,
processingMode=ProcessingModeEnum.BASIC,
operationTypes=[OperationTypeEnum.WEB_RESEARCH],
version="tavily-extract",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived, extractionDepth="basic", numSuccessfulUrls=1: (numSuccessfulUrls / 5) * (1 if extractionDepth == "basic" else 2) * 0.008
),
AiModel(
name="tavily_crawl",
displayName="Tavily Crawl",
connectorType="tavily",
maxTokens=0, # Web crawling doesn't use tokens maxTokens=0, # Web crawling doesn't use tokens
contextLength=0, contextLength=0,
costPer1kTokensInput=0.0, costPer1kTokensInput=0.0,
costPer1kTokensOutput=0.0, costPer1kTokensOutput=0.0,
speedRating=6, speedRating=7, # Good for content extraction
qualityRating=8, qualityRating=9, # Excellent content extraction quality
capabilities=[ModelCapabilitiesEnum.WEB_CRAWLING, ModelCapabilitiesEnum.CONTENT_EXTRACTION, ModelCapabilitiesEnum.MAPPING], # capabilities removed (not used in business logic)
functionCall=self.crawl, functionCall=self.crawl,
priority=PriorityEnum.BALANCED, priority=PriorityEnum.BALANCED,
processingMode=ProcessingModeEnum.BASIC, processingMode=ProcessingModeEnum.BASIC,
operationTypes=[OperationTypeEnum.WEB_RESEARCH], operationTypes=createOperationTypeRatings(
version="tavily-crawl", (OperationTypeEnum.WEB_RESEARCH, 3),
(OperationTypeEnum.WEB_CRAWL, 10),
(OperationTypeEnum.WEB_NEWS, 3),
(OperationTypeEnum.WEB_QUESTIONS, 2)
),
version="tavily-extract",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived, numPages=10, extractionDepth="basic", withInstructions=False, numSuccessfulExtractions=10: ((numPages / 10) * (2 if withInstructions else 1) + (numSuccessfulExtractions / 5) * (1 if extractionDepth == "basic" else 2)) * 0.008 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived, numPages=10, extractionDepth="basic", withInstructions=False, numSuccessfulExtractions=10: ((numPages / 10) * (2 if withInstructions else 1) + (numSuccessfulExtractions / 5) * (1 if extractionDepth == "basic" else 2)) * 0.008
), ),
AiModel( AiModel(
name="tavily_scrape", name="tavily-search-extract",
displayName="Tavily Scrape", displayName="Tavily Search & Extract",
connectorType="tavily", connectorType="tavily",
apiUrl="https://api.tavily.com/search",
temperature=0.0, # Web scraping doesn't use temperature
maxTokens=0, # Web scraping doesn't use tokens maxTokens=0, # Web scraping doesn't use tokens
contextLength=0, contextLength=0,
costPer1kTokensInput=0.0, costPer1kTokensInput=0.0,
costPer1kTokensOutput=0.0, costPer1kTokensOutput=0.0,
speedRating=6, speedRating=7, # Good for combined search+extract
qualityRating=8, qualityRating=8, # Good quality for structured data
capabilities=[ModelCapabilitiesEnum.WEB_SEARCH, ModelCapabilitiesEnum.WEB_CRAWLING, ModelCapabilitiesEnum.CONTENT_EXTRACTION, ModelCapabilitiesEnum.INFORMATION_RETRIEVAL], # capabilities removed (not used in business logic)
functionCall=self.scrape, functionCall=self.scrape,
priority=PriorityEnum.BALANCED, priority=PriorityEnum.BALANCED,
processingMode=ProcessingModeEnum.BASIC, processingMode=ProcessingModeEnum.BASIC,
operationTypes=[OperationTypeEnum.WEB_RESEARCH], operationTypes=createOperationTypeRatings(
(OperationTypeEnum.WEB_RESEARCH, 8),
(OperationTypeEnum.WEB_SEARCH, 6),
(OperationTypeEnum.WEB_CRAWL, 6),
(OperationTypeEnum.WEB_NEWS, 5),
(OperationTypeEnum.WEB_QUESTIONS, 5)
),
version="tavily-search-extract", version="tavily-search-extract",
calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived, searchDepth="basic", numSuccessfulUrls=1, extractionDepth="basic": ((1 if searchDepth == "basic" else 2) + (numSuccessfulUrls / 5) * (1 if extractionDepth == "basic" else 2)) * 0.008 calculatePriceUsd=lambda processingTime, bytesSent, bytesReceived, searchDepth="basic", numSuccessfulUrls=1, extractionDepth="basic": ((1 if searchDepth == "basic" else 2) + (numSuccessfulUrls / 5) * (1 if extractionDepth == "basic" else 2)) * 0.008
) )
@ -148,9 +173,9 @@ class ConnectorWeb(BaseConnectorAi):
@classmethod @classmethod
async def create(cls): async def create(cls):
api_key = APP_CONFIG.get("Connector_WebTavily_API_KEY_SECRET") api_key = APP_CONFIG.get("Connector_AiTavily_API_SECRET")
if not api_key: if not api_key:
raise ValueError("Tavily API key not configured. Please set Connector_WebTavily_API_KEY_SECRET in config.ini") raise ValueError("Tavily API key not configured. Please set Connector_AiTavily_API_SECRET in config.ini")
# Load and cache web crawl related configuration # Load and cache web crawl related configuration
crawlTimeout = int(APP_CONFIG.get("Web_Crawl_TIMEOUT", "30")) crawlTimeout = int(APP_CONFIG.get("Web_Crawl_TIMEOUT", "30"))
crawlMaxRetries = int(APP_CONFIG.get("Web_Crawl_MAX_RETRIES", "3")) crawlMaxRetries = int(APP_CONFIG.get("Web_Crawl_MAX_RETRIES", "3"))
@ -226,6 +251,18 @@ class ConnectorWeb(BaseConnectorAi):
options = modelCall.options options = modelCall.options
urls = options.get("urls", []) urls = options.get("urls", [])
# If no URLs provided, try to extract URLs from the prompt
if not urls and modelCall.messages:
prompt = modelCall.messages[0]["content"] if modelCall.messages else ""
urls = self._extractUrlsFromPrompt(prompt)
if not urls:
return AiModelResponse(
content="No URLs provided for crawling",
success=False,
error="No URLs found in options or prompt"
)
raw_results = await self._crawl( raw_results = await self._crawl(
urls, urls,
extract_depth=options.get("extract_depth"), extract_depth=options.get("extract_depth"),

View file

@ -3,10 +3,9 @@ Unified modules.datamodels package.
Usage examples: Usage examples:
from modules.datamodels import ai from modules.datamodels import ai
from modules.datamodels import web from modules.datamodels import uam
""" """
from . import datamodelAi as ai from . import datamodelAi as ai
from . import datamodelWeb as web
from . import datamodelUam as uam from . import datamodelUam as uam
from . import datamodelSecurity as security from . import datamodelSecurity as security
from . import datamodelNeutralizer as neutralizer from . import datamodelNeutralizer as neutralizer

View file

@ -1,4 +1,4 @@
from typing import Optional, List, Dict, Any, Literal, Callable, TYPE_CHECKING from typing import Optional, List, Dict, Any, Literal, Callable, TYPE_CHECKING, Tuple
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from enum import Enum from enum import Enum
@ -7,15 +7,54 @@ if TYPE_CHECKING:
# Operation Types # Operation Types
class OperationTypeEnum(str, Enum): class OperationTypeEnum(str, Enum):
GENERAL = "general"
# Planning Operation
PLAN = "plan" PLAN = "plan"
ANALYSE = "analyse"
GENERATE = "generate" # Data Operations
EXTRACT = "extract" DATA_ANALYSE = "dataAnalyse"
WEB_RESEARCH = "webResearch" DATA_GENERATE = "dataGenerate"
DATA_EXTRACT = "dataExtract"
# Image Operations
IMAGE_ANALYSE = "imageAnalyse" IMAGE_ANALYSE = "imageAnalyse"
IMAGE_GENERATE = "imageGenerate" IMAGE_GENERATE = "imageGenerate"
# Web Operations
WEB_SEARCH = "webSearch" # Returns list of URLs only
WEB_CRAWL = "webCrawl" # Returns content from given URLs
WEB_RESEARCH = "webResearch" # WEB_SEARCH + WEB_CRAWL combined (scrape function)
WEB_QUESTIONS = "webQuestions" # Question-answering web research
WEB_NEWS = "webNews" # News-specific web research
# Operation Type Rating - Helper class for capability ratings
class OperationTypeRating(BaseModel):
"""Represents an operation type with its capability rating (1-10)."""
operationType: OperationTypeEnum = Field(description="The operation type")
rating: int = Field(ge=1, le=10, description="Capability rating (1-10, higher = better for this operation type)")
def __str__(self) -> str:
return f"{self.operationType.value}({self.rating})"
def __repr__(self) -> str:
return f"OperationTypeRating({self.operationType.value}, {self.rating})"
# Helper function to create operation type ratings easily
def createOperationTypeRatings(*ratings: Tuple[OperationTypeEnum, int]) -> List[OperationTypeRating]:
"""
Helper function to create operation type ratings easily.
Usage:
operationTypes = createOperationTypeRatings(
(OperationTypeEnum.DATA_ANALYSE, 8),
(OperationTypeEnum.WEB_RESEARCH, 10),
(OperationTypeEnum.WEB_NEWS, 7)
)
"""
return [OperationTypeRating(operationType=ot, rating=rating) for ot, rating in ratings]
# Processing Modes # Processing Modes
class ProcessingModeEnum(str, Enum): class ProcessingModeEnum(str, Enum):
@ -31,47 +70,21 @@ class PriorityEnum(str, Enum):
BALANCED = "balanced" BALANCED = "balanced"
# Model Capabilities Enumeration # Model Capabilities - REMOVED: Not used in business logic
class ModelCapabilitiesEnum(str, Enum):
# Text generation capabilities
TEXT_GENERATION = "text_generation"
CHAT = "chat"
REASONING = "reasoning"
ANALYSIS = "analysis"
# Image capabilities
IMAGE_ANALYSE = "imageAnalyse"
IMAGE_GENERATE = "imageGenerate"
VISION = "vision"
MULTIMODAL = "multimodal"
ART = "art"
VISUAL_CREATION = "visual_creation"
# Web capabilities
WEB_SEARCH = "web_search"
WEB_CRAWLING = "web_crawling"
CONTENT_EXTRACTION = "content_extraction"
TEXT_EXTRACTION = "text_extraction"
INFORMATION_RETRIEVAL = "information_retrieval"
URL_DISCOVERY = "url_discovery"
MAPPING = "mapping"
# Research capabilities
RESEARCH = "research"
QUESTION_ANSWERING = "question_answering"
INFORMATION_GATHERING = "information_gathering"
NEWS = "news"
CURRENT_EVENTS = "current_events"
class AiModel(BaseModel): class AiModel(BaseModel):
"""Enhanced AI model definition with dynamic capabilities.""" """Enhanced AI model definition with dynamic capabilities."""
# Core identification # Core identification
name: str = Field(description="Unique model identifier") name: str = Field(description="Actual LLM model name used for API calls")
displayName: str = Field(description="Human-readable model name") displayName: str = Field(description="Human-readable model name with module prefix")
connectorType: str = Field(description="Type of connector (openai, anthropic, perplexity, tavily, etc.)") connectorType: str = Field(description="Type of connector (openai, anthropic, perplexity, tavily, etc.)")
# API configuration
apiUrl: str = Field(description="API endpoint URL for this model")
temperature: float = Field(default=0.2, ge=0.0, le=2.0, description="Default temperature for this model")
# Token and context limits # Token and context limits
maxTokens: int = Field(description="Maximum tokens this model can generate") maxTokens: int = Field(description="Maximum tokens this model can generate")
contextLength: int = Field(description="Maximum context length this model can handle") contextLength: int = Field(description="Maximum context length this model can handle")
@ -88,11 +101,10 @@ class AiModel(BaseModel):
functionCall: Optional[Callable] = Field(default=None, exclude=True, description="Function to call for this model") functionCall: Optional[Callable] = Field(default=None, exclude=True, description="Function to call for this model")
calculatePriceUsd: Optional[Callable] = Field(default=None, exclude=True, description="Function to calculate price in USD") calculatePriceUsd: Optional[Callable] = Field(default=None, exclude=True, description="Function to calculate price in USD")
# Selection criteria # Selection criteria - capabilities with ratings
capabilities: List[ModelCapabilitiesEnum] = Field(description="List of model capabilities. See ModelCapabilitiesEnum enum for available values.")
priority: PriorityEnum = Field(default=PriorityEnum.BALANCED, description="Default priority for this model. See PriorityEnum for available values.") priority: PriorityEnum = Field(default=PriorityEnum.BALANCED, description="Default priority for this model. See PriorityEnum for available values.")
processingMode: ProcessingModeEnum = Field(default=ProcessingModeEnum.BASIC, description="Default processing mode. See ProcessingModeEnum for available values.") processingMode: ProcessingModeEnum = Field(default=ProcessingModeEnum.BASIC, description="Default processing mode. See ProcessingModeEnum for available values.")
operationTypes: List[OperationTypeEnum] = Field(default=[], description="Operation types this model should avoid") operationTypes: List[OperationTypeRating] = Field(default=[], description="Operation types this model can handle with capability ratings (1-10)")
minContextLength: Optional[int] = Field(default=None, description="Minimum context length required") minContextLength: Optional[int] = Field(default=None, description="Minimum context length required")
isAvailable: bool = Field(default=True, description="Whether model is currently available") isAvailable: bool = Field(default=True, description="Whether model is currently available")
@ -111,7 +123,6 @@ class SelectionRule(BaseModel):
weight: float = Field(description="Weight for scoring (higher = more important)") weight: float = Field(description="Weight for scoring (higher = more important)")
operationTypes: List[OperationTypeEnum] = Field(description="Operation types this rule applies to") operationTypes: List[OperationTypeEnum] = Field(description="Operation types this rule applies to")
priority: PriorityEnum = Field(default=PriorityEnum.BALANCED, description="Priority level for this rule") priority: PriorityEnum = Field(default=PriorityEnum.BALANCED, description="Priority level for this rule")
capabilities: List[ModelCapabilitiesEnum] = Field(default=[], description="Required capabilities for this rule")
minQualityRating: Optional[int] = Field(default=None, description="Minimum quality rating") minQualityRating: Optional[int] = Field(default=None, description="Minimum quality rating")
maxCost: Optional[float] = Field(default=None, description="Maximum cost threshold") maxCost: Optional[float] = Field(default=None, description="Maximum cost threshold")
minContextLength: Optional[int] = Field(default=None, description="Minimum context length required") minContextLength: Optional[int] = Field(default=None, description="Minimum context length required")
@ -119,7 +130,7 @@ class SelectionRule(BaseModel):
class AiCallOptions(BaseModel): class AiCallOptions(BaseModel):
"""Options for centralized AI processing with clear operation types and tags.""" """Options for centralized AI processing with clear operation types and tags."""
operationType: OperationTypeEnum = Field(default=OperationTypeEnum.GENERAL, description="Type of operation") operationType: OperationTypeEnum = Field(default=OperationTypeEnum.DATA_ANALYSE, description="Type of operation")
priority: PriorityEnum = Field(default=PriorityEnum.BALANCED, description="Priority level") priority: PriorityEnum = Field(default=PriorityEnum.BALANCED, description="Priority level")
compressPrompt: bool = Field(default=True, description="Whether to compress the prompt") compressPrompt: bool = Field(default=True, description="Whether to compress the prompt")
compressContext: bool = Field(default=True, description="If False: process each chunk; If True: summarize and work on summary") compressContext: bool = Field(default=True, description="If False: process each chunk; If True: summarize and work on summary")
@ -131,7 +142,6 @@ class AiCallOptions(BaseModel):
resultFormat: Optional[str] = Field(default=None, description="Expected result format: txt, json, csv, xml, etc.") resultFormat: Optional[str] = Field(default=None, description="Expected result format: txt, json, csv, xml, etc.")
safetyMargin: float = Field(default=0.1, ge=0.0, le=0.5, description="Safety margin for token limits (0.0-0.5)") safetyMargin: float = Field(default=0.1, ge=0.0, le=0.5, description="Safety margin for token limits (0.0-0.5)")
capabilities: Optional[List[ModelCapabilitiesEnum]] = Field(default=None, description="Required model capabilities for filtering")
# Model generation parameters # Model generation parameters
temperature: Optional[float] = Field(default=None, ge=0.0, le=2.0, description="Temperature for response generation (0.0-2.0, lower = more consistent)") temperature: Optional[float] = Field(default=None, ge=0.0, le=2.0, description="Temperature for response generation (0.0-2.0, lower = more consistent)")

View file

@ -537,9 +537,21 @@ class AiObjects:
# Start timing # Start timing
startTime = time.time() startTime = time.time()
# Call the model's function directly # Create standardized call object for image analysis
modelCall = AiModelCall(
messages=[{"role": "user", "content": prompt}],
model=model,
options={"imageData": imageData, "mimeType": mimeType}
)
# Call the model with standardized interface
if model.functionCall: if model.functionCall:
content = await model.functionCall(prompt, imageData, mimeType) modelResponse = await model.functionCall(modelCall)
# Extract content from standardized response
if not modelResponse.success:
raise ValueError(f"Model call failed: {modelResponse.error}")
content = modelResponse.content
else: else:
raise ValueError(f"Model {model.name} has no function call defined") raise ValueError(f"Model {model.name} has no function call defined")
@ -586,10 +598,21 @@ class AiObjects:
# Start timing # Start timing
startTime = time.time() startTime = time.time()
# Call the model's function directly # Create standardized call object for image generation
modelCall = AiModelCall(
messages=[{"role": "user", "content": prompt}],
model=selectedModel,
options={"size": size, "quality": quality, "style": style}
)
# Call the model with standardized interface
if selectedModel.functionCall: if selectedModel.functionCall:
result = await selectedModel.functionCall(prompt, size, quality, style) modelResponse = await selectedModel.functionCall(modelCall)
content = str(result)
# Extract content from standardized response
if not modelResponse.success:
raise ValueError(f"Model call failed: {modelResponse.error}")
content = modelResponse.content
else: else:
raise ValueError(f"Model {modelName} has no function call defined") raise ValueError(f"Model {modelName} has no function call defined")
@ -1061,10 +1084,6 @@ Format your response in a clear, professional manner that would be helpful for s
raise ValueError(f"Model {modelName} not found") raise ValueError(f"Model {modelName} not found")
return model.dict() return model.dict()
async def getModelsByCapability(self, capability: str) -> List[str]:
"""Get model names that support a specific capability."""
models = modelRegistry.getModelsByCapability(capability)
return [model.name for model in models]
async def getModelsByTag(self, tag: str) -> List[str]: async def getModelsByTag(self, tag: str) -> List[str]:
"""Get model names that have a specific tag.""" """Get model names that have a specific tag."""

View file

@ -337,7 +337,7 @@ class SubDocumentGeneration:
# Prepare the AI call # Prepare the AI call
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum
requestOptions = AiCallOptions() requestOptions = AiCallOptions()
requestOptions.operationType = OperationTypeEnum.GENERAL requestOptions.operationType = OperationTypeEnum.DATA_GENERATE
# Create context with the extracted JSON content # Create context with the extracted JSON content
context = f"Extracted JSON content:\n{json.dumps(docData, indent=2)}" context = f"Extracted JSON content:\n{json.dumps(docData, indent=2)}"
@ -485,7 +485,7 @@ Return only the JSON response.
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum
request_options = AiCallOptions() request_options = AiCallOptions()
request_options.operationType = OperationTypeEnum.GENERAL request_options.operationType = OperationTypeEnum.DATA_GENERATE
request = AiCallRequest(prompt=analysis_prompt, context="", options=request_options) request = AiCallRequest(prompt=analysis_prompt, context="", options=request_options)
response = await ai_service.aiObjects.call(request) response = await ai_service.aiObjects.call(request)

View file

@ -71,7 +71,7 @@ class SubDocumentProcessing:
# Build extraction options WITHOUT chunking parameters # Build extraction options WITHOUT chunking parameters
extractionOptions: Dict[str, Any] = { extractionOptions: Dict[str, Any] = {
"prompt": prompt, "prompt": prompt,
"operationType": options.operationType if options else "general", "operationType": options.operationType if options else OperationTypeEnum.DATA_EXTRACT,
"processDocumentsIndividually": True, "processDocumentsIndividually": True,
# REMOVED: maxSize, textChunkSize, imageChunkSize # REMOVED: maxSize, textChunkSize, imageChunkSize
"mergeStrategy": { "mergeStrategy": {
@ -123,7 +123,7 @@ class SubDocumentProcessing:
# Build extraction options WITHOUT chunking parameters # Build extraction options WITHOUT chunking parameters
extractionOptions: Dict[str, Any] = { extractionOptions: Dict[str, Any] = {
"prompt": prompt, "prompt": prompt,
"operationType": options.operationType if options else "general", "operationType": options.operationType if options else OperationTypeEnum.DATA_EXTRACT,
"processDocumentsIndividually": True, "processDocumentsIndividually": True,
"mergeStrategy": { "mergeStrategy": {
"useIntelligentMerging": True, "useIntelligentMerging": True,
@ -211,7 +211,7 @@ class SubDocumentProcessing:
# Build extraction options for chunking with intelligent merging # Build extraction options for chunking with intelligent merging
extractionOptions: Dict[str, Any] = { extractionOptions: Dict[str, Any] = {
"prompt": custom_prompt, # Use the custom prompt instead of default "prompt": custom_prompt, # Use the custom prompt instead of default
"operationType": options.operationType if options else "general", "operationType": options.operationType if options else OperationTypeEnum.DATA_EXTRACT,
"processDocumentsIndividually": True, # Process each document separately "processDocumentsIndividually": True, # Process each document separately
"maxSize": model_capabilities["maxContextBytes"], "maxSize": model_capabilities["maxContextBytes"],
"chunkAllowed": True, "chunkAllowed": True,
@ -766,7 +766,7 @@ CONTINUATION INSTRUCTIONS:
elif part.mimeType and part.data and len(part.data.strip()) > 0: elif part.mimeType and part.data and len(part.data.strip()) > 0:
# Process any document container as text content # Process any document container as text content
request_options = options if options is not None else AiCallOptions() request_options = options if options is not None else AiCallOptions()
request_options.operationType = OperationTypeEnum.GENERAL request_options.operationType = OperationTypeEnum.DATA_EXTRACT
self.services.utils.debugLogToFile(f"EXTRACTION CONTAINER CHUNK {chunk_index}: Processing {part.mimeType} container as text with generate_json={generate_json}", "AI_SERVICE") self.services.utils.debugLogToFile(f"EXTRACTION CONTAINER CHUNK {chunk_index}: Processing {part.mimeType} container as text with generate_json={generate_json}", "AI_SERVICE")
logger.info(f"Chunk {chunk_index}: Processing {part.mimeType} container as text with generate_json={generate_json}") logger.info(f"Chunk {chunk_index}: Processing {part.mimeType} container as text with generate_json={generate_json}")
@ -855,7 +855,7 @@ CONTINUATION INSTRUCTIONS:
# Ensure options is not None and set correct operation type for text # Ensure options is not None and set correct operation type for text
request_options = options if options is not None else AiCallOptions() request_options = options if options is not None else AiCallOptions()
# FIXED: Set operation type to general for text processing # FIXED: Set operation type to general for text processing
request_options.operationType = OperationTypeEnum.GENERAL request_options.operationType = OperationTypeEnum.DATA_EXTRACT
self.services.utils.debugLogToFile(f"EXTRACTION CHUNK {chunk_index}: Calling aiObjects.call with operationType={request_options.operationType}, generate_json={generate_json}", "AI_SERVICE") self.services.utils.debugLogToFile(f"EXTRACTION CHUNK {chunk_index}: Calling aiObjects.call with operationType={request_options.operationType}, generate_json={generate_json}", "AI_SERVICE")
logger.info(f"Chunk {chunk_index}: Calling aiObjects.call with operationType={request_options.operationType}, generate_json={generate_json}") logger.info(f"Chunk {chunk_index}: Calling aiObjects.call with operationType={request_options.operationType}, generate_json={generate_json}")

View file

@ -326,7 +326,7 @@ class BaseRenderer(ABC):
try: try:
request_options = AiCallOptions() request_options = AiCallOptions()
request_options.operationType = OperationTypeEnum.GENERAL request_options.operationType = OperationTypeEnum.DATA_GENERATE
request = AiCallRequest(prompt=style_template, context="", options=request_options) request = AiCallRequest(prompt=style_template, context="", options=request_options)

View file

@ -179,7 +179,7 @@ Return only the compressed prompt, no explanations.
request = AiCallRequest( request = AiCallRequest(
prompt=compression_prompt, prompt=compression_prompt,
options=AiCallOptions( options=AiCallOptions(
operationType=OperationTypeEnum.GENERAL, operationType=OperationTypeEnum.DATA_GENERATE,
maxTokens=None, # Let the model use its full context length maxTokens=None, # Let the model use its full context length
temperature=0.3 # Lower temperature for more consistent compression temperature=0.3 # Lower temperature for more consistent compression
) )

View file

@ -160,7 +160,7 @@ class RendererPdf(BaseRenderer):
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum
request_options = AiCallOptions() request_options = AiCallOptions()
request_options.operationType = OperationTypeEnum.GENERAL request_options.operationType = OperationTypeEnum.DATA_GENERATE
request = AiCallRequest(prompt=style_template, context="", options=request_options) request = AiCallRequest(prompt=style_template, context="", options=request_options)

View file

@ -360,7 +360,7 @@ JSON ONLY. NO OTHER TEXT."""
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum
request_options = AiCallOptions() request_options = AiCallOptions()
request_options.operationType = OperationTypeEnum.GENERAL request_options.operationType = OperationTypeEnum.DATA_GENERATE
request = AiCallRequest(prompt=style_template, context="", options=request_options) request = AiCallRequest(prompt=style_template, context="", options=request_options)

View file

@ -277,7 +277,7 @@ class RendererXlsx(BaseRenderer):
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum
request_options = AiCallOptions() request_options = AiCallOptions()
request_options.operationType = OperationTypeEnum.GENERAL request_options.operationType = OperationTypeEnum.DATA_GENERATE
request = AiCallRequest(prompt=style_template, context="", options=request_options) request = AiCallRequest(prompt=style_template, context="", options=request_options)
response = await ai_service.aiObjects.call(request) response = await ai_service.aiObjects.call(request)

View file

@ -381,7 +381,7 @@ Extract the main intent and requirements for document processing. Focus on:
Respond with a clear, concise statement of the extraction intent. Respond with a clear, concise statement of the extraction intent.
""" """
request_options = AiCallOptions() request_options = AiCallOptions()
request_options.operationType = OperationTypeEnum.GENERAL request_options.operationType = OperationTypeEnum.DATA_GENERATE
request = AiCallRequest(prompt=analysis_prompt, context="", options=request_options) request = AiCallRequest(prompt=analysis_prompt, context="", options=request_options)
response = await aiService.aiObjects.call(request) response = await aiService.aiObjects.call(request)

View file

@ -207,4 +207,27 @@ class UtilsService:
return jsonUtils.parseJsonOrRaise(text) return jsonUtils.parseJsonOrRaise(text)
def jsonMergeRootLists(self, parts): def jsonMergeRootLists(self, parts):
return jsonUtils.mergeRootLists(parts) return jsonUtils.mergeRootLists(parts)
# ===== Enum utility functions =====
def mapToEnum(self, enum_class, value_str, default_value):
    """
    Map a raw value onto a member of *enum_class* by comparing it
    case-insensitively against each member's ``value``.

    Args:
        enum_class: The enum class to search.
        value_str: Raw value to map. Typically a string; other types are
            coerced with ``str()`` before comparison.
        default_value: Enum member returned when value_str is falsy or
            no member matches.

    Returns:
        The matching enum member, or ``default_value``.
    """
    if not value_str:
        return default_value
    # Coerce both sides to str so enums with non-string values (e.g. IntEnum)
    # and non-string inputs cannot raise AttributeError on .lower().
    needle = str(value_str).lower()
    for enum_item in enum_class:
        if str(enum_item.value).lower() == needle:
            return enum_item
    # No member matched: fall back to the caller-supplied default.
    return default_value

View file

@ -10,7 +10,7 @@ from datetime import datetime, UTC
from modules.workflows.methods.methodBase import MethodBase, action from modules.workflows.methods.methodBase import MethodBase, action
from modules.datamodels.datamodelChat import ActionResult from modules.datamodels.datamodelChat import ActionResult
from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum, ModelCapabilitiesEnum from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum
from modules.datamodels.datamodelChat import ChatDocument from modules.datamodels.datamodelChat import ChatDocument
from modules.aicore.aicorePluginTavily import WebResearchRequest from modules.aicore.aicorePluginTavily import WebResearchRequest
@ -34,19 +34,16 @@ class MethodAi(MethodBase):
GENERAL: GENERAL:
- Purpose: Process a user prompt with optional unlimited input documents to produce one or many output documents of the SAME format. - Purpose: Process a user prompt with optional unlimited input documents to produce one or many output documents of the SAME format.
- Input requirements: aiPrompt (required); optional documentList. - Input requirements: aiPrompt (required); optional documentList.
- Output format: Exactly one file format to select. For multiple output file formats to do different calls. - Output format: Exactly one file format to select. For multiple output file formats you need to do different calls.
Parameters: Parameters:
- aiPrompt (str, required): Instruction for the AI. - aiPrompt (str, required): Instruction for the AI.
- documentList (list, optional): Document reference(s) for context. - documentList (list, optional): Document reference(s) for context.
- resultType (str, optional): Output file extension - only one extension allowed (e.g. txt, json, md, csv, xml, html, pdf, docx, xlsx, png, ...). Default: txt. - resultType (str, optional): Output file extension - only one extension allowed (e.g. txt, json, md, csv, xml, html, pdf, docx, xlsx, png, ...). Default: txt.
- processingMode (str, optional): basic | advanced | detailed. Default: basic. - processingMode (str, optional): basic | advanced | detailed. Default: basic.
- includeMetadata (bool, optional): Include metadata when available. Default: True.
- operationType (str, optional): general | plan | analyse | generate | webResearch | imageAnalyse | imageGenerate. Default: general.
- priority (str, optional): speed | quality | cost | balanced. Default: balanced. - priority (str, optional): speed | quality | cost | balanced. Default: balanced.
- maxCost (float, optional): Cost limit. - maxCost (float, optional): Cost limit.
- maxProcessingTime (int, optional): Time limit in seconds. - maxProcessingTime (int, optional): Time limit in seconds.
- operationTypes (list, optional): Capability tags (e.g., text, chat, reasoning, analysis, image, vision, web, search).
""" """
try: try:
# Init progress logger # Init progress logger
@ -76,54 +73,24 @@ class MethodAi(MethodBase):
documentList = [documentList] documentList = [documentList]
resultType = parameters.get("resultType", "txt") resultType = parameters.get("resultType", "txt")
processingModeStr = parameters.get("processingMode", "basic") processingModeStr = parameters.get("processingMode", "basic")
includeMetadata = parameters.get("includeMetadata", True)
operationTypeStr = parameters.get("operationType", "general")
priorityStr = parameters.get("priority", "balanced") priorityStr = parameters.get("priority", "balanced")
maxCost = parameters.get("maxCost") maxCost = parameters.get("maxCost")
maxProcessingTime = parameters.get("maxProcessingTime") maxProcessingTime = parameters.get("maxProcessingTime")
operationTypes = parameters.get("operationTypes")
requiredTags = parameters.get("requiredTags", [])
# Map string parameters to enums # Dynamic operation type selection based on document presence
operationTypeMapping = { if documentList and len(documentList) > 0:
"general": OperationTypeEnum.GENERAL, # With documents: default to dataExtract (document intelligence)
"plan": OperationTypeEnum.PLAN, operationType = OperationTypeEnum.DATA_EXTRACT
"analyse": OperationTypeEnum.ANALYSE, logger.info(f"action.ai.processAuto-selected operationType EXTRACT (document intelligence mode - {len(documentList)} documents)")
"generate": OperationTypeEnum.GENERATE, else:
"webResearch": OperationTypeEnum.WEB_RESEARCH, # Without documents: default to dataGenerate (content generation)
"imageAnalyse": OperationTypeEnum.IMAGE_ANALYSE, operationType = OperationTypeEnum.DATA_GENERATE
"imageGenerate": OperationTypeEnum.IMAGE_GENERATE logger.info(f"action.ai.process Auto-selected operationType GENERATE (content generation mode - no documents)")
}
operationType = operationTypeMapping.get(operationTypeStr, OperationTypeEnum.GENERAL)
priorityMapping = { # Map string parameters to enums using centralized utility function
"speed": PriorityEnum.SPEED, priority = self.services.utils.mapToEnum(PriorityEnum, priorityStr, PriorityEnum.BALANCED)
"quality": PriorityEnum.QUALITY, processingMode = self.services.utils.mapToEnum(ProcessingModeEnum, processingModeStr, ProcessingModeEnum.BASIC)
"cost": PriorityEnum.COST,
"balanced": PriorityEnum.BALANCED
}
priority = priorityMapping.get(priorityStr, PriorityEnum.BALANCED)
processingModeMapping = {
"basic": ProcessingModeEnum.BASIC,
"advanced": ProcessingModeEnum.ADVANCED,
"detailed": ProcessingModeEnum.DETAILED
}
processingMode = processingModeMapping.get(processingModeStr, ProcessingModeEnum.BASIC)
# Map requiredTags from strings to ModelCapabilitiesEnum
if requiredTags and isinstance(requiredTags, list):
tagMapping = {
"text": ModelCapabilitiesEnum.TEXT_GENERATION,
"chat": ModelCapabilitiesEnum.CHAT,
"reasoning": ModelCapabilitiesEnum.REASONING,
"analysis": ModelCapabilitiesEnum.ANALYSIS,
"image": ModelCapabilitiesEnum.VISION,
"vision": ModelCapabilitiesEnum.VISION,
"web": ModelCapabilitiesEnum.WEB_SEARCH,
"search": ModelCapabilitiesEnum.WEB_SEARCH
}
requiredTags = [tagMapping.get(tag, tag) for tag in requiredTags if isinstance(tag, str)]
if not aiPrompt: if not aiPrompt:
logger.error(f"aiPrompt is missing or empty. Parameters: {parameters}") logger.error(f"aiPrompt is missing or empty. Parameters: {parameters}")
@ -162,7 +129,6 @@ class MethodAi(MethodBase):
resultFormat=output_format, resultFormat=output_format,
maxCost=maxCost, maxCost=maxCost,
maxProcessingTime=maxProcessingTime, maxProcessingTime=maxProcessingTime,
capabilities=requiredTags if requiredTags else None
) )
# Update progress - calling AI # Update progress - calling AI
@ -237,9 +203,8 @@ class MethodAi(MethodBase):
- urls (list, optional): Specific URLs to crawl. - urls (list, optional): Specific URLs to crawl.
- max_results (int, optional): Max search results. Default: 5. - max_results (int, optional): Max search results. Default: 5.
- max_pages (int, optional): Max pages to crawl per site. Default: 5. - max_pages (int, optional): Max pages to crawl per site. Default: 5.
- search_depth (str, optional): basic | advanced. Default: basic.
- extract_depth (str, optional): basic | advanced. Default: advanced. - extract_depth (str, optional): basic | advanced. Default: advanced.
- pages_search_depth (int, optional): Crawl depth level. Default: 2. - search_depth (int, optional): Crawl depth level - how many times to follow sublinks of a page. Default: 2.
- country (str, optional): Full English country name (ISO-3166; map codes via pycountry/i18n-iso-countries). - country (str, optional): Full English country name (ISO-3166; map codes via pycountry/i18n-iso-countries).
- time_range (str, optional): d | w | m | y. - time_range (str, optional): d | w | m | y.
- topic (str, optional): general | news | academic. - topic (str, optional): general | news | academic.
@ -250,9 +215,8 @@ class MethodAi(MethodBase):
urls = parameters.get("urls") urls = parameters.get("urls")
max_results = parameters.get("max_results", 5) max_results = parameters.get("max_results", 5)
max_pages = parameters.get("max_pages", 5) max_pages = parameters.get("max_pages", 5)
search_depth = parameters.get("search_depth", "basic")
extract_depth = parameters.get("extract_depth", "advanced") extract_depth = parameters.get("extract_depth", "advanced")
pages_search_depth = parameters.get("pages_search_depth", 2) search_depth = parameters.get("pages_search_depth", 2)
country = parameters.get("country") country = parameters.get("country")
time_range = parameters.get("time_range") time_range = parameters.get("time_range")
topic = parameters.get("topic") topic = parameters.get("topic")
@ -316,100 +280,3 @@ class MethodAi(MethodBase):
error=str(e) error=str(e)
) )
def _mergeDataChunks(self, chunks: List[str], resultType: str, mimeType: str) -> str:
"""Intelligently merge data chunks using strategies based on content type"""
try:
if resultType == "json":
return self._mergeJsonChunks(chunks)
elif resultType in ["csv", "table"]:
return self._mergeTableChunks(chunks)
elif resultType in ["txt", "md", "text"]:
return self._mergeTextChunks(chunks)
else:
# Default: simple concatenation
return "\n".join(str(chunk) for chunk in chunks)
except Exception as e:
logger.warning(f"Failed to merge chunks intelligently: {str(e)}, using simple concatenation")
return "\n".join(str(chunk) for chunk in chunks)
def _mergeJsonChunks(self, chunks: List[str]) -> str:
"""Merge JSON chunks intelligently"""
import json
merged_data = []
for i, chunk in enumerate(chunks):
try:
if isinstance(chunk, str):
chunk_data = json.loads(chunk)
else:
chunk_data = chunk
if isinstance(chunk_data, list):
merged_data.extend(chunk_data)
elif isinstance(chunk_data, dict):
# For objects, merge by combining keys
if not merged_data:
merged_data = chunk_data
else:
if isinstance(merged_data, dict):
merged_data.update(chunk_data)
else:
merged_data.append(chunk_data)
else:
merged_data.append(chunk_data)
except Exception as e:
logger.warning(f"Failed to parse chunk {i}: {str(e)}")
# Add as string if JSON parsing fails
merged_data.append(str(chunk))
return json.dumps(merged_data, indent=2)
def _mergeTableChunks(self, chunks: List[str]) -> str:
"""Merge table chunks (CSV) intelligently"""
import csv
import io
merged_rows = []
headers = None
for i, chunk in enumerate(chunks):
try:
# Parse CSV chunk
reader = csv.reader(io.StringIO(str(chunk)))
rows = list(reader)
if not rows:
continue
# First chunk: capture headers
if i == 0:
headers = rows[0] if rows else []
merged_rows.extend(rows)
else:
# Subsequent chunks: skip header if it matches
if rows and rows[0] == headers:
merged_rows.extend(rows[1:]) # Skip duplicate header
else:
merged_rows.extend(rows)
except Exception as e:
logger.warning(f"Failed to parse table chunk {i}: {str(e)}")
# Add as raw text if CSV parsing fails
merged_rows.append([f"Raw chunk {i}: {str(chunk)[:100]}..."])
# Convert back to CSV
output = io.StringIO()
writer = csv.writer(output)
writer.writerows(merged_rows)
return output.getvalue()
def _mergeTextChunks(self, chunks: List[str]) -> str:
"""Merge text chunks intelligently"""
# Simple concatenation with proper spacing
merged = []
for chunk in chunks:
chunk_str = str(chunk).strip()
if chunk_str:
merged.append(chunk_str)
return "\n\n".join(merged) # Double newline between chunks for readability

View file

@ -118,7 +118,7 @@ DELIVERED CONTENT TO CHECK:
# Call AI service for validation # Call AI service for validation
from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum
request_options = AiCallOptions() request_options = AiCallOptions()
request_options.operationType = OperationTypeEnum.GENERAL request_options.operationType = OperationTypeEnum.DATA_ANALYSE
response = await self.services.ai.callAiPlanning( response = await self.services.ai.callAiPlanning(
prompt=validationPrompt, prompt=validationPrompt,

View file

@ -61,7 +61,7 @@ CRITICAL: Respond with ONLY the JSON object below. Do not include any explanator
# Call AI service for analysis # Call AI service for analysis
from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum
request_options = AiCallOptions() request_options = AiCallOptions()
request_options.operationType = OperationTypeEnum.GENERAL request_options.operationType = OperationTypeEnum.DATA_ANALYSE
response = await self.services.ai.callAiPlanning( response = await self.services.ai.callAiPlanning(
prompt=analysisPrompt, prompt=analysisPrompt,

View file

@ -457,7 +457,7 @@ class ActionplanMode(BaseMode):
# Centralized AI call: Result validation (balanced analysis) with placeholders # Centralized AI call: Result validation (balanced analysis) with placeholders
options = AiCallOptions( options = AiCallOptions(
operationType=OperationTypeEnum.ANALYSE, operationType=OperationTypeEnum.DATA_ANALYSE,
priority=PriorityEnum.BALANCED, priority=PriorityEnum.BALANCED,
compressPrompt=True, compressPrompt=True,
compressContext=False, compressContext=False,

View file

@ -296,7 +296,7 @@ class ReactMode(BaseMode):
# Centralized AI call for parameter suggestion (balanced analysis) # Centralized AI call for parameter suggestion (balanced analysis)
options = AiCallOptions( options = AiCallOptions(
operationType=OperationTypeEnum.ANALYSE, operationType=OperationTypeEnum.DATA_ANALYSE,
priority=PriorityEnum.BALANCED, priority=PriorityEnum.BALANCED,
compressPrompt=True, compressPrompt=True,
compressContext=False, compressContext=False,
@ -611,7 +611,7 @@ class ReactMode(BaseMode):
# Centralized AI call for refinement decision (balanced analysis) # Centralized AI call for refinement decision (balanced analysis)
options = AiCallOptions( options = AiCallOptions(
operationType=OperationTypeEnum.ANALYSE, operationType=OperationTypeEnum.DATA_ANALYSE,
priority=PriorityEnum.BALANCED, priority=PriorityEnum.BALANCED,
compressPrompt=True, compressPrompt=True,
compressContext=False, compressContext=False,
@ -718,7 +718,7 @@ Return only the user-friendly message, no technical details."""
prompt=prompt, prompt=prompt,
placeholders=None, placeholders=None,
options=AiCallOptions( options=AiCallOptions(
operationType=OperationTypeEnum.GENERATE, operationType=OperationTypeEnum.DATA_GENERATE,
priority=PriorityEnum.SPEED, priority=PriorityEnum.SPEED,
compressPrompt=True, compressPrompt=True,
maxCost=0.01, maxCost=0.01,
@ -759,7 +759,7 @@ Return only the user-friendly message, no technical details."""
prompt=prompt, prompt=prompt,
placeholders=None, placeholders=None,
options=AiCallOptions( options=AiCallOptions(
operationType=OperationTypeEnum.GENERATE, operationType=OperationTypeEnum.DATA_GENERATE,
priority=PriorityEnum.SPEED, priority=PriorityEnum.SPEED,
compressPrompt=True, compressPrompt=True,
maxCost=0.01, maxCost=0.01,

View file

@ -1,16 +1,16 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
""" """
AI Model Selection Test - Prints prioritized fallback model lists used for AI calls AI Model Selection Test - Prints prioritized fallback model lists for all interface calls
Scenarios mirror typical calls in workflows/ (task planning, action planning, Tests all main interface methods in interfaceAiObjects.py and shows which models
analysis, and react-mode decisions), showing which models are shortlisted and are selected for each type of AI operation (text generation, image analysis,
their final prioritized order after rating and cost tie-breaking. image generation, web research, etc.).
""" """
import asyncio import asyncio
import os import os
import sys import sys
from typing import List, Tuple import base64
# Ensure gateway is on path when running directly # Ensure gateway is on path when running directly
@ -19,6 +19,8 @@ sys.path.append(os.path.dirname(__file__))
from modules.features.chatPlayground.mainChatPlayground import getServices from modules.features.chatPlayground.mainChatPlayground import getServices
from modules.datamodels.datamodelAi import ( from modules.datamodels.datamodelAi import (
AiCallOptions, AiCallOptions,
AiCallRequest,
AiModelCall,
OperationTypeEnum, OperationTypeEnum,
PriorityEnum, PriorityEnum,
ProcessingModeEnum, ProcessingModeEnum,
@ -42,8 +44,10 @@ class ModelSelectionTester:
async def initialize(self) -> None: async def initialize(self) -> None:
from modules.services.serviceAi.mainServiceAi import AiService from modules.services.serviceAi.mainServiceAi import AiService
from modules.interfaces.interfaceAiObjects import AiObjects
self.services.ai = await AiService.create(self.services) self.services.ai = await AiService.create(self.services)
self.aiObjects = await AiObjects.create()
async def _printFallbackListWithContext(self, title: str, prompt: str, context: str, options: AiCallOptions) -> None: async def _printFallbackListWithContext(self, title: str, prompt: str, context: str, options: AiCallOptions) -> None:
print(f"\n{'='*80}") print(f"\n{'='*80}")
@ -137,166 +141,355 @@ class ModelSelectionTester:
print(f" Size: {sizeRating:.3f}, ProcessingMode: {processingModeRating:.3f}, Priority: {priorityRating:.3f}") print(f" Size: {sizeRating:.3f}, ProcessingMode: {processingModeRating:.3f}, Priority: {priorityRating:.3f}")
async def run(self) -> None: async def run(self) -> None:
# Scenarios reflecting workflows/ """Test model selection for all interface methods."""
scenarios: List[Tuple[str, str, AiCallOptions]] = [] print("=" * 100)
print("AI INTERFACE MODEL SELECTION TEST")
print("=" * 100)
print("Testing model selection for all interface methods in interfaceAiObjects.py")
print("=" * 100)
# Task planning (taskPlanner, modeActionplan) # Test 1: Text Generation (call method)
scenarios.append( await self._testTextGeneration()
(
"PLAN - Quality, Detailed",
"Task planning for a multi-step business workflow.",
AiCallOptions(
operationType=OperationTypeEnum.PLAN,
priority=PriorityEnum.QUALITY,
compressPrompt=False,
compressContext=False,
processingMode=ProcessingModeEnum.DETAILED,
maxCost=0.10,
maxProcessingTime=30,
),
)
)
# Result validation / analysis (modeActionplan) # Test 2: Image Analysis (callImage method)
scenarios.append( await self._testImageAnalysis()
(
"ANALYSE - Balanced, Advanced",
"Validate action plan correctness and completeness.",
AiCallOptions(
operationType=OperationTypeEnum.ANALYSE,
priority=PriorityEnum.BALANCED,
compressPrompt=True,
compressContext=False,
processingMode=ProcessingModeEnum.ADVANCED,
maxCost=0.05,
maxProcessingTime=30,
),
)
)
# React mode - action selection (modeReact) # Test 3: Image Generation (generateImage method)
scenarios.append( await self._testImageGeneration()
(
"GENERAL - Balanced, Advanced (React: action selection)",
"Select next best action from context and state.",
AiCallOptions(
operationType=OperationTypeEnum.GENERAL,
priority=PriorityEnum.BALANCED,
compressPrompt=True,
compressContext=True,
processingMode=ProcessingModeEnum.ADVANCED,
maxCost=0.03,
maxProcessingTime=20,
),
)
)
# React mode - parameter suggestion (modeReact example) # Test 4: Web Search (searchWebsites method)
scenarios.append( await self._testWebSearch()
(
"ANALYSE - Balanced, Advanced (React: parameter suggestion)",
"Suggest parameters for the selected action as JSON.",
AiCallOptions(
operationType=OperationTypeEnum.ANALYSE,
priority=PriorityEnum.BALANCED,
compressPrompt=True,
compressContext=False,
processingMode=ProcessingModeEnum.ADVANCED,
maxCost=0.05,
maxProcessingTime=30,
resultFormat="json",
temperature=0.3,
),
)
)
# Intent analysis (user input understanding) # Test 5: Web Crawling (crawlWebsites method)
scenarios.append( await self._testWebCrawling()
(
"ANALYSE - Quality, Detailed (Intent Analysis)",
"Analyze user intent and extract key requirements from the following request: 'I need to create a comprehensive marketing strategy for our new product launch including budget allocation, timeline, and target audience analysis.'",
AiCallOptions(
operationType=OperationTypeEnum.ANALYSE,
priority=PriorityEnum.QUALITY,
compressPrompt=False,
compressContext=False,
processingMode=ProcessingModeEnum.DETAILED,
maxCost=0.08,
maxProcessingTime=45,
resultFormat="json",
temperature=0.2,
),
)
)
# Review/Validation (quality assurance) # Test 6: Web Research (webQuery method)
scenarios.append( await self._testWebResearch()
(
"ANALYSE - Quality, Detailed (Review/Validation)",
"Review and validate the following business proposal for completeness, accuracy, and compliance with industry standards. Identify any gaps or areas for improvement.",
AiCallOptions(
operationType=OperationTypeEnum.ANALYSE,
priority=PriorityEnum.QUALITY,
compressPrompt=False,
compressContext=False,
processingMode=ProcessingModeEnum.DETAILED,
maxCost=0.10,
maxProcessingTime=60,
resultFormat="json",
temperature=0.1,
),
)
)
# Large context scenario (to test size-based scoring) # Test 7: Content Analysis with Chunking
scenarios.append( await self._testContentAnalysis()
(
"GENERAL - Balanced, Advanced (Large Context Test)",
"Process this large document and provide a comprehensive summary.",
AiCallOptions(
operationType=OperationTypeEnum.GENERAL,
priority=PriorityEnum.BALANCED,
compressPrompt=False,
compressContext=False,
processingMode=ProcessingModeEnum.ADVANCED,
maxCost=0.15,
maxProcessingTime=120,
),
)
)
# Iterate and print lists # Test 8: Website Selection
for title, prompt, options in scenarios: await self._testWebsiteSelection()
await self._printFallbackList(title, prompt, options)
# Test with actual context to see size-based scoring # Test 9: Actual Interface Calls
largeContext = """ await self._testActualInterfaceCalls()
This is a comprehensive business document containing detailed information about our company's strategic initiatives,
financial performance, market analysis, competitive landscape, operational metrics, customer feedback,
product development roadmap, technology stack, human resources, legal compliance, risk management,
sustainability efforts, and future growth plans. The document spans multiple sections including executive summary,
market research, financial statements, operational reports, customer insights, product specifications,
technology architecture, HR policies, legal frameworks, risk assessments, environmental impact studies,
and strategic recommendations. This extensive content is designed to test the model selection algorithm's
ability to handle large context sizes and make intelligent decisions about which models are best suited
for processing such substantial amounts of information while maintaining efficiency and cost-effectiveness.
""" * 10 # Repeat to make it even larger
await self._printFallbackListWithContext( # Show model registry summary
"GENERAL - Balanced, Advanced (Large Context Test)", await self._showModelSummary()
"Analyze this comprehensive business document and provide key insights.",
largeContext, print("\n" + "=" * 100)
AiCallOptions( print("ALL INTERFACE TESTS COMPLETED")
operationType=OperationTypeEnum.GENERAL, print("=" * 100)
async def _testTextGeneration(self) -> None:
    """Print the prioritized fallback model list for several text-generation scenarios."""
    banner = "=" * 80
    print(f"\n{banner}")
    print("1. TEXT GENERATION (call method)")
    print(banner)
    # Each scenario pairs a prompt with the operation type it should exercise.
    scenarios = [
        ("Text Analysis", "Write a summary about artificial intelligence trends.", OperationTypeEnum.DATA_ANALYSE),
        ("Planning Task", "Create a project plan for software development.", OperationTypeEnum.PLAN),
        ("Analysis Task", "Analyze the pros and cons of cloud computing.", OperationTypeEnum.DATA_ANALYSE),
    ]
    for title, prompt, operation_type in scenarios:
        scenario_options = AiCallOptions(
            operationType=operation_type,
            priority=PriorityEnum.BALANCED,
            processingMode=ProcessingModeEnum.ADVANCED,
            maxCost=0.05,
            maxProcessingTime=30,
        )
        await self._printFallbackList(f"  {title}", prompt, scenario_options)
async def _testImageAnalysis(self) -> None:
    """Print the model fallback list for an image-analysis (callImage) request."""
    print(f"\n{'='*80}")
    print("2. IMAGE ANALYSIS (callImage method)")
    print(f"{'='*80}")
    # NOTE(review): the original built a base64-encoded 1x1 PNG here but never
    # passed it to anything; the dead computation was removed. Re-add it if
    # _printFallbackList ever accepts an image payload.
    options = AiCallOptions(
        operationType=OperationTypeEnum.IMAGE_ANALYSE,
        priority=PriorityEnum.BALANCED,
        processingMode=ProcessingModeEnum.ADVANCED,
        maxCost=0.02,
        maxProcessingTime=20,
    )
    prompt = "Describe what you see in this image."
    await self._printFallbackList("  Image Analysis", prompt, options)
async def _testImageGeneration(self) -> None:
    """Print the model fallback list for an image-generation request."""
    banner = "=" * 80
    print(f"\n{banner}")
    print("3. IMAGE GENERATION (generateImage method)")
    print(banner)
    generation_options = AiCallOptions(
        operationType=OperationTypeEnum.IMAGE_GENERATE,
        priority=PriorityEnum.QUALITY,
        processingMode=ProcessingModeEnum.DETAILED,
        maxCost=0.10,
        maxProcessingTime=60,
    )
    await self._printFallbackList(
        "  Image Generation",
        "A futuristic cityscape with flying cars and neon lights.",
        generation_options,
    )
async def _testWebResearch(self) -> None:
    """Print the model fallback list for a web-research (webQuery) request."""
    banner = "=" * 80
    print(f"\n{banner}")
    print("6. WEB RESEARCH (webQuery method)")
    print(banner)
    research_options = AiCallOptions(
        operationType=OperationTypeEnum.WEB_RESEARCH,
        priority=PriorityEnum.BALANCED,
        processingMode=ProcessingModeEnum.ADVANCED,
        maxCost=0.05,
        maxProcessingTime=30,
    )
    await self._printFallbackList(
        "  Web Research",
        "What are the latest trends in artificial intelligence?",
        research_options,
    )
async def _testWebSearch(self) -> None:
    """Print the model fallback list for a web-search (searchWebsites) request."""
    banner = "=" * 80
    print(f"\n{banner}")
    print("4. WEB SEARCH (searchWebsites method)")
    print(banner)
    search_options = AiCallOptions(
        operationType=OperationTypeEnum.WEB_SEARCH,
        priority=PriorityEnum.BALANCED,
        processingMode=ProcessingModeEnum.BASIC,
        maxCost=0.01,
        maxProcessingTime=30,
    )
    await self._printFallbackList(
        "  Web Search",
        "Search for artificial intelligence companies",
        search_options,
    )
async def _testWebCrawling(self) -> None:
    """Test model selection for web crawling calls."""
    banner = "=" * 80
    print(f"\n{banner}")
    print("5. WEB CRAWLING (crawlWebsites method)")
    print(banner)
    # Crawling is slow but simple: BASIC mode with a generous time budget.
    crawlOptions = AiCallOptions(
        operationType=OperationTypeEnum.WEB_CRAWL,
        priority=PriorityEnum.BALANCED,
        processingMode=ProcessingModeEnum.BASIC,
        maxCost=0.02,
        maxProcessingTime=60,
    )
    await self._printFallbackList(
        " Web Crawling",
        "Crawl content from these URLs",
        crawlOptions,
    )
async def _testContentAnalysis(self) -> None:
    """Test model selection for content analysis with chunking.

    Prints the failover model list that the DATA_ANALYSE operation type
    would resolve to for a content-analysis prompt.
    """
    print(f"\n{'='*80}")
    print("7. CONTENT ANALYSIS WITH CHUNKING")
    print(f"{'='*80}")
    # NOTE(review): the original built a `large_content` dict here (two URLs
    # mapped to ~57 KB repeated strings, "to trigger chunking") but never
    # passed it anywhere, so it only wasted memory. Removed as dead code.
    # If chunking is actually meant to be exercised, the content must be
    # threaded through the _printFallbackList call — TODO confirm intent.
    options = AiCallOptions(
        operationType=OperationTypeEnum.DATA_ANALYSE,
        priority=PriorityEnum.BALANCED,
        processingMode=ProcessingModeEnum.ADVANCED,
        maxCost=0.10,
        maxProcessingTime=60,
    )
    prompt = "Analyze this content and provide key insights."
    await self._printFallbackList(" Content Analysis", prompt, options)
async def _testWebsiteSelection(self) -> None:
    """Test model selection for website selection."""
    banner = "=" * 80
    print(f"\n{banner}")
    print("8. WEBSITE SELECTION (selectRelevantWebsites method)")
    print(banner)
    # selectRelevantWebsites delegates to webQuery internally, so this
    # exercises the same WEB_RESEARCH selection path.
    selectionOptions = AiCallOptions(
        operationType=OperationTypeEnum.WEB_RESEARCH,
        priority=PriorityEnum.BALANCED,
        processingMode=ProcessingModeEnum.ADVANCED,
        maxCost=0.03,
        maxProcessingTime=20,
    )
    await self._printFallbackList(
        " Website Selection",
        "Select the most relevant websites from this list for AI research.",
        selectionOptions,
    )
async def _testActualInterfaceCalls(self) -> None:
    """Test actual interface calls to show real model selection.

    Runs four representative scenarios (text generation, image analysis,
    image generation, web research) through the shared
    ``_printRealModelSelection`` helper; the original repeated the same
    select-and-print sequence four times verbatim.
    """
    print(f"\n{'='*80}")
    print("9. ACTUAL INTERFACE CALLS (Real Model Selection)")
    print(f"{'='*80}")
    # Test 1: Text generation - built as an AiCallRequest to mirror the
    # real aiObjects.call() entry point.
    print("\n Testing: aiObjects.call() - Text Generation")
    try:
        request = AiCallRequest(
            prompt="Write a short summary about machine learning.",
            context="",
            options=AiCallOptions(
                operationType=OperationTypeEnum.DATA_ANALYSE,
                priority=PriorityEnum.BALANCED,
                processingMode=ProcessingModeEnum.ADVANCED,
                maxCost=0.05,
                maxProcessingTime=30,
            )
        )
        self._printRealModelSelection(request.prompt, request.options)
    except Exception as e:
        print(f" Error: {e}")
    # Test 2: Image analysis call
    print("\n Testing: aiObjects.callImage() - Image Analysis")
    try:
        self._printRealModelSelection(
            "Describe this image",
            AiCallOptions(
                operationType=OperationTypeEnum.IMAGE_ANALYSE,
                priority=PriorityEnum.BALANCED,
                processingMode=ProcessingModeEnum.ADVANCED,
                maxCost=0.02,
                maxProcessingTime=20,
            ),
        )
    except Exception as e:
        print(f" Error: {e}")
    # Test 3: Image generation call
    print("\n Testing: aiObjects.generateImage() - Image Generation")
    try:
        self._printRealModelSelection(
            "A futuristic cityscape",
            AiCallOptions(
                operationType=OperationTypeEnum.IMAGE_GENERATE,
                priority=PriorityEnum.QUALITY,
                processingMode=ProcessingModeEnum.DETAILED,
                maxCost=0.10,
                maxProcessingTime=60,
            ),
        )
    except Exception as e:
        print(f" Error: {e}")
    # Test 4: Web research call
    print("\n Testing: aiObjects.webQuery() - Web Research")
    try:
        self._printRealModelSelection(
            "What are AI trends?",
            AiCallOptions(
                operationType=OperationTypeEnum.WEB_RESEARCH,
                priority=PriorityEnum.BALANCED,
                processingMode=ProcessingModeEnum.ADVANCED,
                maxCost=0.05,
                maxProcessingTime=30,
            ),
        )
    except Exception as e:
        print(f" Error: {e}")

def _printRealModelSelection(self, prompt: str, options) -> None:
    """Resolve the failover model list for prompt/options and print the
    selected primary model plus up to two fallbacks (or a not-found note).
    """
    availableModels = modelRegistry.getAvailableModels()
    failoverModelList = modelSelector.getFailoverModelList(
        prompt=prompt,
        context="",
        options=options,
        availableModels=availableModels,
    )
    if failoverModelList:
        print(f" Selected model: {failoverModelList[0].name}")
        print(f" Fallback models: {[m.name for m in failoverModelList[1:3]]}")
    else:
        print(" No suitable models found")
async def _showModelSummary(self) -> None:
    """Show summary of all available models and their capabilities."""
    banner = "=" * 80
    print(f"\n{banner}")
    print("MODEL REGISTRY SUMMARY")
    print(banner)
    availableModels = modelRegistry.getAvailableModels()
    print(f"Total models available: {len(availableModels)}")
    # Bucket models by connector type ('unknown' when the attribute is absent).
    modelsByConnector = {}
    for model in availableModels:
        connectorType = getattr(model, 'connectorType', 'unknown')
        modelsByConnector.setdefault(connectorType, []).append(model)
    print("\nModels by connector type:")
    for connectorType, connectorModels in modelsByConnector.items():
        print(f" {connectorType}: {len(connectorModels)} models")
        for model in connectorModels:
            print(f" - {model.name}: {getattr(model, 'capabilities', [])}")
    # For every operation type, count the models that declare support for it.
    print("\nOperation type support:")
    for opType in OperationTypeEnum:
        supportedModels = [
            m for m in availableModels
            if hasattr(m, 'operationTypes') and opType in m.operationTypes
        ]
        print(f" {opType.name}: {len(supportedModels)} models")
        if supportedModels:
            # Show at most the first three supporting models.
            print(f" Models: {', '.join(m.name for m in supportedModels[:3])}")
async def main() -> None: async def main() -> None:
tester = ModelSelectionTester() tester = ModelSelectionTester()

View file

@ -0,0 +1,97 @@
#!/usr/bin/env python3
"""
Test script to demonstrate the new operation type rating system.
This shows how models are now sorted by their capability ratings for specific operation types.
"""
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from modules.datamodels.datamodelAi import OperationTypeEnum, createOperationTypeRatings, AiCallOptions, PriorityEnum, ProcessingModeEnum
from modules.aicore.aicorePluginPerplexity import AiPerplexity
from modules.aicore.aicorePluginTavily import AiTavily
from modules.aicore.aicoreModelSelector import ModelSelector
def testOperationTypeRatings():
    """Test the new operation type rating system."""
    print("🧪 Testing Operation Type Rating System")
    print("=" * 50)
    # Connectors under test plus the selector that ranks their models.
    perplexityConnector = AiPerplexity()
    tavilyConnector = AiTavily()
    selector = ModelSelector()
    candidateModels = perplexityConnector.getModels() + tavilyConnector.getModels()
    print(f"📊 Total models available: {len(candidateModels)}")
    print()
    # One scenario per operation type we expect ratings for.
    scenarios = [
        (OperationTypeEnum.WEB_RESEARCH, "Web Research"),
        (OperationTypeEnum.WEB_NEWS, "Web News"),
        (OperationTypeEnum.WEB_QUESTIONS, "Web Questions"),
        (OperationTypeEnum.WEB_SEARCH, "Web Search"),
        (OperationTypeEnum.DATA_ANALYSE, "Text Analysis tasks")
    ]
    for operationType, description in scenarios:
        print(f"🎯 Testing: {description} ({operationType.value})")
        print("-" * 40)
        callOptions = AiCallOptions(
            operationType=operationType,
            priority=PriorityEnum.BALANCED,
            processingMode=ProcessingModeEnum.BASIC
        )
        # Failover list comes back sorted by rating for this operation type.
        failoverModels = selector.getFailoverModelList(
            prompt="Test prompt",
            context="Test context",
            options=callOptions,
            availableModels=candidateModels
        )
        if not failoverModels:
            print("❌ No suitable models found")
            print()
            continue
        print(f"✅ Found {len(failoverModels)} suitable models:")
        for position, model in enumerate(failoverModels[:5], start=1):  # top 5
            # Rating this model declares for the current operation type (0 if none).
            rating = next(
                (r.rating for r in model.operationTypes
                 if r.operationType == operationType),
                0,
            )
            print(f" {position}. {model.displayName}")
            print(f" Rating: {rating}/10 | Speed: {model.speedRating}/10 | Quality: {model.qualityRating}/10")
            print(f" Cost: ${model.costPer1kTokensInput:.4f}/1k tokens")
        print()
    # Exercise the createOperationTypeRatings convenience helper as well.
    print("🔧 Testing Helper Function")
    print("-" * 30)
    helperRatings = createOperationTypeRatings(
        (OperationTypeEnum.WEB_RESEARCH, 10),
        (OperationTypeEnum.WEB_NEWS, 8),
        (OperationTypeEnum.DATA_ANALYSE, 6)
    )
    print("Created ratings:")
    for helperRating in helperRatings:
        print(f" {helperRating.operationType.value}: {helperRating.rating}/10")
    print()
    print("✅ All tests completed successfully!")
# Allow running this module directly as a demo/smoke-test script.
if __name__ == "__main__":
    testOperationTypeRatings()