Module test for dynamic centralized AI calling system (rev 3, tested)

parent 89337418f6 · commit 4f7bba5f33
12 changed files with 146 additions and 137 deletions
```diff
@@ -139,14 +139,12 @@ class AiService:
     async def callAiPlanning(
         self,
         prompt: str,
-        placeholders: Optional[List[PromptPlaceholder]] = None,
-        options: Optional[AiCallOptions] = None,
-        loopInstructionFormat: Optional[str] = None
+        placeholders: Optional[List[PromptPlaceholder]] = None
     ) -> str:
         """Planning AI call for task planning, action planning, action selection, etc."""
         await self._ensureAiObjectsInitialized()
         # Always use "json" for planning calls since they return JSON
-        return await self.coreAi.callAiPlanning(prompt, placeholders, options, "json")
+        return await self.coreAi.callAiPlanning(prompt, placeholders, "json")

     async def callAiDocuments(
         self,
```
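Note: `callAiPlanning` is now a fixed-policy facade — callers pass only a prompt and optional placeholders, and the trailing `"json"` argument feeds the inner method's `loopInstructionFormat`. A minimal call-site sketch under that reading (the wrapper function and the `key`/`value` placeholder fields are illustrative assumptions, not confirmed by the diff):

```python
import json

from modules.datamodels.datamodelChat import PromptPlaceholder

async def choose_next_action(services, task: str) -> dict:
    # Hypothetical wrapper around the simplified facade.
    response = await services.ai.callAiPlanning(
        prompt="Select the next action for the task described in TASK_PLACEHOLDER.",
        placeholders=[PromptPlaceholder(key="TASK_PLACEHOLDER", value=task)],  # field names assumed
    )
    return json.loads(response)  # planning calls return JSON per the docstring
```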
```diff
@@ -2,7 +2,7 @@ import json
 import logging
 from typing import Dict, Any, List, Optional, Tuple, Union
 from modules.datamodels.datamodelChat import PromptPlaceholder, ChatDocument
-from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum
+from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum
 from modules.datamodels.datamodelExtraction import ContentPart
 from modules.services.serviceAi.subSharedAiUtils import (
     buildPromptWithPlaceholders,
```
```diff
@@ -57,6 +57,85 @@ class SubCoreAi:
         self.services = services
         self.aiObjects = aiObjects

+    async def _analyzePromptAndCreateOptions(self, prompt: str) -> AiCallOptions:
+        """Analyze prompt to determine appropriate AiCallOptions parameters."""
+        try:
+            # Get dynamic enum values from Pydantic models
+            operation_types = [e.value for e in OperationTypeEnum]
+            priorities = [e.value for e in PriorityEnum]
+            processing_modes = [e.value for e in ProcessingModeEnum]
+
+            # Create analysis prompt for AI to determine operation type and parameters
+            analysisPrompt = f"""
+You are an AI operation analyzer. Analyze the following prompt and determine the most appropriate operation type and parameters.
+
+PROMPT TO ANALYZE:
+{self.services.ai.sanitizePromptContent(prompt, 'userinput')}
+
+Based on the prompt content, determine:
+1. operationType: Choose the most appropriate from: {', '.join(operation_types)}
+2. priority: Choose from: {', '.join(priorities)}
+3. processingMode: Choose from: {', '.join(processing_modes)}
+4. compressPrompt: true/false (true for story-like prompts, false for structured prompts with JSON/schemas)
+5. compressContext: true/false (true to summarize context, false to process fully)
+
+Respond with ONLY a JSON object in this exact format:
+{{
+    "operationType": "dataAnalyse",
+    "priority": "balanced",
+    "processingMode": "basic",
+    "compressPrompt": true,
+    "compressContext": true
+}}
+"""
+
+            # Use AI to analyze the prompt
+            request = AiCallRequest(
+                prompt=analysisPrompt,
+                options=AiCallOptions(
+                    operationType=OperationTypeEnum.DATA_ANALYSE,
+                    priority=PriorityEnum.SPEED,
+                    processingMode=ProcessingModeEnum.BASIC,
+                    compressPrompt=True,
+                    compressContext=False
+                )
+            )
+
+            response = await self.aiObjects.call(request)
+
+            # Parse AI response
+            try:
+                import json
+                json_start = response.content.find('{')
+                json_end = response.content.rfind('}') + 1
+                if json_start != -1 and json_end > json_start:
+                    analysis = json.loads(response.content[json_start:json_end])
+
+                    # Map string values to enums
+                    operation_type = OperationTypeEnum(analysis.get('operationType', 'dataAnalyse'))
+                    priority = PriorityEnum(analysis.get('priority', 'balanced'))
+                    processing_mode = ProcessingModeEnum(analysis.get('processingMode', 'basic'))
+
+                    return AiCallOptions(
+                        operationType=operation_type,
+                        priority=priority,
+                        processingMode=processing_mode,
+                        compressPrompt=analysis.get('compressPrompt', True),
+                        compressContext=analysis.get('compressContext', True)
+                    )
+            except Exception as e:
+                logger.warning(f"Failed to parse AI analysis response: {e}")
+
+        except Exception as e:
+            logger.warning(f"Prompt analysis failed: {e}")
+
+        # Fallback to default options
+        return AiCallOptions(
+            operationType=OperationTypeEnum.DATA_ANALYSE,
+            priority=PriorityEnum.BALANCED,
+            processingMode=ProcessingModeEnum.BASIC
+        )
+

     # Shared Core Function for AI Calls with Looping
```
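The analyzer's error handling reduces to: scan the model reply for the outermost JSON object, map the string values onto enums, and fall back to safe defaults on any failure. A self-contained sketch of just that parsing path, using a simplified stand-in enum rather than the project's real models:

```python
import json
from enum import Enum

class OperationType(str, Enum):  # simplified stand-in for OperationTypeEnum
    DATA_ANALYSE = "dataAnalyse"
    DATA_GENERATE = "dataGenerate"

def parse_analysis(reply: str) -> dict:
    """Extract the outermost {...} block and map it onto enums, defaulting on failure."""
    try:
        start, end = reply.find('{'), reply.rfind('}') + 1
        if start != -1 and end > start:
            data = json.loads(reply[start:end])
            return {
                "operationType": OperationType(data.get("operationType", "dataAnalyse")),
                "compressPrompt": data.get("compressPrompt", True),
            }
    except ValueError:
        pass  # malformed JSON or unknown enum value -> fall through to defaults
    return {"operationType": OperationType.DATA_ANALYSE, "compressPrompt": True}

print(parse_analysis('Here you go: {"operationType": "dataGenerate", "compressPrompt": false}'))
print(parse_analysis("no json at all"))  # -> defaults
```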
```diff
@@ -342,22 +421,29 @@ CRITICAL REQUIREMENTS:
         self,
         prompt: str,
         placeholders: Optional[List[PromptPlaceholder]] = None,
-        options: Optional[AiCallOptions] = None,
         loopInstructionFormat: Optional[str] = None
     ) -> str:
         """
         Planning AI call for task planning, action planning, action selection, etc.
+        Always uses static parameters optimized for planning tasks.

         Args:
             prompt: The planning prompt
             placeholders: Optional list of placeholder replacements
-            options: AI call configuration options
+            loopInstructionFormat: Optional loop instruction format

         Returns:
             Planning JSON response
         """
-        if options is None:
-            options = AiCallOptions()
+        # Planning calls always use static parameters
+        logger.debug("Using static parameters for planning call")
+        options = AiCallOptions(
+            operationType=OperationTypeEnum.PLAN,
+            priority=PriorityEnum.QUALITY,
+            processingMode=ProcessingModeEnum.DETAILED,
+            compressPrompt=False,
+            compressContext=False
+        )

         # Build full prompt with placeholders
         if placeholders:
```
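Planning now pins one static configuration (PLAN / QUALITY / DETAILED, no compression) instead of accepting caller overrides. If those values ever need tuning, a single module-level constant would keep them in one place; a sketch of that variant — an assumption about a possible follow-up, not what this commit does:

```python
from enum import Enum

class Priority(str, Enum):  # stand-in for the project's PriorityEnum
    QUALITY = "quality"

PLANNING_DEFAULTS = dict(   # one place to tune all planning calls
    priority=Priority.QUALITY,
    compressPrompt=False,
    compressContext=False,
)

def planning_options(**overrides):
    # Hypothetical helper: static defaults, explicit opt-in overrides.
    return {**PLANNING_DEFAULTS, **overrides}

print(planning_options())
print(planning_options(compressPrompt=True))  # deliberate, visible override
```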
```diff
@@ -393,17 +479,21 @@ CRITICAL REQUIREMENTS:
         Returns:
             AI response as string, or dict with documents if outputFormat is specified
         """
-        if options is None:
-            options = AiCallOptions()
+        if options is None or (hasattr(options, 'operationType') and options.operationType is None):
+            # Use AI to determine parameters ONLY when truly needed (options=None OR operationType=None)
+            logger.debug("Analyzing prompt to determine optimal parameters")
+            options = await self._analyzePromptAndCreateOptions(prompt)
+        else:
+            logger.debug(f"Using provided options: operationType={options.operationType}, priority={options.priority}")

         # Handle document generation with specific output format using unified approach
         if outputFormat:
             # Use unified generation method for all document generation
             if documents and len(documents) > 0:
-                logger.info(f"Extracting content from {len(documents)} documents")
+                logger.debug(f"Extracting content from {len(documents)} documents")
                 extracted_content = await self.services.ai.documentProcessor.callAiText(prompt, documents, options)
             else:
-                logger.info("No documents provided - using direct generation")
+                logger.debug("No documents provided - using direct generation")
                 extracted_content = None
             generation_prompt = await self._buildGenerationPrompt(prompt, extracted_content, outputFormat, title)
             generated_json = await self._callAiWithLooping(generation_prompt, options, "document_generation", loopInstructionFormat=loopInstructionFormat)
```
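The new guard means dynamic analysis runs only when the caller supplies nothing usable; any options object carrying a concrete `operationType` short-circuits it. A compact sketch of that decision with a stand-in options type:

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class Options:  # stand-in for AiCallOptions
    operationType: Optional[str] = None

def needs_analysis(options: Optional[Options]) -> bool:
    # Mirrors the diff's condition: analyze only when options are absent
    # or carry no operationType.
    return options is None or getattr(options, "operationType", None) is None

assert needs_analysis(None)
assert needs_analysis(Options())                   # operationType left unset
assert not needs_analysis(Options("dataExtract"))  # caller decided -> respected
```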
```diff
@@ -90,7 +90,7 @@ class NormalizationService:
             " \"Date\": {\"formats\": [\"DD.MM.YYYY\",\"YYYY-MM-DD\"]}\n }\n}\n"
         )

-        response = await self.services.ai.callAiPlanning(prompt=prompt, placeholders=None, options=None)
+        response = await self.services.ai.callAiPlanning(prompt=prompt, placeholders=None)
         if not response:
             return {"mapping": {}, "normalizationPolicy": {}}
```
```diff
@@ -3,6 +3,7 @@ import uuid
 from typing import Dict, Any, List, Optional
 from modules.datamodels.datamodelUam import User, UserConnection
 from modules.datamodels.datamodelChat import ChatDocument, ChatMessage, ChatStat, ChatLog
+from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum
 from modules.security.tokenManager import TokenManager
 from modules.shared.progressLogger import ProgressLogger
```
```diff
@@ -57,17 +58,18 @@ LOOP_INSTRUCTION
 Please provide a comprehensive summary of this conversation."""

         # Get summary using AI service through proper main service interface
+
         return await self.services.ai.callAiDocuments(
             prompt=prompt,
             documents=None,
-            options={
-                "process_type": "text",
-                "operation_type": "generate",
-                "priority": "speed",
-                "compress_prompt": True,
-                "compress_documents": False,
-                "max_cost": 0.01
-            }
+            options=AiCallOptions(
+                operationType=OperationTypeEnum.DATA_GENERATE,
+                priority=PriorityEnum.SPEED,
+                processingMode=ProcessingModeEnum.BASIC,
+                compressPrompt=True,
+                compressContext=False,
+                maxCost=0.01
+            )
         )

     except Exception as e:
```
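The summary call switches from an untyped dict with snake_case keys to the typed `AiCallOptions` model, so misspelled option names fail at construction time instead of being silently ignored. A minimal illustration of that benefit with a cut-down stand-in model, assuming Pydantic v2 (the real model lives in `modules.datamodels.datamodelAi`):

```python
from typing import Optional
from pydantic import BaseModel, ValidationError

class Options(BaseModel):  # cut-down stand-in for AiCallOptions
    compressPrompt: bool = True
    maxCost: Optional[float] = None
    model_config = {"extra": "forbid"}  # reject unknown keys

Options(compressPrompt=True, maxCost=0.01)  # ok
try:
    Options(compress_prompt=True)           # old snake_case key from the dict version
except ValidationError as e:
    print("caught:", e.errors()[0]["type"])  # -> extra_forbidden
```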
```diff
@@ -10,7 +10,7 @@ from datetime import datetime, UTC

 from modules.workflows.methods.methodBase import MethodBase, action
 from modules.datamodels.datamodelChat import ActionResult
-from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum
+from modules.datamodels.datamodelAi import AiCallOptions
 from modules.datamodels.datamodelChat import ChatDocument
 from modules.aicore.aicorePluginTavily import WebResearchRequest
```
```diff
@@ -40,10 +40,6 @@ class MethodAi(MethodBase):
         - aiPrompt (str, required): Instruction for the AI.
         - documentList (list, optional): Document reference(s) for context.
         - resultType (str, optional): Output file extension - only one extension allowed (e.g. txt, json, md, csv, xml, html, pdf, docx, xlsx, png, ...). Default: txt.
-        - processingMode (str, optional): basic | advanced | detailed. Default: basic.
-        - priority (str, optional): speed | quality | cost | balanced. Default: balanced.
-        - maxCost (float, optional): Cost limit.
-        - maxProcessingTime (int, optional): Time limit in seconds.
         """
         try:
             # Init progress logger
```
```diff
@@ -72,24 +68,6 @@ class MethodAi(MethodBase):
             if isinstance(documentList, str):
                 documentList = [documentList]
             resultType = parameters.get("resultType", "txt")
-            processingModeStr = parameters.get("processingMode", "basic")
-            priorityStr = parameters.get("priority", "balanced")
-            maxCost = parameters.get("maxCost")
-            maxProcessingTime = parameters.get("maxProcessingTime")
-
-            # Dynamic operation type selection based on document presence
-            if documentList and len(documentList) > 0:
-                # With documents: default to dataExtract (document intelligence)
-                operationType = OperationTypeEnum.DATA_EXTRACT
-                logger.info(f"action.ai.processAuto-selected operationType EXTRACT (document intelligence mode - {len(documentList)} documents)")
-            else:
-                # Without documents: default to dataGenerate (content generation)
-                operationType = OperationTypeEnum.DATA_GENERATE
-                logger.info(f"action.ai.process Auto-selected operationType GENERATE (content generation mode - no documents)")
-
-            # Map string parameters to enums using centralized utility function
-            priority = self.services.utils.mapToEnum(PriorityEnum, priorityStr, PriorityEnum.BALANCED)
-            processingMode = self.services.utils.mapToEnum(ProcessingModeEnum, processingModeStr, ProcessingModeEnum.BASIC)

             if not aiPrompt:
```
```diff
@@ -117,25 +95,18 @@ class MethodAi(MethodBase):
             # Update progress - preparing AI call
             self.services.workflow.progressLogUpdate(operationId, 0.4, "Preparing AI call")

-            # Build options and delegate document handling to AI/Extraction/Generation services
+            # Build options with only resultFormat - let service layer handle all other parameters
             output_format = output_extension.replace('.', '') or 'txt'
             options = AiCallOptions(
-                operationType=operationType,
-                priority=priority,
-                compressPrompt=processingMode != ProcessingModeEnum.DETAILED,
-                compressContext=True,
-                processDocumentsIndividually=True,
-                processingMode=processingMode,
-                resultFormat=output_format,
-                maxCost=maxCost,
-                maxProcessingTime=maxProcessingTime,
+                resultFormat=output_format
+                # Removed all model parameters - service layer will analyze prompt and determine optimal parameters
             )

             # Update progress - calling AI
             self.services.workflow.progressLogUpdate(operationId, 0.6, "Calling AI")

             result = await self.services.ai.callAiDocuments(
-                prompt=aiPrompt,  # Use original prompt, let unified generation handle prompt building
+                prompt=aiPrompt,
                 documents=chatDocuments if chatDocuments else None,
                 options=options,
                 outputFormat=output_format
```
```diff
@@ -116,14 +116,9 @@ DELIVERED CONTENT TO CHECK:
 """

         # Call AI service for validation
-        from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum
-        request_options = AiCallOptions()
-        request_options.operationType = OperationTypeEnum.DATA_ANALYSE
-
         response = await self.services.ai.callAiPlanning(
             prompt=validationPrompt,
-            placeholders=None,
-            options=request_options
+            placeholders=None
         )

         # No retries or correction prompts here; parse-or-fail below
```
```diff
@@ -59,14 +59,9 @@ CRITICAL: Respond with ONLY the JSON object below. Do not include any explanator
 """

         # Call AI service for analysis
-        from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum
-        request_options = AiCallOptions()
-        request_options.operationType = OperationTypeEnum.DATA_ANALYSE
-
         response = await self.services.ai.callAiPlanning(
             prompt=analysisPrompt,
-            placeholders=None,
-            options=request_options
+            placeholders=None
         )

         # No retries or correction prompts here; parse-or-fail below
```
```diff
@@ -107,9 +107,7 @@ class TaskPlanner:

         prompt = await self.services.ai.callAiPlanning(
             prompt=taskPlanningPromptTemplate,
-            placeholders=placeholders,
-            options=options,
-            loopInstructionFormat="json"
+            placeholders=placeholders
         )

         # Check if AI response is valid
```
```diff
@@ -134,7 +134,7 @@ class ActionplanMode(BaseMode):
             maxProcessingTime=30
         )

-        prompt = await self.services.ai.callAiPlanning(prompt=actionPromptTemplate, placeholders=placeholders, options=options)
+        prompt = await self.services.ai.callAiPlanning(prompt=actionPromptTemplate, placeholders=placeholders)

         # Check if AI response is valid
         if not prompt:
```
```diff
@@ -466,7 +466,7 @@ class ActionplanMode(BaseMode):
             maxProcessingTime=30
         )

-        response = await self.services.ai.callAiPlanning(prompt=promptTemplate, placeholders=placeholders, options=options)
+        response = await self.services.ai.callAiPlanning(prompt=promptTemplate, placeholders=placeholders)

         # Log result review response received
         logger.info("=== RESULT REVIEW AI RESPONSE RECEIVED ===")
```
```diff
@@ -185,21 +185,10 @@ class ReactMode(BaseMode):
         promptTemplate = bundle.prompt
         placeholders = bundle.placeholders

-        # Centralized AI call for plan selection (use plan generation quality)
-        options = AiCallOptions(
-            operationType=OperationTypeEnum.PLAN,
-            priority=PriorityEnum.QUALITY,
-            compressPrompt=False,
-            compressContext=False,
-            processingMode=ProcessingModeEnum.DETAILED,
-            maxCost=0.10,
-            maxProcessingTime=30
-        )
-
+        # Centralized AI call for plan selection (uses static planning parameters)
         response = await self.services.ai.callAiPlanning(
             prompt=promptTemplate,
-            placeholders=placeholders,
-            options=options
+            placeholders=placeholders
         )
         jsonStart = response.find('{') if response else -1
         jsonEnd = response.rfind('}') + 1 if response else 0
```
```diff
@@ -294,24 +283,10 @@ class ReactMode(BaseMode):
         promptTemplate = bundle.prompt
         placeholders = bundle.placeholders

-        # Centralized AI call for parameter suggestion (balanced analysis)
-        options = AiCallOptions(
-            operationType=OperationTypeEnum.DATA_ANALYSE,
-            priority=PriorityEnum.BALANCED,
-            compressPrompt=True,
-            compressContext=False,
-            processingMode=ProcessingModeEnum.ADVANCED,
-            maxCost=0.05,
-            maxProcessingTime=30,
-            temperature=0.3,  # Slightly higher temperature for better instruction following
-            # max tokens not set - use model's maximum for big JSON responses
-            resultFormat="json"  # Explicitly request JSON format
-        )
-
+        # Centralized AI call for parameter suggestion (uses static planning parameters)
         paramsResp = await self.services.ai.callAiPlanning(
             prompt=promptTemplate,
-            placeholders=placeholders,
-            options=options
+            placeholders=placeholders
         )
         # Parse JSON response
         js = paramsResp[paramsResp.find('{'):paramsResp.rfind('}')+1] if paramsResp else '{}'
```
```diff
@@ -609,21 +584,10 @@ class ReactMode(BaseMode):
         promptTemplate = bundle.prompt
         placeholders = bundle.placeholders

-        # Centralized AI call for refinement decision (balanced analysis)
-        options = AiCallOptions(
-            operationType=OperationTypeEnum.DATA_ANALYSE,
-            priority=PriorityEnum.BALANCED,
-            compressPrompt=True,
-            compressContext=False,
-            processingMode=ProcessingModeEnum.ADVANCED,
-            maxCost=0.05,
-            maxProcessingTime=30
-        )
-
+        # Centralized AI call for refinement decision (uses static planning parameters)
         resp = await self.services.ai.callAiPlanning(
             prompt=promptTemplate,
-            placeholders=placeholders,
-            options=options
+            placeholders=placeholders
         )

         # More robust JSON extraction
```
```diff
@@ -716,14 +680,7 @@ Return only the user-friendly message, no technical details."""
         # Call AI to generate user-friendly message
         response = await self.services.ai.callAiPlanning(
             prompt=prompt,
-            placeholders=None,
-            options=AiCallOptions(
-                operationType=OperationTypeEnum.DATA_GENERATE,
-                priority=PriorityEnum.SPEED,
-                compressPrompt=True,
-                maxCost=0.01,
-                maxProcessingTime=5
-            )
+            placeholders=None
         )

         return response.strip() if response else f"Executing {method}.{actionName} action..."
```
```diff
@@ -757,14 +714,7 @@ Return only the user-friendly message, no technical details."""
         # Call AI to generate user-friendly result message
         response = await self.services.ai.callAiPlanning(
             prompt=prompt,
-            placeholders=None,
-            options=AiCallOptions(
-                operationType=OperationTypeEnum.DATA_GENERATE,
-                priority=PriorityEnum.SPEED,
-                compressPrompt=True,
-                maxCost=0.01,
-                maxProcessingTime=5
-            )
+            placeholders=None
         )

         return response.strip() if response else f"{method}.{actionName} action completed"
```
||||||
|
|
@ -218,8 +218,8 @@ class WorkflowManager:
|
||||||
f"User message:\n{self.services.ai.sanitizePromptContent(userInput.prompt, 'userinput')}"
|
f"User message:\n{self.services.ai.sanitizePromptContent(userInput.prompt, 'userinput')}"
|
||||||
)
|
)
|
||||||
|
|
||||||
# Call AI analyzer
|
# Call AI analyzer (planning call - will use static parameters)
|
||||||
aiResponse = await self.services.ai.callAiPlanning(prompt=analyzerPrompt, placeholders=None, options=None)
|
aiResponse = await self.services.ai.callAiPlanning(prompt=analyzerPrompt, placeholders=None)
|
||||||
|
|
||||||
detectedLanguage = None
|
detectedLanguage = None
|
||||||
normalizedRequest = None
|
normalizedRequest = None
|
||||||
|
|
|
||||||
|
|
```diff
@@ -10,7 +10,10 @@ sys.path.append(os.path.dirname(os.path.abspath(__file__)))

 from modules.datamodels.datamodelAi import OperationTypeEnum, createOperationTypeRatings, AiCallOptions, PriorityEnum, ProcessingModeEnum
 from modules.aicore.aicorePluginPerplexity import AiPerplexity
-from modules.aicore.aicorePluginTavily import AiTavily
+from modules.aicore.aicorePluginTavily import ConnectorWeb
+from modules.aicore.aicorePluginAnthropic import AiAnthropic
+from modules.aicore.aicorePluginOpenai import AiOpenai
+from modules.aicore.aicorePluginInternal import AiInternal
 from modules.aicore.aicoreModelSelector import ModelSelector

 def testOperationTypeRatings():
```
```diff
@@ -20,11 +23,15 @@ def testOperationTypeRatings():

     # Initialize connectors
     perplexity = AiPerplexity()
-    tavily = AiTavily()
+    tavily = ConnectorWeb()
+    anthropic = AiAnthropic()
+    openai = AiOpenai()
+    internal = AiInternal()
     modelSelector = ModelSelector()

     # Get all models
-    allModels = perplexity.getModels() + tavily.getModels()
+    allModels = (perplexity.getModels() + tavily.getModels() +
+                 anthropic.getModels() + openai.getModels() + internal.getModels())

     print(f"📊 Total models available: {len(allModels)}")
     print()
```
```diff
@@ -35,7 +42,10 @@ def testOperationTypeRatings():
         (OperationTypeEnum.WEB_NEWS, "Web News"),
         (OperationTypeEnum.WEB_QUESTIONS, "Web Questions"),
         (OperationTypeEnum.WEB_SEARCH, "Web Search"),
-        (OperationTypeEnum.DATA_ANALYSE, "Text Analysis tasks")
+        (OperationTypeEnum.DATA_ANALYSE, "Data Analysis tasks"),
+        (OperationTypeEnum.DATA_GENERATE, "Data Generation tasks"),
+        (OperationTypeEnum.DATA_EXTRACT, "Data Extraction tasks"),
+        (OperationTypeEnum.PLAN, "Planning tasks")
     ]

     for operationType, description in testCases:
```
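The test now aggregates models from all five connectors before rating them per operation type. The aggregation pattern in isolation, with trivial stand-in connectors rather than the real plugins:

```python
class StubConnector:  # trivial stand-in for the Ai* plugin classes
    def __init__(self, models):
        self._models = models

    def getModels(self):
        return list(self._models)

connectors = [StubConnector(["m1", "m2"]), StubConnector(["m3"]), StubConnector([])]
allModels = [m for c in connectors for m in c.getModels()]
print(f"📊 Total models available: {len(allModels)}")  # -> 3
```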