Fixed calling chain for SharePoint
parent 160766be2a
commit 236a85a99b
7 changed files with 1332 additions and 1165 deletions
@ -1,72 +0,0 @@
Message 0 (user)
Length: 2015 chars
================================================================================
User request: "Generate the first 1000 prime numbers."

Generate a NEW, COMPLETE JSON response. The template below shows ONLY the structure pattern - it is NOT existing content. Start from the beginning.

JSON structure template (reference only - shows the pattern):
{
  "metadata": {
    "split_strategy": "single_document",
    "source_documents": [],
    "extraction_method": "ai_generation"
  },
  "documents": [
    {
      "id": "doc_1",
      "title": "Generated Document",
      "filename": "document.json",
      "sections": [
        {
          "id": "section_heading_example",
          "content_type": "heading",
          "elements": [
            {"level": 1, "text": "Heading Text"}
          ],
          "order": 0
        },
        {
          "id": "section_paragraph_example",
          "content_type": "paragraph",
          "elements": [
            {"text": "Paragraph text content"}
          ],
          "order": 0
        },
        {
          "id": "section_list_example",
          "content_type": "list",
          "elements": [
            {
              "items": [
                {"text": "Item 1"},
                {"text": "Item 2"}
              ],
              "list_type": "numbered"
            }
          ],
          "order": 0
        },
        {
          "id": "section_table_example",
          "content_type": "table",
          "elements": [
            {
              "headers": ["Column 1", "Column 2"],
              "rows": [
                ["Row 1 Col 1", "Row 1 Col 2"],
                ["Row 2 Col 1", "Row 2 Col 2"]
              ],
              "caption": "Table caption"
            }
          ],
          "order": 0
        },
        {
          "id": "section_code_example",
          "content_type": "code",
          "elements": [
            {
              "code... [truncated]
================================================================================
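For reference, a minimal sketch (not part of the commit) of how a response following this structure template could be assembled in Python. The helper name buildDocument and the chosen section values are illustrative assumptions; only the field names come from the template above.

import json

# Illustrative helper: assemble a response that follows the structure template above.
def buildDocument(title, sections):
    return {
        "metadata": {
            "split_strategy": "single_document",
            "source_documents": [],
            "extraction_method": "ai_generation",
        },
        "documents": [
            {"id": "doc_1", "title": title, "filename": "document.json", "sections": sections}
        ],
    }

doc = buildDocument("Generated Document", [
    {"id": "section_heading_example", "content_type": "heading",
     "elements": [{"level": 1, "text": "Heading Text"}], "order": 0},
    {"id": "section_paragraph_example", "content_type": "paragraph",
     "elements": [{"text": "Paragraph text content"}], "order": 0},
])
print(json.dumps(doc, indent=2))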
@ -1,200 +0,0 @@
API Response
Length: 6082 chars
================================================================================
{
  "metadata": {
    "split_strategy": "single_document",
    "source_documents": [],
    "extraction_method": "ai_generation"
  },
  "documents": [
    {
      "id": "doc_1",
      "title": "List of First 1000 Prime Numbers",
      "filename": "prime_numbers.json",
      "sections": [
        {
          "id": "section_prime_numbers",
          "content_type": "list",
          "elements": [
            {
              "items": [
                {"number": 2},
                {"number": 3},
                {"number": 5},
                {"number": 7},
                {"number": 11},
                {"number": 13},
                {"number": 17},
                {"number": 19},
                {"number": 23},
                {"number": 29},
                {"number": 31},
                {"number": 37},
                {"number": 41},
                {"number": 43},
                {"number": 47},
                {"number": 53},
                {"number": 59},
                {"number": 61},
                {"number": 67},
                {"number": 71},
                {"number": 73},
                {"number": 79},
                {"number": 83},
                {"number": 89},
                {"number": 97},
                {"number": 101},
                {"number": 103},
                {"number": 107},
                {"number": 109},
                {"number": 113},
                {"number": 127},
                {"number": 131},
                {"number": 137},
                {"number": 139},
                {"number": 149},
                {"number": 151},
                {"number": 157},
                {"number": 163},
                {"number": 167},
                {"number": 173},
                {"number": 179},
                {"number": 181},
                {"number": 191},
                {"number": 193},
                {"number": 197},
                {"number": 199},
                {"number": 211},
                {"number": 223},
                {"number": 227},
                {"number": 229},
                {"number": 233},
                {"number": 239},
                {"number": 241},
                {"number": 251},
                {"number": 257},
                {"number": 263},
                {"number": 269},
                {"number": 271},
                {"number": 277},
                {"number": 281},
                {"number": 283},
                {"number": 293},
                {"number": 307},
                {"number": 311},
                {"number": 313},
                {"number": 317},
                {"number": 331},
                {"number": 337},
                {"number": 347},
                {"number": 349},
                {"number": 353},
                {"number": 359},
                {"number": 367},
                {"number": 373},
                {"number": 379},
                {"number": 383},
                {"number": 389},
                {"number": 397},
                {"number": 401},
                {"number": 409},
                {"number": 419},
                {"number": 421},
                {"number": 431},
                {"number": 433},
                {"number": 439},
                {"number": 443},
                {"number": 449},
                {"number": 457},
                {"number": 461},
                {"number": 463},
                {"number": 467},
                {"number": 479},
                {"number": 487},
                {"number": 491},
                {"number": 499},
                {"number": 503},
                {"number": 509},
                {"number": 521},
                {"number": 523},
                {"number": 541},
                {"number": 547},
                {"number": 557},
                {"number": 563},
                {"number": 569},
                {"number": 571},
                {"number": 577},
                {"number": 587},
                {"number": 593},
                {"number": 599},
                {"number": 601},
                {"number": 607},
                {"number": 613},
                {"number": 617},
                {"number": 619},
                {"number": 631},
                {"number": 641},
                {"number": 643},
                {"number": 647},
                {"number": 653},
                {"number": 659},
                {"number": 661},
                {"number": 673},
                {"number": 677},
                {"number": 683},
                {"number": 691},
                {"number": 701},
                {"number": 709},
                {"number": 719},
                {"number": 727},
                {"number": 733},
                {"number": 739},
                {"number": 743},
                {"number": 751},
                {"number": 757},
                {"number": 761},
                {"number": 769},
                {"number": 773},
                {"number": 787},
                {"number": 797},
                {"number": 809},
                {"number": 811},
                {"number": 821},
                {"number": 823},
                {"number": 827},
                {"number": 829},
                {"number": 839},
                {"number": 853},
                {"number": 857},
                {"number": 859},
                {"number": 863},
                {"number": 877},
                {"number": 881},
                {"number": 883},
                {"number": 887},
                {"number": 907},
                {"number": 911},
                {"number": 919},
                {"number": 929},
                {"number": 937},
                {"number": 941},
                {"number": 947},
                {"number": 953},
                {"number": 967},
                {"number": 971},
                {"number": 977},
                {"number": 983},
                {"number": 991},
                {"number": 997}
              ],
              "list_type": "numbered"
            }
          ],
          "order": 0
        }
      ]
    }
  ]
}
================================================================================
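A quick sanity check on the response above (reviewer note, not part of the commit): the list stops at 997, so it contains the 168 primes below 1000 rather than the first 1000 primes, which would run up to 7919. A small sieve confirms the values that are present:

# Sieve of Eratosthenes; used here only to cross-check the list in the API response above.
def primesBelow(limit):
    sieve = [True] * limit
    sieve[0:2] = [False, False]
    for n in range(2, int(limit ** 0.5) + 1):
        if sieve[n]:
            sieve[n * n::n] = [False] * len(sieve[n * n::n])
    return [i for i, isPrime in enumerate(sieve) if isPrime]

primes = primesBelow(1000)
print(len(primes))   # 168, the number of {"number": ...} items above
print(primes[-1])    # 997, the last item above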
@ -1573,13 +1573,7 @@ class ChatObjects:
             executionLog["workflowId"] = workflow.id
             executionLog["status"] = "completed"
             executionLog["messages"].append(f"Workflow {workflow.id} started successfully")
-            # Also store plan in module-level cache as backup (keyed by workflow ID)
-            from modules.workflows.processing.modes import modeAutomation
-            if not hasattr(modeAutomation, '_templatePlanCache'):
-                modeAutomation._templatePlanCache = {}
-            modeAutomation._templatePlanCache[workflow.id] = plan
-            logger.info(f"Stored template plan for workflow {workflow.id} (cache + prompt) with {len(plan.get('tasks', []))} tasks")
+            logger.info(f"Started workflow {workflow.id} with plan containing {len(plan.get('tasks', []))} tasks (plan embedded in userInput)")

             # Update automation with execution log
             executionLogs = automation.get("executionLogs", [])
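The flip side of dropping the module-level cache is that the plan now has to travel inside the user input itself. A minimal sketch of that embedding, assuming the marker strings used in the diffs below; the helper name and the example plan contents are illustrative, not part of the commit.

import json

# Illustrative producer side: embed a predefined task plan into the prompt so
# AUTOMATION mode can recover it later without a module-level cache.
def embedTemplatePlan(userPrompt, plan):
    planJson = json.dumps(plan)
    return f"{userPrompt}\n<!--TEMPLATE_PLAN_START-->\n{planJson}\n<!--TEMPLATE_PLAN_END-->"

userInput = embedTemplatePlan(
    "Sync the SharePoint library",
    {"tasks": [{"id": "task_1", "description": "List changed files"}]},  # assumed plan shape
)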
File diff suppressed because it is too large
@ -37,16 +37,27 @@ class TaskPlanner:

         # Check workflow status before calling AI service
         checkWorkflowStopped(self.services)

         # Analyze user intent to obtain cleaned user objective for planning
-        # This intent will be reused for workflow-level validation in executeTask
-        from modules.workflows.processing.adaptive import IntentAnalyzer
-        intentAnalyzer = IntentAnalyzer(self.services)
-        workflowIntent = await intentAnalyzer.analyzeUserIntent(actualUserPrompt, None)
-        # Store workflow intent for reuse in executeTask (avoid redundant analysis)
-        if not hasattr(workflow, '_workflowIntent'):
-            workflow._workflowIntent = workflowIntent
-        cleanedObjective = workflowIntent.get('primaryGoal', actualUserPrompt) if isinstance(workflowIntent, dict) else actualUserPrompt
+        # SKIP intent analysis for AUTOMATION mode - it uses predefined JSON plans
+        from modules.datamodels.datamodelChat import WorkflowModeEnum
+        workflowMode = getattr(workflow, 'workflowMode', None)
+        skipIntentionAnalysis = (workflowMode == WorkflowModeEnum.WORKFLOW_AUTOMATION)
+        if skipIntentionAnalysis:
+            logger.info("Skipping intent analysis for AUTOMATION mode - using direct user input")
+            # For automation mode, use user input directly without intent analysis
+            cleanedObjective = actualUserPrompt
+            workflowIntent = None
+        else:
+            # This intent will be reused for workflow-level validation in executeTask
+            from modules.workflows.processing.adaptive import IntentAnalyzer
+            intentAnalyzer = IntentAnalyzer(self.services)
+            workflowIntent = await intentAnalyzer.analyzeUserIntent(actualUserPrompt, None)
+            # Store workflow intent for reuse in executeTask (avoid redundant analysis)
+            if not hasattr(workflow, '_workflowIntent'):
+                workflow._workflowIntent = workflowIntent
+            cleanedObjective = workflowIntent.get('primaryGoal', actualUserPrompt) if isinstance(workflowIntent, dict) else actualUserPrompt

         # Create proper context object for task planning using cleaned intent
         # For task planning, we need to create a minimal TaskStep since TaskContext requires it
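As a usage note, the else branch above only relies on analyzeUserIntent returning a dict with a primaryGoal key; anything else falls back to the raw prompt. The intent dict and prompt below are assumed examples, not real analyzer output.

# Assumed shape of an intent result; only 'primaryGoal' is consumed above.
workflowIntent = {"primaryGoal": "Summarize the uploaded report", "language": "en"}
actualUserPrompt = "Please summarize the uploaded report and list open questions."

cleanedObjective = (
    workflowIntent.get('primaryGoal', actualUserPrompt)
    if isinstance(workflowIntent, dict)
    else actualUserPrompt
)
print(cleanedObjective)  # "Summarize the uploaded report"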
@ -26,51 +26,39 @@ class AutomationMode(BaseMode):

     async def generateTaskPlan(self, userInput: str, workflow: ChatWorkflow) -> TaskPlan:
         """
-        Generate task plan from stored template plan (no AI planning needed).
-        The plan is stored in module-level cache by executeAutomation.
+        Generate task plan from JSON plan in userInput (no AI planning needed).
+        AUTOMATION mode ALWAYS requires a JSON plan to be provided in the user input.
+        The plan can be:
+        - Embedded between <!--TEMPLATE_PLAN_START--> and <!--TEMPLATE_PLAN_END-->
+        - Or as direct JSON in userInput
         """
         try:
-            # Get plan from module-level cache (stored by executeAutomation)
+            # AUTOMATION mode ALWAYS requires a JSON plan to be provided in userInput
+            # Try to extract plan from userInput (embedded JSON or direct JSON)
             templatePlan = None
-            if hasattr(self, '_templatePlanCache') and self._templatePlanCache:
-                templatePlan = self._templatePlanCache.get(workflow.id)
-
-            # Try module-level cache
-            if not templatePlan:
-                try:
-                    from modules.workflows.processing.modes import modeAutomation
-                    if hasattr(modeAutomation, '_templatePlanCache'):
-                        templatePlan = modeAutomation._templatePlanCache.get(workflow.id)
-                        if templatePlan:
-                            logger.info(f"Retrieved template plan from module cache for workflow {workflow.id}")
-                except Exception as e:
-                    logger.warning(f"Could not access module cache: {str(e)}")
-
-            if not templatePlan:
-                # Fallback: Extract from prompt (embedded as JSON comment)
-                try:
-                    # Look for embedded plan in prompt (between <!--TEMPLATE_PLAN_START--> and <!--TEMPLATE_PLAN_END-->)
-                    startMarker = "<!--TEMPLATE_PLAN_START-->"
-                    endMarker = "<!--TEMPLATE_PLAN_END-->"
-                    startIdx = userInput.find(startMarker)
-                    endIdx = userInput.find(endMarker)
-                    if startIdx >= 0 and endIdx > startIdx:
-                        planJson = userInput[startIdx + len(startMarker):endIdx].strip()
-                        templatePlan = json.loads(planJson)
-                        logger.info("Extracted template plan from embedded JSON in prompt")
-                    elif '{' in userInput and '"tasks"' in userInput:
-                        # Try parsing entire userInput as JSON (fallback)
-                        jsonStart = userInput.find('{')
-                        jsonEnd = userInput.rfind('}') + 1
-                        if jsonStart >= 0 and jsonEnd > jsonStart:
-                            templatePlan = json.loads(userInput[jsonStart:jsonEnd])
-                            logger.info("Parsed template plan from userInput JSON (fallback)")
-                    else:
-                        raise ValueError("No template plan found in cache or prompt")
-                except (json.JSONDecodeError, ValueError) as e:
-                    logger.error(f"Could not parse template plan: {str(e)}")
-                    raise ValueError(f"Template mode requires a predefined plan, but none was found: {str(e)}")
+            try:
+                # Look for embedded plan in prompt (between <!--TEMPLATE_PLAN_START--> and <!--TEMPLATE_PLAN_END-->)
+                startMarker = "<!--TEMPLATE_PLAN_START-->"
+                endMarker = "<!--TEMPLATE_PLAN_END-->"
+                startIdx = userInput.find(startMarker)
+                endIdx = userInput.find(endMarker)
+                if startIdx >= 0 and endIdx > startIdx:
+                    planJson = userInput[startIdx + len(startMarker):endIdx].strip()
+                    templatePlan = json.loads(planJson)
+                    logger.info("Extracted template plan from embedded JSON in prompt")
+                elif '{' in userInput and '"tasks"' in userInput:
+                    # Try parsing entire userInput as JSON (fallback)
+                    jsonStart = userInput.find('{')
+                    jsonEnd = userInput.rfind('}') + 1
+                    if jsonStart >= 0 and jsonEnd > jsonStart:
+                        templatePlan = json.loads(userInput[jsonStart:jsonEnd])
+                        logger.info("Parsed template plan from userInput JSON (fallback)")
+                else:
+                    raise ValueError("No template plan found in userInput. AUTOMATION mode requires a JSON plan to be provided in the user input.")
+            except (json.JSONDecodeError, ValueError) as e:
+                logger.error(f"Could not parse template plan: {str(e)}")
+                raise ValueError(f"AUTOMATION mode requires a predefined JSON plan with 'tasks' array, but none was found. Please provide the plan in the user input (embedded between <!--TEMPLATE_PLAN_START--> and <!--TEMPLATE_PLAN_END--> or as direct JSON). Error: {str(e)}")

             logger.info(f"Using template plan with {len(templatePlan.get('tasks', []))} tasks")
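The consumer side of the same convention, as a runnable sketch: the marker strings and the 'tasks' key are taken from the code above, while the example prompt and the individual task fields are assumptions for illustration.

import json

userInput = """Run the SharePoint sync workflow.
<!--TEMPLATE_PLAN_START-->
{"tasks": [{"id": "task_1", "description": "List changed files"},
           {"id": "task_2", "description": "Download and index changes"}]}
<!--TEMPLATE_PLAN_END-->"""

# Same marker-based extraction as in generateTaskPlan above.
startMarker = "<!--TEMPLATE_PLAN_START-->"
endMarker = "<!--TEMPLATE_PLAN_END-->"
startIdx = userInput.find(startMarker)
endIdx = userInput.find(endMarker)
templatePlan = json.loads(userInput[startIdx + len(startMarker):endIdx].strip())
print(len(templatePlan.get('tasks', [])))  # 2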
@ -109,15 +97,6 @@ class AutomationMode(BaseMode):

             logger.info(f"Generated task plan from template with {len(tasks)} tasks")

-            # Clean up cache after retrieving plan (prevent memory leaks)
-            try:
-                from modules.workflows.processing.modes import modeAutomation
-                if hasattr(modeAutomation, '_templatePlanCache') and workflow.id in modeAutomation._templatePlanCache:
-                    del modeAutomation._templatePlanCache[workflow.id]
-                    logger.debug(f"Cleaned up template plan cache for workflow {workflow.id}")
-            except Exception as e:
-                logger.warning(f"Could not clean up template plan cache: {str(e)}")
-
             return taskPlan

         except Exception as e:
@ -204,118 +204,129 @@ class WorkflowManager:
         }

         # Analyze the user's input to detect language, normalize request, extract intent, and offload bulky context into documents
+        # SKIP user intention analysis for AUTOMATION mode - it uses predefined JSON plans
         createdDocs = []
+        workflowMode = getattr(workflow, 'workflowMode', None)
+        skipIntentionAnalysis = (workflowMode == WorkflowModeEnum.WORKFLOW_AUTOMATION)

+        if skipIntentionAnalysis:
+            logger.info("Skipping user intention analysis for AUTOMATION mode - using direct user input")
+            # For automation mode, use user input directly without AI analysis
+            self.services.currentUserPrompt = userInput.prompt
+            detectedLanguage = None
+            normalizedRequest = None
+            intentText = userInput.prompt
+            contextItems = []
+        else:
             try:
                 analyzerPrompt = (
                     "You are an input analyzer. From the user's message, perform ALL of the following in one pass:\n"
                     "1) detectedLanguage: detect ISO 639-1 language code (e.g., de, en).\n"
                     "2) normalizedRequest: full, explicit restatement of the user's request in the detected language; do NOT summarize; preserve ALL constraints and details.\n"
                     "3) intent: concise single-paragraph core request in the detected language for high-level routing.\n"
                     "4) contextItems: supportive data blocks to attach as separate documents if significantly larger than the intent (large literal content, long lists/tables, code/JSON blocks, transcripts, CSV fragments, detailed specs). Keep URLs in the intent unless they embed large pasted content.\n\n"
                     "Rules:\n"
                     "- If total content (intent + data) is < 10% of model max tokens, do not extract; return empty contextItems and keep intent compact and self-contained.\n"
                     "- If content exceeds that threshold, move bulky parts into contextItems; keep intent short and clear.\n"
                     "- Preserve critical references (URLs, filenames) in intent.\n"
                     "- Normalize to the primary detected language if mixed-language.\n\n"
                     "Return ONLY JSON (no markdown) with this shape:\n"
                     "{\n"
                     " \"detectedLanguage\": \"de|en|fr|it|...\",\n"
                     " \"normalizedRequest\": \"Full explicit instruction in detected language\",\n"
                     " \"intent\": \"Concise normalized request...\",\n"
                     " \"contextItems\": [\n"
                     " {\n"
                     " \"title\": \"User context 1\",\n"
                     " \"mimeType\": \"text/plain\",\n"
                     " \"content\": \"Full extracted content block here\"\n"
                     " }\n"
                     " ]\n"
                     "}\n\n"
                     f"User message:\n{self.services.utils.sanitizePromptContent(userInput.prompt, 'userinput')}"
                 )

                 # Call AI analyzer (planning call - will use static parameters)
                 aiResponse = await self.services.ai.callAiPlanning(
                     prompt=analyzerPrompt,
                     placeholders=None,
                     debugType="userintention"
                 )

                 detectedLanguage = None
                 normalizedRequest = None
                 intentText = userInput.prompt
                 contextItems = []

                 # Parse analyzer response (JSON expected)
                 try:
                     jsonStart = aiResponse.find('{') if aiResponse else -1
                     jsonEnd = aiResponse.rfind('}') + 1 if aiResponse else 0
                     if jsonStart != -1 and jsonEnd > jsonStart:
                         parsed = json.loads(aiResponse[jsonStart:jsonEnd])
                         detectedLanguage = parsed.get('detectedLanguage') or None
                         normalizedRequest = parsed.get('normalizedRequest') or None
                         if parsed.get('intent'):
                             intentText = parsed.get('intent')
                         contextItems = parsed.get('contextItems') or []
                 except Exception:
                     contextItems = []

                 # Update services state
                 if detectedLanguage and isinstance(detectedLanguage, str):
                     self._setUserLanguage(detectedLanguage)
                     try:
                         setattr(self.services, 'currentUserLanguage', detectedLanguage)
                     except Exception:
                         pass
                 self.services.currentUserPrompt = intentText or userInput.prompt
                 try:
                     if normalizedRequest:
                         setattr(self.services, 'currentUserPromptNormalized', normalizedRequest)
                     if contextItems is not None:
                         setattr(self.services, 'currentUserContextItems', contextItems)
                 except Exception:
                     pass

                 # Create documents for context items
                 if contextItems and isinstance(contextItems, list):
                     for idx, item in enumerate(contextItems):
                         try:
                             title = item.get('title') if isinstance(item, dict) else None
                             mime = item.get('mimeType') if isinstance(item, dict) else None
                             content = item.get('content') if isinstance(item, dict) else None
                             if not content:
                                 continue
                             fileName = (title or f"user_context_{idx+1}.txt").strip()
                             mimeType = (mime or "text/plain").strip()

                             # Neutralize content before storing if neutralization is enabled
                             contentBytes = content.encode('utf-8')
                             contentBytes = await self._neutralizeContentIfEnabled(contentBytes, mimeType)

                             # Create file in component storage
                             fileItem = self.services.interfaceDbComponent.createFile(
                                 name=fileName,
                                 mimeType=mimeType,
                                 content=contentBytes
                             )
                             # Persist file data
                             self.services.interfaceDbComponent.createFileData(fileItem.id, contentBytes)

                             # Collect file info
                             fileInfo = self.services.chat.getFileInfo(fileItem.id)
                             from modules.datamodels.datamodelChat import ChatDocument
                             doc = ChatDocument(
                                 fileId=fileItem.id,
                                 fileName=fileInfo.get("fileName", fileName) if fileInfo else fileName,
                                 fileSize=fileInfo.get("size", len(contentBytes)) if fileInfo else len(contentBytes),
                                 mimeType=fileInfo.get("mimeType", mimeType) if fileInfo else mimeType
                             )
                             createdDocs.append(doc)
                         except Exception:
                             continue
             except Exception as e:
                 logger.warning(f"Prompt analysis failed or skipped: {str(e)}")

         # Process user-uploaded documents (fileIds) and combine with context documents
         if userInput.listFileId:
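For reference, an analyzer response matching the JSON shape requested by analyzerPrompt might look like the example below; the field names follow the prompt above, while all values are invented for illustration. The parsing block in the diff only relies on the four top-level keys.

import json

# Invented example of an analyzer response in the shape requested by analyzerPrompt.
aiResponse = """{
  "detectedLanguage": "en",
  "normalizedRequest": "Copy the two pasted tables into separate documents and summarize them.",
  "intent": "Summarize two pasted tables",
  "contextItems": [
    {"title": "User context 1", "mimeType": "text/plain", "content": "col_a;col_b\\n1;2\\n3;4"}
  ]
}"""

parsed = json.loads(aiResponse)
print(parsed["detectedLanguage"], len(parsed["contextItems"]))  # en 1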