Fixed calling chain for SharePoint

ValueOn AG 2025-11-04 23:59:29 +01:00
parent 160766be2a
commit 236a85a99b
7 changed files with 1332 additions and 1165 deletions


@@ -1,72 +0,0 @@
Message 0 (user)
Length: 2015 chars
================================================================================
User request: "Generate the first 1000 prime numbers."
Generate a NEW, COMPLETE JSON response. The template below shows ONLY the structure pattern - it is NOT existing content. Start from the beginning.
JSON structure template (reference only - shows the pattern):
{
"metadata": {
"split_strategy": "single_document",
"source_documents": [],
"extraction_method": "ai_generation"
},
"documents": [
{
"id": "doc_1",
"title": "Generated Document",
"filename": "document.json",
"sections": [
{
"id": "section_heading_example",
"content_type": "heading",
"elements": [
{"level": 1, "text": "Heading Text"}
],
"order": 0
},
{
"id": "section_paragraph_example",
"content_type": "paragraph",
"elements": [
{"text": "Paragraph text content"}
],
"order": 0
},
{
"id": "section_list_example",
"content_type": "list",
"elements": [
{
"items": [
{"text": "Item 1"},
{"text": "Item 2"}
],
"list_type": "numbered"
}
],
"order": 0
},
{
"id": "section_table_example",
"content_type": "table",
"elements": [
{
"headers": ["Column 1", "Column 2"],
"rows": [
["Row 1 Col 1", "Row 1 Col 2"],
["Row 2 Col 1", "Row 2 Col 2"]
],
"caption": "Table caption"
}
],
"order": 0
},
{
"id": "section_code_example",
"content_type": "code",
"elements": [
{
"code... [truncated]
================================================================================
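As an aside, a minimal consumer sketch for the schema above (not part of the logged exchange; the function name is hypothetical). It walks documents and sections and tallies elements per content_type:

from collections import Counter

def summarizeDocuments(payload: dict) -> Counter:
    # Tally elements per content_type across all documents and sections.
    counts = Counter()
    for doc in payload.get("documents", []):
        for section in doc.get("sections", []):
            counts[section.get("content_type", "unknown")] += len(section.get("elements", []))
    return counts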


@@ -1,200 +0,0 @@
API Response
Length: 6082 chars
================================================================================
{
"metadata": {
"split_strategy": "single_document",
"source_documents": [],
"extraction_method": "ai_generation"
},
"documents": [
{
"id": "doc_1",
"title": "List of First 1000 Prime Numbers",
"filename": "prime_numbers.json",
"sections": [
{
"id": "section_prime_numbers",
"content_type": "list",
"elements": [
{
"items": [
{"number": 2},
{"number": 3},
{"number": 5},
{"number": 7},
{"number": 11},
{"number": 13},
{"number": 17},
{"number": 19},
{"number": 23},
{"number": 29},
{"number": 31},
{"number": 37},
{"number": 41},
{"number": 43},
{"number": 47},
{"number": 53},
{"number": 59},
{"number": 61},
{"number": 67},
{"number": 71},
{"number": 73},
{"number": 79},
{"number": 83},
{"number": 89},
{"number": 97},
{"number": 101},
{"number": 103},
{"number": 107},
{"number": 109},
{"number": 113},
{"number": 127},
{"number": 131},
{"number": 137},
{"number": 139},
{"number": 149},
{"number": 151},
{"number": 157},
{"number": 163},
{"number": 167},
{"number": 173},
{"number": 179},
{"number": 181},
{"number": 191},
{"number": 193},
{"number": 197},
{"number": 199},
{"number": 211},
{"number": 223},
{"number": 227},
{"number": 229},
{"number": 233},
{"number": 239},
{"number": 241},
{"number": 251},
{"number": 257},
{"number": 263},
{"number": 269},
{"number": 271},
{"number": 277},
{"number": 281},
{"number": 283},
{"number": 293},
{"number": 307},
{"number": 311},
{"number": 313},
{"number": 317},
{"number": 331},
{"number": 337},
{"number": 347},
{"number": 349},
{"number": 353},
{"number": 359},
{"number": 367},
{"number": 373},
{"number": 379},
{"number": 383},
{"number": 389},
{"number": 397},
{"number": 401},
{"number": 409},
{"number": 419},
{"number": 421},
{"number": 431},
{"number": 433},
{"number": 439},
{"number": 443},
{"number": 449},
{"number": 457},
{"number": 461},
{"number": 463},
{"number": 467},
{"number": 479},
{"number": 487},
{"number": 491},
{"number": 499},
{"number": 503},
{"number": 509},
{"number": 521},
{"number": 523},
{"number": 541},
{"number": 547},
{"number": 557},
{"number": 563},
{"number": 569},
{"number": 571},
{"number": 577},
{"number": 587},
{"number": 593},
{"number": 599},
{"number": 601},
{"number": 607},
{"number": 613},
{"number": 617},
{"number": 619},
{"number": 631},
{"number": 641},
{"number": 643},
{"number": 647},
{"number": 653},
{"number": 659},
{"number": 661},
{"number": 673},
{"number": 677},
{"number": 683},
{"number": 691},
{"number": 701},
{"number": 709},
{"number": 719},
{"number": 727},
{"number": 733},
{"number": 739},
{"number": 743},
{"number": 751},
{"number": 757},
{"number": 761},
{"number": 769},
{"number": 773},
{"number": 787},
{"number": 797},
{"number": 809},
{"number": 811},
{"number": 821},
{"number": 823},
{"number": 827},
{"number": 829},
{"number": 839},
{"number": 853},
{"number": 857},
{"number": 859},
{"number": 863},
{"number": 877},
{"number": 881},
{"number": 883},
{"number": 887},
{"number": 907},
{"number": 911},
{"number": 919},
{"number": 929},
{"number": 937},
{"number": 941},
{"number": 947},
{"number": 953},
{"number": 967},
{"number": 971},
{"number": 977},
{"number": 983},
{"number": 991},
{"number": 997}
],
"list_type": "numbered"
}
],
"order": 0
}
]
}
]
}
================================================================================
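Note that the logged response above, despite its title, lists only the primes below 1000: 168 values ending at 997, not the first 1000 primes (the 1000th prime is 7919). A throwaway sketch, independent of the commit, that computes the requested list:

def firstPrimes(n: int) -> list:
    # Trial division against already-found primes up to sqrt(candidate).
    primes = []
    candidate = 2
    while len(primes) < n:
        if all(candidate % p for p in primes if p * p <= candidate):
            primes.append(candidate)
        candidate += 1
    return primes

primes = firstPrimes(1000)
assert primes[167] == 997   # where the logged response stops (the 168th prime)
assert primes[-1] == 7919   # the actual 1000th prime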


@@ -1573,13 +1573,7 @@ class ChatObjects:
executionLog["workflowId"] = workflow.id
executionLog["status"] = "completed"
executionLog["messages"].append(f"Workflow {workflow.id} started successfully")
# Also store plan in module-level cache as backup (keyed by workflow ID)
from modules.workflows.processing.modes import modeAutomation
if not hasattr(modeAutomation, '_templatePlanCache'):
modeAutomation._templatePlanCache = {}
modeAutomation._templatePlanCache[workflow.id] = plan
logger.info(f"Stored template plan for workflow {workflow.id} (cache + prompt) with {len(plan.get('tasks', []))} tasks")
logger.info(f"Started workflow {workflow.id} with plan containing {len(plan.get('tasks', []))} tasks (plan embedded in userInput)")
# Update automation with execution log
executionLogs = automation.get("executionLogs", [])
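This hunk removes the module-level plan cache; the plan now travels embedded in userInput. A minimal sketch of the embedding side, assuming a helper of roughly this shape (the function name is hypothetical; the marker strings appear verbatim in the automation-mode diff below):

import json

def embedTemplatePlan(userPrompt: str, plan: dict) -> str:
    # Append the serialized plan between the markers generateTaskPlan scans for.
    planJson = json.dumps(plan)
    return f"{userPrompt}\n<!--TEMPLATE_PLAN_START-->{planJson}<!--TEMPLATE_PLAN_END-->"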

File diff suppressed because it is too large.


@@ -37,16 +37,27 @@ class TaskPlanner:
# Check workflow status before calling AI service
checkWorkflowStopped(self.services)
# Analyze user intent to obtain cleaned user objective for planning
# This intent will be reused for workflow-level validation in executeTask
from modules.workflows.processing.adaptive import IntentAnalyzer
intentAnalyzer = IntentAnalyzer(self.services)
workflowIntent = await intentAnalyzer.analyzeUserIntent(actualUserPrompt, None)
# Store workflow intent for reuse in executeTask (avoid redundant analysis)
if not hasattr(workflow, '_workflowIntent'):
workflow._workflowIntent = workflowIntent
cleanedObjective = workflowIntent.get('primaryGoal', actualUserPrompt) if isinstance(workflowIntent, dict) else actualUserPrompt
# SKIP intent analysis for AUTOMATION mode - it uses predefined JSON plans
from modules.datamodels.datamodelChat import WorkflowModeEnum
workflowMode = getattr(workflow, 'workflowMode', None)
skipIntentionAnalysis = (workflowMode == WorkflowModeEnum.WORKFLOW_AUTOMATION)
if skipIntentionAnalysis:
logger.info("Skipping intent analysis for AUTOMATION mode - using direct user input")
# For automation mode, use user input directly without intent analysis
cleanedObjective = actualUserPrompt
workflowIntent = None
else:
# This intent will be reused for workflow-level validation in executeTask
from modules.workflows.processing.adaptive import IntentAnalyzer
intentAnalyzer = IntentAnalyzer(self.services)
workflowIntent = await intentAnalyzer.analyzeUserIntent(actualUserPrompt, None)
# Store workflow intent for reuse in executeTask (avoid redundant analysis)
if not hasattr(workflow, '_workflowIntent'):
workflow._workflowIntent = workflowIntent
cleanedObjective = workflowIntent.get('primaryGoal', actualUserPrompt) if isinstance(workflowIntent, dict) else actualUserPrompt
# Create proper context object for task planning using cleaned intent
# For task planning, we need to create a minimal TaskStep since TaskContext requires it
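For context, the comments above say the cached intent is reused in executeTask. A hedged sketch of that reuse under stated assumptions (the method name and signature are hypothetical; the attribute name and import path come from the hunk):

from modules.workflows.processing.adaptive import IntentAnalyzer

async def getWorkflowIntent(self, workflow, actualUserPrompt: str):
    # Prefer the intent cached on the workflow during planning; re-analyze only
    # when nothing was cached (e.g., AUTOMATION mode skipped the analysis).
    workflowIntent = getattr(workflow, '_workflowIntent', None)
    if workflowIntent is None:
        intentAnalyzer = IntentAnalyzer(self.services)
        workflowIntent = await intentAnalyzer.analyzeUserIntent(actualUserPrompt, None)
    return workflowIntent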


@@ -26,51 +26,39 @@ class AutomationMode(BaseMode):
async def generateTaskPlan(self, userInput: str, workflow: ChatWorkflow) -> TaskPlan:
"""
Generate task plan from stored template plan (no AI planning needed).
The plan is stored in module-level cache by executeAutomation.
Generate task plan from JSON plan in userInput (no AI planning needed).
AUTOMATION mode ALWAYS requires a JSON plan to be provided in the user input.
The plan can be:
- Embedded between <!--TEMPLATE_PLAN_START--> and <!--TEMPLATE_PLAN_END-->
- Or as direct JSON in userInput
"""
try:
# Get plan from module-level cache (stored by executeAutomation)
# AUTOMATION mode ALWAYS requires a JSON plan to be provided in userInput
# Try to extract plan from userInput (embedded JSON or direct JSON)
templatePlan = None
if hasattr(self, '_templatePlanCache') and self._templatePlanCache:
templatePlan = self._templatePlanCache.get(workflow.id)
# Try module-level cache
if not templatePlan:
try:
from modules.workflows.processing.modes import modeAutomation
if hasattr(modeAutomation, '_templatePlanCache'):
templatePlan = modeAutomation._templatePlanCache.get(workflow.id)
if templatePlan:
logger.info(f"Retrieved template plan from module cache for workflow {workflow.id}")
except Exception as e:
logger.warning(f"Could not access module cache: {str(e)}")
if not templatePlan:
# Fallback: Extract from prompt (embedded as JSON comment)
try:
# Look for embedded plan in prompt (between <!--TEMPLATE_PLAN_START--> and <!--TEMPLATE_PLAN_END-->)
startMarker = "<!--TEMPLATE_PLAN_START-->"
endMarker = "<!--TEMPLATE_PLAN_END-->"
startIdx = userInput.find(startMarker)
endIdx = userInput.find(endMarker)
if startIdx >= 0 and endIdx > startIdx:
planJson = userInput[startIdx + len(startMarker):endIdx].strip()
templatePlan = json.loads(planJson)
logger.info("Extracted template plan from embedded JSON in prompt")
elif '{' in userInput and '"tasks"' in userInput:
# Try parsing entire userInput as JSON (fallback)
jsonStart = userInput.find('{')
jsonEnd = userInput.rfind('}') + 1
if jsonStart >= 0 and jsonEnd > jsonStart:
templatePlan = json.loads(userInput[jsonStart:jsonEnd])
logger.info("Parsed template plan from userInput JSON (fallback)")
else:
raise ValueError("No template plan found in cache or prompt")
except (json.JSONDecodeError, ValueError) as e:
logger.error(f"Could not parse template plan: {str(e)}")
raise ValueError(f"Template mode requires a predefined plan, but none was found: {str(e)}")
try:
# Look for embedded plan in prompt (between <!--TEMPLATE_PLAN_START--> and <!--TEMPLATE_PLAN_END-->)
startMarker = "<!--TEMPLATE_PLAN_START-->"
endMarker = "<!--TEMPLATE_PLAN_END-->"
startIdx = userInput.find(startMarker)
endIdx = userInput.find(endMarker)
if startIdx >= 0 and endIdx > startIdx:
planJson = userInput[startIdx + len(startMarker):endIdx].strip()
templatePlan = json.loads(planJson)
logger.info("Extracted template plan from embedded JSON in prompt")
elif '{' in userInput and '"tasks"' in userInput:
# Try parsing entire userInput as JSON (fallback)
jsonStart = userInput.find('{')
jsonEnd = userInput.rfind('}') + 1
if jsonStart >= 0 and jsonEnd > jsonStart:
templatePlan = json.loads(userInput[jsonStart:jsonEnd])
logger.info("Parsed template plan from userInput JSON (fallback)")
else:
raise ValueError("No template plan found in userInput. AUTOMATION mode requires a JSON plan to be provided in the user input.")
except (json.JSONDecodeError, ValueError) as e:
logger.error(f"Could not parse template plan: {str(e)}")
raise ValueError(f"AUTOMATION mode requires a predefined JSON plan with 'tasks' array, but none was found. Please provide the plan in the user input (embedded between <!--TEMPLATE_PLAN_START--> and <!--TEMPLATE_PLAN_END--> or as direct JSON). Error: {str(e)}")
logger.info(f"Using template plan with {len(templatePlan.get('tasks', []))} tasks")
@@ -109,15 +97,6 @@ class AutomationMode(BaseMode):
logger.info(f"Generated task plan from template with {len(tasks)} tasks")
# Clean up cache after retrieving plan (prevent memory leaks)
try:
from modules.workflows.processing.modes import modeAutomation
if hasattr(modeAutomation, '_templatePlanCache') and workflow.id in modeAutomation._templatePlanCache:
del modeAutomation._templatePlanCache[workflow.id]
logger.debug(f"Cleaned up template plan cache for workflow {workflow.id}")
except Exception as e:
logger.warning(f"Could not clean up template plan cache: {str(e)}")
return taskPlan
except Exception as e:
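Restated outside the class, the extraction logic this hunk settles on (the helper name is hypothetical; the marker strings and the '"tasks"' fallback are taken verbatim from the diff):

import json

PLAN_START = "<!--TEMPLATE_PLAN_START-->"
PLAN_END = "<!--TEMPLATE_PLAN_END-->"

def extractTemplatePlan(userInput: str) -> dict:
    # Preferred path: plan embedded between the HTML-comment markers.
    startIdx = userInput.find(PLAN_START)
    endIdx = userInput.find(PLAN_END)
    if startIdx >= 0 and endIdx > startIdx:
        return json.loads(userInput[startIdx + len(PLAN_START):endIdx].strip())
    # Fallback: parse the outermost braces if a "tasks" key is present.
    if '{' in userInput and '"tasks"' in userInput:
        return json.loads(userInput[userInput.find('{'):userInput.rfind('}') + 1])
    raise ValueError("AUTOMATION mode requires a JSON plan in the user input")

For example, extractTemplatePlan('Run <!--TEMPLATE_PLAN_START-->{"tasks": []}<!--TEMPLATE_PLAN_END-->') returns {'tasks': []}.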


@@ -204,118 +204,129 @@ class WorkflowManager:
}
# Analyze the user's input to detect language, normalize request, extract intent, and offload bulky context into documents
# SKIP user intention analysis for AUTOMATION mode - it uses predefined JSON plans
createdDocs = []
workflowMode = getattr(workflow, 'workflowMode', None)
skipIntentionAnalysis = (workflowMode == WorkflowModeEnum.WORKFLOW_AUTOMATION)
try:
analyzerPrompt = (
"You are an input analyzer. From the user's message, perform ALL of the following in one pass:\n"
"1) detectedLanguage: detect ISO 639-1 language code (e.g., de, en).\n"
"2) normalizedRequest: full, explicit restatement of the user's request in the detected language; do NOT summarize; preserve ALL constraints and details.\n"
"3) intent: concise single-paragraph core request in the detected language for high-level routing.\n"
"4) contextItems: supportive data blocks to attach as separate documents if significantly larger than the intent (large literal content, long lists/tables, code/JSON blocks, transcripts, CSV fragments, detailed specs). Keep URLs in the intent unless they embed large pasted content.\n\n"
"Rules:\n"
"- If total content (intent + data) is < 10% of model max tokens, do not extract; return empty contextItems and keep intent compact and self-contained.\n"
"- If content exceeds that threshold, move bulky parts into contextItems; keep intent short and clear.\n"
"- Preserve critical references (URLs, filenames) in intent.\n"
"- Normalize to the primary detected language if mixed-language.\n\n"
"Return ONLY JSON (no markdown) with this shape:\n"
"{\n"
" \"detectedLanguage\": \"de|en|fr|it|...\",\n"
" \"normalizedRequest\": \"Full explicit instruction in detected language\",\n"
" \"intent\": \"Concise normalized request...\",\n"
" \"contextItems\": [\n"
" {\n"
" \"title\": \"User context 1\",\n"
" \"mimeType\": \"text/plain\",\n"
" \"content\": \"Full extracted content block here\"\n"
" }\n"
" ]\n"
"}\n\n"
f"User message:\n{self.services.utils.sanitizePromptContent(userInput.prompt, 'userinput')}"
)
# Call AI analyzer (planning call - will use static parameters)
aiResponse = await self.services.ai.callAiPlanning(
prompt=analyzerPrompt,
placeholders=None,
debugType="userintention"
)
if skipIntentionAnalysis:
logger.info("Skipping user intention analysis for AUTOMATION mode - using direct user input")
# For automation mode, use user input directly without AI analysis
self.services.currentUserPrompt = userInput.prompt
detectedLanguage = None
normalizedRequest = None
intentText = userInput.prompt
contextItems = []
# Parse analyzer response (JSON expected)
else:
try:
jsonStart = aiResponse.find('{') if aiResponse else -1
jsonEnd = aiResponse.rfind('}') + 1 if aiResponse else 0
if jsonStart != -1 and jsonEnd > jsonStart:
parsed = json.loads(aiResponse[jsonStart:jsonEnd])
detectedLanguage = parsed.get('detectedLanguage') or None
normalizedRequest = parsed.get('normalizedRequest') or None
if parsed.get('intent'):
intentText = parsed.get('intent')
contextItems = parsed.get('contextItems') or []
except Exception:
analyzerPrompt = (
"You are an input analyzer. From the user's message, perform ALL of the following in one pass:\n"
"1) detectedLanguage: detect ISO 639-1 language code (e.g., de, en).\n"
"2) normalizedRequest: full, explicit restatement of the user's request in the detected language; do NOT summarize; preserve ALL constraints and details.\n"
"3) intent: concise single-paragraph core request in the detected language for high-level routing.\n"
"4) contextItems: supportive data blocks to attach as separate documents if significantly larger than the intent (large literal content, long lists/tables, code/JSON blocks, transcripts, CSV fragments, detailed specs). Keep URLs in the intent unless they embed large pasted content.\n\n"
"Rules:\n"
"- If total content (intent + data) is < 10% of model max tokens, do not extract; return empty contextItems and keep intent compact and self-contained.\n"
"- If content exceeds that threshold, move bulky parts into contextItems; keep intent short and clear.\n"
"- Preserve critical references (URLs, filenames) in intent.\n"
"- Normalize to the primary detected language if mixed-language.\n\n"
"Return ONLY JSON (no markdown) with this shape:\n"
"{\n"
" \"detectedLanguage\": \"de|en|fr|it|...\",\n"
" \"normalizedRequest\": \"Full explicit instruction in detected language\",\n"
" \"intent\": \"Concise normalized request...\",\n"
" \"contextItems\": [\n"
" {\n"
" \"title\": \"User context 1\",\n"
" \"mimeType\": \"text/plain\",\n"
" \"content\": \"Full extracted content block here\"\n"
" }\n"
" ]\n"
"}\n\n"
f"User message:\n{self.services.utils.sanitizePromptContent(userInput.prompt, 'userinput')}"
)
# Call AI analyzer (planning call - will use static parameters)
aiResponse = await self.services.ai.callAiPlanning(
prompt=analyzerPrompt,
placeholders=None,
debugType="userintention"
)
detectedLanguage = None
normalizedRequest = None
intentText = userInput.prompt
contextItems = []
# Update services state
if detectedLanguage and isinstance(detectedLanguage, str):
self._setUserLanguage(detectedLanguage)
# Parse analyzer response (JSON expected)
try:
setattr(self.services, 'currentUserLanguage', detectedLanguage)
jsonStart = aiResponse.find('{') if aiResponse else -1
jsonEnd = aiResponse.rfind('}') + 1 if aiResponse else 0
if jsonStart != -1 and jsonEnd > jsonStart:
parsed = json.loads(aiResponse[jsonStart:jsonEnd])
detectedLanguage = parsed.get('detectedLanguage') or None
normalizedRequest = parsed.get('normalizedRequest') or None
if parsed.get('intent'):
intentText = parsed.get('intent')
contextItems = parsed.get('contextItems') or []
except Exception:
contextItems = []
# Update services state
if detectedLanguage and isinstance(detectedLanguage, str):
self._setUserLanguage(detectedLanguage)
try:
setattr(self.services, 'currentUserLanguage', detectedLanguage)
except Exception:
pass
self.services.currentUserPrompt = intentText or userInput.prompt
try:
if normalizedRequest:
setattr(self.services, 'currentUserPromptNormalized', normalizedRequest)
if contextItems is not None:
setattr(self.services, 'currentUserContextItems', contextItems)
except Exception:
pass
self.services.currentUserPrompt = intentText or userInput.prompt
try:
if normalizedRequest:
setattr(self.services, 'currentUserPromptNormalized', normalizedRequest)
if contextItems is not None:
setattr(self.services, 'currentUserContextItems', contextItems)
except Exception:
pass
# Create documents for context items
if contextItems and isinstance(contextItems, list):
for idx, item in enumerate(contextItems):
try:
title = item.get('title') if isinstance(item, dict) else None
mime = item.get('mimeType') if isinstance(item, dict) else None
content = item.get('content') if isinstance(item, dict) else None
if not content:
continue
fileName = (title or f"user_context_{idx+1}.txt").strip()
mimeType = (mime or "text/plain").strip()
# Create documents for context items
if contextItems and isinstance(contextItems, list):
for idx, item in enumerate(contextItems):
try:
title = item.get('title') if isinstance(item, dict) else None
mime = item.get('mimeType') if isinstance(item, dict) else None
content = item.get('content') if isinstance(item, dict) else None
if not content:
# Neutralize content before storing if neutralization is enabled
contentBytes = content.encode('utf-8')
contentBytes = await self._neutralizeContentIfEnabled(contentBytes, mimeType)
# Create file in component storage
fileItem = self.services.interfaceDbComponent.createFile(
name=fileName,
mimeType=mimeType,
content=contentBytes
)
# Persist file data
self.services.interfaceDbComponent.createFileData(fileItem.id, contentBytes)
# Collect file info
fileInfo = self.services.chat.getFileInfo(fileItem.id)
from modules.datamodels.datamodelChat import ChatDocument
doc = ChatDocument(
fileId=fileItem.id,
fileName=fileInfo.get("fileName", fileName) if fileInfo else fileName,
fileSize=fileInfo.get("size", len(contentBytes)) if fileInfo else len(contentBytes),
mimeType=fileInfo.get("mimeType", mimeType) if fileInfo else mimeType
)
createdDocs.append(doc)
except Exception:
continue
fileName = (title or f"user_context_{idx+1}.txt").strip()
mimeType = (mime or "text/plain").strip()
# Neutralize content before storing if neutralization is enabled
contentBytes = content.encode('utf-8')
contentBytes = await self._neutralizeContentIfEnabled(contentBytes, mimeType)
# Create file in component storage
fileItem = self.services.interfaceDbComponent.createFile(
name=fileName,
mimeType=mimeType,
content=contentBytes
)
# Persist file data
self.services.interfaceDbComponent.createFileData(fileItem.id, contentBytes)
# Collect file info
fileInfo = self.services.chat.getFileInfo(fileItem.id)
from modules.datamodels.datamodelChat import ChatDocument
doc = ChatDocument(
fileId=fileItem.id,
fileName=fileInfo.get("fileName", fileName) if fileInfo else fileName,
fileSize=fileInfo.get("size", len(contentBytes)) if fileInfo else len(contentBytes),
mimeType=fileInfo.get("mimeType", mimeType) if fileInfo else mimeType
)
createdDocs.append(doc)
except Exception:
continue
except Exception as e:
logger.warning(f"Prompt analysis failed or skipped: {str(e)}")
except Exception as e:
logger.warning(f"Prompt analysis failed or skipped: {str(e)}")
# Process user-uploaded documents (fileIds) and combine with context documents
if userInput.listFileId:
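The brace-slicing parse this hunk applies to the analyzer response, as a standalone sketch (the helper name is hypothetical):

import json

def parseFirstJsonObject(aiResponse):
    # Slice from the first '{' to the last '}' and parse; return None on failure.
    jsonStart = aiResponse.find('{') if aiResponse else -1
    jsonEnd = aiResponse.rfind('}') + 1 if aiResponse else 0
    if jsonStart != -1 and jsonEnd > jsonStart:
        try:
            return json.loads(aiResponse[jsonStart:jsonEnd])
        except json.JSONDecodeError:
            return None
    return None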