# Copyright (c) 2025 Patrick Motsch
# All rights reserved.

"""
Dynamic Mode Prompt Generation

Handles prompt templates for dynamic mode action handling.
"""

|
import json
from typing import Any, List

from modules.datamodels.datamodelChatbot import PromptBundle, PromptPlaceholder
from modules.workflows.processing.shared.placeholderFactory import (
    extractUserPrompt,
    extractUserLanguage,
    extractAvailableMethods,
    extractAvailableDocumentsSummary,
    extractAvailableDocumentsIndex,
    extractAvailableConnectionsIndex,
    extractPreviousActionResults,
    extractLearningsAndImprovements,
    extractLatestRefinementFeedback,
    extractWorkflowHistory,
)
from modules.workflows.processing.shared.methodDiscovery import methods, getActionParameterList

def generateDynamicPlanSelectionPrompt(services, context: Any, learningEngine=None) -> PromptBundle:
    """Build the Stage 1 prompt that selects exactly one next action.

    Defines placeholders first, then the template; returns a PromptBundle.

    Args:
        services: Service container; must expose ``currentUserPromptNormalized``
            plus whatever state the ``extract*`` placeholder helpers read.
        context: Workflow context; must expose ``taskStep.objective`` and may
            expose ``workflow_id``.
        learningEngine: Optional adaptive learning engine whose guidance and
            failure analysis are injected into the prompt when available.

    Returns:
        PromptBundle: the selection template plus its placeholder contents.
    """
    placeholders: List[PromptPlaceholder] = [
        PromptPlaceholder(label="OVERALL_TASK_CONTEXT", content=services.currentUserPromptNormalized, summaryAllowed=False),
        PromptPlaceholder(label="TASK_OBJECTIVE", content=context.taskStep.objective, summaryAllowed=False),
        PromptPlaceholder(label="USER_PROMPT", content=extractUserPrompt(context), summaryAllowed=False),
        PromptPlaceholder(label="USER_LANGUAGE", content=extractUserLanguage(services), summaryAllowed=False),
        PromptPlaceholder(label="AVAILABLE_DOCUMENTS_SUMMARY", content=extractAvailableDocumentsSummary(services, context), summaryAllowed=True),
        PromptPlaceholder(label="AVAILABLE_METHODS", content=extractAvailableMethods(services), summaryAllowed=False),
        # Provide enriched history context for Stage 1 to craft parametersContext
        PromptPlaceholder(label="WORKFLOW_HISTORY", content=extractWorkflowHistory(services), summaryAllowed=True),
        # Provide deterministic indexes so the planner can choose exact labels
        PromptPlaceholder(label="AVAILABLE_DOCUMENTS_INDEX", content=extractAvailableDocumentsIndex(services, context), summaryAllowed=True),
        PromptPlaceholder(label="AVAILABLE_CONNECTIONS_INDEX", content=extractAvailableConnectionsIndex(services), summaryAllowed=False),
    ]

    # Collect adaptive learning context if an engine is available. Guard a
    # possible None return so the .get() calls below stay safe.
    adaptiveContext = {}
    if learningEngine:
        workflowId = getattr(context, 'workflow_id', 'unknown')
        userPrompt = extractUserPrompt(context)
        adaptiveContext = learningEngine.getAdaptiveContextForActionSelection(workflowId, userPrompt) or {}

    # BUGFIX: the template below references ADAPTIVE_GUIDANCE, FAILURE_ANALYSIS
    # and ESCALATION_LEVEL unconditionally, so these placeholders must ALWAYS
    # be registered (with neutral defaults) — otherwise raw {{KEY:...}} markers
    # leak into the rendered prompt whenever no learning engine is supplied.
    placeholders.extend([
        PromptPlaceholder(label="ADAPTIVE_GUIDANCE", content=adaptiveContext.get('adaptiveGuidance', ''), summaryAllowed=True),
        PromptPlaceholder(label="FAILURE_ANALYSIS", content=json.dumps(adaptiveContext.get('failureAnalysis', {}), indent=2), summaryAllowed=True),
        PromptPlaceholder(label="ESCALATION_LEVEL", content=adaptiveContext.get('escalationLevel', 'low'), summaryAllowed=False),
    ])

    # NOTE: literal braces in this template are doubled ({{ }}) so that
    # .format-style rendering leaves single braces in the output; the
    # qualityRequirements example under INTENT ANALYSIS was the one spot
    # using single braces and has been made consistent.
    template = """Select exactly one next action to advance the task incrementally.

=== TASK ===
CONTEXT: {{KEY:OVERALL_TASK_CONTEXT}}
OBJECTIVE: {{KEY:TASK_OBJECTIVE}}

=== AVAILABLE RESOURCES ===
AVAILABLE_DOCUMENTS_SUMMARY: {{KEY:AVAILABLE_DOCUMENTS_SUMMARY}}
AVAILABLE_DOCUMENTS_INDEX:
{{KEY:AVAILABLE_DOCUMENTS_INDEX}}
AVAILABLE_CONNECTIONS_INDEX:
{{KEY:AVAILABLE_CONNECTIONS_INDEX}}

=== AVAILABLE ACTIONS ===
{{KEY:AVAILABLE_METHODS}}

=== CONTEXT ===
HISTORY: {{KEY:WORKFLOW_HISTORY}}
GUIDANCE: {{KEY:ADAPTIVE_GUIDANCE}}
FAILURES: {{KEY:FAILURE_ANALYSIS}}
ESCALATION: {{KEY:ESCALATION_LEVEL}}

=== SELECTION RULE ===
1. Read OBJECTIVE and identify what it requires
2. Check AVAILABLE_METHODS to find action whose PURPOSE matches that requirement
3. Select action that can DO what objective needs - do not select actions that do something different

=== OUTPUT FORMAT ===
Return ONLY JSON (no markdown, no explanations). The chosen action MUST:
- Match the objective's requirement (verify action's purpose in AVAILABLE_METHODS)
- Be the next logical incremental step (not complete entire objective in one step)
- Target exactly one output format if producing files
- Use ONLY exact references from AVAILABLE_DOCUMENTS_INDEX (docList:... or docItem:...)
- ALWAYS use FULL document references with filename: docItem:<documentId>:<filename> (filename is required)
- Learn from previous validation feedback and avoid repeated mistakes
- Include intent analysis fields (dataType, expectedFormats, qualityRequirements, successCriteria)

{{
"action": "method.action_name",
"actionObjective": "...",
"dataType": "numbers|text|documents|analysis|code|unknown",
"expectedFormats": ["pdf", "docx", "xlsx", "txt", "json", "csv", "html", "md"],
"qualityRequirements": {{
"accuracyThreshold": 0.0-1.0,
"completenessThreshold": 0.0-1.0
}},
"successCriteria": ["specific criterion 1", "specific criterion 2"],
"userMessage": "User-friendly message in language '{{KEY:USER_LANGUAGE}}' explaining what this action will do (1 sentence, first person, friendly tone)",
"learnings": ["..."],
"requiredInputDocuments": ["docItem:<documentId>:<filename>", "docList:<label>"],
"requiredConnection": "connection:..." | null,
"parametersContext": "concise text that Stage 2 will use to set business parameters"
}}

=== INTENT ANALYSIS ===
Analyze actionObjective to determine:
- dataType: numbers|text|documents|analysis|code|unknown
- expectedFormats: array of format strings
- qualityRequirements: {{accuracyThreshold: 0.0-1.0, completenessThreshold: 0.0-1.0}}
- successCriteria: array of specific completion criteria

=== RULES ===
1. Use EXACT action names from AVAILABLE_METHODS
2. Do NOT output "parameters" object
3. parametersContext: short, sufficient for Stage 2
4. Return ONLY JSON - no markdown, no explanations
5. requiredInputDocuments: ONLY exact references from AVAILABLE_DOCUMENTS_INDEX (do not invent/modify)
- For individual documents: ALWAYS use docItem:<documentId>:<filename> format (include filename)
- For document lists: use docList:<label> format
- Copy references EXACTLY as shown in AVAILABLE_DOCUMENTS_INDEX (including filename)
6. requiredConnection: ONLY exact label from AVAILABLE_CONNECTIONS_INDEX
7. Plan incrementally: one output format per step
8. Learn from validation feedback - avoid repeating mistakes
9. If previous attempts failed, try alternative approaches
"""

    return PromptBundle(prompt=template, placeholders=placeholders)
def generateDynamicParametersPrompt(services, context: Any, compoundActionName: str, learningEngine=None) -> PromptBundle:
    """Build the Stage 2 prompt that fills business parameters for one action.

    Minimal Stage 2 (no fallback): consumes actionObjective, selectedAction,
    parametersContext only. Excludes documents/connections/history entirely.

    Args:
        services: Service container; must expose ``currentUserPromptNormalized``.
        context: Workflow context; may expose ``actionObjective``, ``taskStep``,
            ``parametersContext``, ``learnings`` and ``workflow_id``.
        compoundActionName: "method.action" identifier chosen in Stage 1.
        learningEngine: Optional adaptive learning engine for parameter
            guidance and failure analysis.

    Returns:
        PromptBundle: the parameter-generation template plus placeholders.
    """
    # derive method/action and parameter list
    methodName, actionName = (compoundActionName.split('.', 1) if '.' in compoundActionName else (compoundActionName, ''))
    actionParameterList = getActionParameterList(methodName, actionName, methods)

    def _formatBusinessParameters(params) -> str:
        """Render business parameters as "- name (type, required): desc" lines,
        filtering out infrastructure parameters the model must not set."""
        excluded = {"documentList", "connectionReference"}

        def _field(p, *names, default=None):
            # Uniform accessor over dict entries and attribute-style objects:
            # first truthy value wins, mirroring the original `a or b or ""`.
            for n in names:
                value = p.get(n) if isinstance(p, dict) else getattr(p, n, None)
                if value:
                    return value
            return default

        # Case 1: params is a list of dicts or objects with 'name'
        if isinstance(params, (list, tuple)):
            entries = []
            for p in params:
                try:
                    name = _field(p, "name")
                    if not name or name in excluded:
                        continue
                    ptype = _field(p, "type", "dataType", default="")
                    req = p.get("required") if isinstance(p, dict) else getattr(p, "required", False)
                    reqTxt = "required" if (req is True or str(req).lower() == "true") else "optional"
                    desc = _field(p, "description", "desc", default="")
                    entries.append(f"- {name} ({ptype}, {reqTxt})" + (f": {desc}" if desc else ""))
                except Exception:
                    # Best effort: a malformed entry must not break the prompt.
                    continue
            return "\n".join(entries)
        # Case 2: params is a string description: filter out lines mentioning excluded names
        if isinstance(params, str):
            lines = [ln for ln in params.splitlines() if not any(ex in ln for ex in excluded)]
            return "\n".join(lines).strip()
        # Fallback: plain string
        try:
            return str(params)
        except Exception:
            return ""

    actionParametersText = _formatBusinessParameters(actionParameterList)

    # determine action objective if available, else fall back to user prompt
    if hasattr(context, 'actionObjective') and context.actionObjective:
        actionObjective = context.actionObjective
    elif hasattr(context, 'taskStep') and context.taskStep and getattr(context.taskStep, 'objective', None):
        actionObjective = context.taskStep.objective
    else:
        actionObjective = extractUserPrompt(context)

    # Minimal Stage 2 (no fallback)
    parametersContext = getattr(context, 'parametersContext', None)

    learningsText = ""
    try:
        # If Stage 1 learnings were attached to context, pass them textually
        if hasattr(context, 'learnings') and context.learnings:
            if isinstance(context.learnings, (list, tuple)):
                learningsText = "\n".join(f"- {str(x)}" for x in context.learnings)
            else:
                learningsText = str(context.learnings)
    except Exception:
        learningsText = ""

    placeholders: List[PromptPlaceholder] = [
        PromptPlaceholder(label="OVERALL_TASK_CONTEXT", content=services.currentUserPromptNormalized, summaryAllowed=False),
        PromptPlaceholder(label="ACTION_OBJECTIVE", content=actionObjective, summaryAllowed=False),
        PromptPlaceholder(label="SELECTED_ACTION", content=compoundActionName, summaryAllowed=False),
        PromptPlaceholder(label="USER_LANGUAGE", content=extractUserLanguage(services), summaryAllowed=False),
        PromptPlaceholder(label="PARAMETERS_CONTEXT", content=(parametersContext or ""), summaryAllowed=True),
        PromptPlaceholder(label="ACTION_PARAMETERS", content=actionParametersText, summaryAllowed=False),
        PromptPlaceholder(label="LEARNINGS", content=learningsText, summaryAllowed=True),
    ]

    # Collect adaptive learning context if an engine is available. Guard a
    # possible None return so the .get() calls below stay safe.
    adaptiveContext = {}
    if learningEngine:
        workflowId = getattr(context, 'workflow_id', 'unknown')
        adaptiveContext = learningEngine.getAdaptiveContextForParameters(workflowId, compoundActionName, parametersContext or "") or {}

    # BUGFIX: the template below references PARAMETER_GUIDANCE, ATTEMPT_NUMBER
    # and FAILURE_ANALYSIS unconditionally, so these placeholders must ALWAYS
    # be registered (with neutral defaults) — otherwise raw {{KEY:...}} markers
    # leak into the rendered prompt whenever no learning engine is supplied.
    placeholders.extend([
        PromptPlaceholder(label="PARAMETER_GUIDANCE", content=adaptiveContext.get('parameterGuidance', ''), summaryAllowed=True),
        PromptPlaceholder(label="ATTEMPT_NUMBER", content=str(adaptiveContext.get('attemptNumber', 1)), summaryAllowed=False),
        PromptPlaceholder(label="FAILURE_ANALYSIS", content=json.dumps(adaptiveContext.get('failureAnalysis', {}), indent=2), summaryAllowed=True),
    ])

    template = """You are a parameter generator. Set the parameters for this specific action.

OVERALL TASK CONTEXT:
-----------------
{{KEY:OVERALL_TASK_CONTEXT}}
-----------------

THIS ACTION'S SPECIFIC OBJECTIVE:
-----------------
{{KEY:ACTION_OBJECTIVE}}
-----------------

SELECTED_ACTION:
{{KEY:SELECTED_ACTION}}

LEARNING-BASED PARAMETER GUIDANCE:
{{KEY:PARAMETER_GUIDANCE}}

ATTEMPT NUMBER: {{KEY:ATTEMPT_NUMBER}}

PREVIOUS FAILURE ANALYSIS:
{{KEY:FAILURE_ANALYSIS}}

REPLY (ONLY JSON):
{{
"schema": "parameters_v1",
"userMessage": "User-friendly message in language '{{KEY:USER_LANGUAGE}}' explaining what this action will do (1 sentence, first person, friendly tone)",
"parameters": {{
"paramName": "value"
}}
}}


CONTEXT FOR PARAMETER VALUES:
-----------------
{{KEY:PARAMETERS_CONTEXT}}
-----------------

LEARNINGS (from prior attempts, if any):
{{KEY:LEARNINGS}}

REQUIRED PARAMETERS FOR THIS ACTION (use these exact parameter names):
{{KEY:ACTION_PARAMETERS}}

COMPLETION CRITERIA:
- Describe what "complete" means for this action in natural language
- Consider: What should be delivered? What quality level is expected? What format should the output be in?

INSTRUCTIONS:
- Use ONLY the parameter names listed in section REQUIRED PARAMETERS FOR THIS ACTION
- Fill in appropriate values based on the OVERALL TASK CONTEXT and THIS ACTION'S SPECIFIC OBJECTIVE
- Consider the overall task context when setting parameter values to ensure they align with the complete user request
- Do NOT invent new parameters
- Do NOT include: documentList, connectionReference, history, documents, connections
- CRITICAL: Follow the learning-based parameter guidance above
- Learn from previous validation failures and adjust parameters accordingly

RULES:
- Return ONLY JSON (no markdown, no prose)
- Use ONLY the exact parameter names listed in REQUIRED PARAMETERS FOR THIS ACTION
- Do NOT add any parameters not listed above
- Do NOT add nested objects or custom fields
- Apply learning insights to avoid repeated parameter mistakes
"""

    return PromptBundle(prompt=template, placeholders=placeholders)
def generateDynamicRefinementPrompt(services, context: Any, reviewContent: str) -> PromptBundle:
    """Build the per-task refinement/decision prompt.

    Review is per TASK, not per user prompt; each task is handled
    independently. Placeholders are defined first, then the template.

    Args:
        services: Service container for the ``extract*`` placeholder helpers.
        context: Workflow context; may expose ``taskStep.objective``.
        reviewContent: Validation/review text the decision is based on.

    Returns:
        PromptBundle: the decision template plus its placeholder contents.
    """
    # The decision is judged against the task objective; fall back to the
    # raw user prompt when no task step (or no objective) is available.
    taskStep = getattr(context, 'taskStep', None)
    taskObjective = getattr(taskStep, 'objective', None) if taskStep else None
    if not taskObjective:
        taskObjective = extractUserPrompt(context)

    # (label, content, summaryAllowed) triples, expanded below.
    placeholderSpecs = [
        ("TASK_OBJECTIVE", taskObjective, False),
        ("USER_LANGUAGE", extractUserLanguage(services), False),
        ("REVIEW_CONTENT", reviewContent, True),
        ("AVAILABLE_METHODS", extractAvailableMethods(services), False),
        ("AVAILABLE_DOCUMENTS_INDEX", extractAvailableDocumentsIndex(services, context), True),
        ("AVAILABLE_CONNECTIONS_INDEX", extractAvailableConnectionsIndex(services), False),
    ]
    placeholders: List[PromptPlaceholder] = [
        PromptPlaceholder(label=label, content=content, summaryAllowed=allowSummary)
        for label, content, allowSummary in placeholderSpecs
    ]

    template = """TASK DECISION

=== TASK OBJECTIVE ===
{{KEY:TASK_OBJECTIVE}}

=== DECISION RULES ===
1. "continue" = objective NOT fulfilled → MUST specify next action
2. "success" = objective fulfilled
3. Return ONLY JSON - no other text

=== AVAILABLE RESOURCES ===
ACTIONS: {{KEY:AVAILABLE_METHODS}}
AVAILABLE_DOCUMENTS_INDEX: {{KEY:AVAILABLE_DOCUMENTS_INDEX}}
AVAILABLE_CONNECTIONS_INDEX:
{{KEY:AVAILABLE_CONNECTIONS_INDEX}}

{{KEY:REVIEW_CONTENT}}

=== NEXT ACTIONS ===
Follow the improvement suggestions from CONTENT VALIDATION in priority order. Each suggestion indicates what action to take next.

CRITICAL: Use structureComparison and gap information from CONTENT VALIDATION to determine what is MISSING:
- Check "structureComparison.found" vs "structureComparison.required" to see what's already delivered
- Check "structureComparison.gap" to see what's missing. If quantitative gaps are available, use them.
- Next action should ONLY generate the MISSING part, NOT repeat what's already delivered

CRITICAL - Missing Data Generation Strategy:
- When gap analysis shows missing data (found count = 0 but required count > 0):
* Generate the missing data FIRST as separate outputs before attempting integration
* Do NOT try to generate AND integrate missing data in one step - data must exist before integration
* Only AFTER missing data exists can you integrate it with existing data in a subsequent action

=== OUTPUT FORMAT ===
Return ONLY JSON (no markdown, no explanations). The decision MUST:
- Use ONLY exact references from AVAILABLE_DOCUMENTS_INDEX (docList:... or docItem:...)
- ALWAYS use FULL document references with filename: docItem:<documentId>:<filename> (filename is required)
- Use ONLY exact labels from AVAILABLE_CONNECTIONS_INDEX (connection:...)
- Provide concrete parameter values in nextActionParameters (not placeholders)
- Match parameter names exactly as defined in AVAILABLE_METHODS

{{
"status": "continue" | "success",
"reason": "Brief reason explaining why continuing or why task is complete",
"userMessage": "User-friendly message in language '{{KEY:USER_LANGUAGE}}' explaining the task status (1 sentence, first person, friendly tone)",
"nextAction": "Selected_action_from_ACTIONS" | null,
"nextActionParameters": {{
"documentList": ["docItem:<documentId>:<filename>", "docList:<label>"],
"connectionReference": "connection:reference_from_AVAILABLE_CONNECTIONS_INDEX",
"parameter1": "value1",
"parameter2": "value2"
}} | null,
"nextActionObjective": "Clear description of what this action will achieve based on improvement suggestions" | null
}}

=== RULES ===
1. Return ONLY JSON - no markdown, no explanations
2. userMessage: REQUIRED - Provide a user-friendly message in language '{{KEY:USER_LANGUAGE}}' explaining the task status (for "continue": explain what's being done next; for "success": explain what was accomplished)
3. If "continue": MUST provide nextAction and nextActionParameters
4. nextAction: SPECIFIC action from AVAILABLE_METHODS (do not invent)
5. nextActionParameters: concrete parameters (check AVAILABLE_METHODS for valid names)
6. documentList: ONLY exact references from AVAILABLE_DOCUMENTS_INDEX (do not invent/modify)
- For individual documents: ALWAYS use docItem:<documentId>:<filename> format (include filename)
- For document lists: use docList:<label> format
- Copy references EXACTLY as shown in AVAILABLE_DOCUMENTS_INDEX (including filename)
7. connectionReference: ONLY exact label from AVAILABLE_CONNECTIONS_INDEX (required if action needs connection)
8. nextActionObjective: describe what this action will achieve based on the FIRST improvement suggestion from CONTENT VALIDATION
9. CRITICAL: Use structureComparison.gap to specify the missing part in nextActionParameters
10. Do NOT repeat failed actions - suggest DIFFERENT approach
11. If ACTION HISTORY shows repeated actions, suggest a fundamentally different approach
12. nextActionObjective must directly address the highest priority improvement suggestion from CONTENT VALIDATION
13. If validation shows partial data delivered, next action should CONTINUE from where it stopped, not restart

"""

    return PromptBundle(prompt=template, placeholders=placeholders)