gateway/modules/workflows/processing/shared/promptGenerationActionsDynamic.py
2025-11-19 09:52:34 +01:00

312 lines
14 KiB
Python

"""
Dynamic Mode Prompt Generation
Handles prompt templates for dynamic mode action handling.
"""
import json
from typing import Any, List
from modules.datamodels.datamodelChat import PromptBundle, PromptPlaceholder
from modules.workflows.processing.shared.placeholderFactory import (
extractUserPrompt,
extractUserLanguage,
extractAvailableMethods,
extractAvailableDocumentsSummary,
extractAvailableDocumentsIndex,
extractAvailableConnectionsIndex,
extractPreviousActionResults,
extractLearningsAndImprovements,
extractLatestRefinementFeedback,
extractWorkflowHistory,
extractOverallTaskContext,
extractTaskObjective,
)
from modules.workflows.processing.shared.methodDiscovery import methods, getActionParameterList
def generateDynamicPlanSelectionPrompt(services, context: Any, learningEngine=None) -> PromptBundle:
    """Build the Stage 1 (single next-action selection) prompt for dynamic mode.

    Defines the placeholders first, then the template, and returns both as a
    PromptBundle.

    Args:
        services: service container passed through to the extract* helpers.
        context: workflow context; ``workflow_id`` is read when a learning
            engine is supplied.
        learningEngine: optional adaptive-learning engine providing
            guidance/failure-analysis context for the prompt.

    Returns:
        PromptBundle with the selection template and all bound placeholders.

    Fix: the learning placeholders (ADAPTIVE_GUIDANCE, FAILURE_ANALYSIS,
    ESCALATION_LEVEL) are now ALWAYS bound (with neutral defaults), because
    the template references them unconditionally; previously, running without
    a learning engine left unresolved ``{{KEY:...}}`` markers in the prompt.
    """
    placeholders: List[PromptPlaceholder] = [
        PromptPlaceholder(label="OVERALL_TASK_CONTEXT", content=extractOverallTaskContext(services, context), summaryAllowed=False),
        PromptPlaceholder(label="TASK_OBJECTIVE", content=extractTaskObjective(context), summaryAllowed=False),
        PromptPlaceholder(label="USER_PROMPT", content=extractUserPrompt(context), summaryAllowed=False),
        PromptPlaceholder(label="USER_LANGUAGE", content=extractUserLanguage(services), summaryAllowed=False),
        PromptPlaceholder(label="AVAILABLE_DOCUMENTS_SUMMARY", content=extractAvailableDocumentsSummary(services, context), summaryAllowed=True),
        PromptPlaceholder(label="AVAILABLE_METHODS", content=extractAvailableMethods(services), summaryAllowed=False),
        # Provide enriched history context for Stage 1 to craft parametersContext
        PromptPlaceholder(label="WORKFLOW_HISTORY", content=extractWorkflowHistory(services), summaryAllowed=True),
        # Provide deterministic indexes so the planner can choose exact labels
        PromptPlaceholder(label="AVAILABLE_DOCUMENTS_INDEX", content=extractAvailableDocumentsIndex(services, context), summaryAllowed=True),
        PromptPlaceholder(label="AVAILABLE_CONNECTIONS_INDEX", content=extractAvailableConnectionsIndex(services), summaryAllowed=False),
    ]
    # Adaptive learning context; empty dict when no engine is available or
    # when the engine yields nothing (``or {}`` also guards a None return).
    adaptiveContext = {}
    if learningEngine:
        workflowId = getattr(context, 'workflow_id', 'unknown')
        userPrompt = extractUserPrompt(context)
        adaptiveContext = learningEngine.getAdaptiveContextForActionSelection(workflowId, userPrompt) or {}
    # Always bind the learning-aware placeholders: the template below
    # references them unconditionally, so they need content in every case.
    placeholders.extend([
        PromptPlaceholder(label="ADAPTIVE_GUIDANCE", content=adaptiveContext.get('adaptiveGuidance', ''), summaryAllowed=True),
        PromptPlaceholder(label="FAILURE_ANALYSIS", content=json.dumps(adaptiveContext.get('failureAnalysis', {}), indent=2), summaryAllowed=True),
        PromptPlaceholder(label="ESCALATION_LEVEL", content=adaptiveContext.get('escalationLevel', 'low'), summaryAllowed=False),
    ])
    template = """Select exactly one next action to advance the task incrementally.
OVERALL TASK CONTEXT:
{{KEY:OVERALL_TASK_CONTEXT}}
OBJECTIVE:
{{KEY:TASK_OBJECTIVE}}
AVAILABLE_DOCUMENTS_SUMMARY:
{{KEY:AVAILABLE_DOCUMENTS_SUMMARY}}
AVAILABLE_METHODS:
{{KEY:AVAILABLE_METHODS}}
WORKFLOW_HISTORY (reverse-chronological, enriched):
{{KEY:WORKFLOW_HISTORY}}
AVAILABLE_DOCUMENTS_INDEX:
{{KEY:AVAILABLE_DOCUMENTS_INDEX}}
AVAILABLE_CONNECTIONS_INDEX:
{{KEY:AVAILABLE_CONNECTIONS_INDEX}}
LEARNING-BASED GUIDANCE:
{{KEY:ADAPTIVE_GUIDANCE}}
FAILURE ANALYSIS:
{{KEY:FAILURE_ANALYSIS}}
ESCALATION LEVEL: {{KEY:ESCALATION_LEVEL}}
REPLY: Return ONLY a JSON object with the following structure (no comments, no extra text). The chosen action MUST:
- be the next logical incremental step toward fulfilling the objective
- not attempt to complete the entire objective in one step
- if producing files, target exactly one output format for this step
- reference ONLY existing document IDs/labels from AVAILABLE_DOCUMENTS_INDEX
- learn from previous validation feedback and avoid repeated mistakes
{{
"action": "method.action_name",
"actionObjective": "...",
"userMessage": "User-friendly message in language '{{KEY:USER_LANGUAGE}}' explaining what this action will do (1 sentence, first person, friendly tone)",
"learnings": ["..."],
"requiredInputDocuments": ["docList:..."],
"requiredConnection": "connection:..." | null,
"parametersContext": "concise text that Stage 2 will use to set business parameters"
}}
EXAMPLE how to assign references from AVAILABLE_DOCUMENTS_INDEX and AVAILABLE_CONNECTIONS_INDEX:
"requiredInputDocuments": ["docList:msg_47a7a578-e8f2-4ba8-ac66-0dbff40605e0:round8_task1_action1_results","docItem:5d8b7aee-b546-4487-b6a8-835c86f7b186:AI_Generated_Document_20251006-104256.docx"],
"requiredConnection": "connection:msft:p.motsch@valueon.ch",
RULES:
1. Use EXACT action names from AVAILABLE_METHODS
2. Do NOT output a "parameters" object
3. parametersContext must be short and sufficient for Stage 2
4. Return ONLY JSON - no markdown, no explanations
5. For requiredInputDocuments, use ONLY exact references from AVAILABLE_DOCUMENTS_INDEX (docList:... or docItem:...)
- DO NOT invent or modify Message IDs
- DO NOT create new references
- Copy references EXACTLY as shown in AVAILABLE_DOCUMENTS_INDEX
6. For requiredConnection, use ONLY an exact label from AVAILABLE_CONNECTIONS_INDEX
7. Plan incrementally: if the overall intent needs multiple output formats (e.g., CSV and HTML), choose one format in this step and leave the other(s) for subsequent steps
8. CRITICAL: Learn from previous validation feedback - avoid repeating the same mistakes
9. If previous attempts failed, consider alternative approaches or more specific parameters
"""
    return PromptBundle(prompt=template, placeholders=placeholders)
def generateDynamicParametersPrompt(services, context: Any, compoundActionName: str, learningEngine=None) -> PromptBundle:
    """Build the Stage 2 (parameter generation) prompt for dynamic mode.

    Minimal Stage 2 (no fallback): consumes actionObjective, selectedAction,
    and parametersContext only; excludes documents/connections/history
    entirely.

    Args:
        services: service container passed through to the extract* helpers.
        context: workflow context; ``actionObjective``, ``taskStep``,
            ``parametersContext``, ``learnings`` and ``workflow_id`` are read
            when present.
        compoundActionName: ``"method.action"`` name selected by Stage 1.
        learningEngine: optional adaptive-learning engine providing parameter
            guidance for the prompt.

    Returns:
        PromptBundle with the parameter template and all bound placeholders.

    Fix: the learning placeholders (PARAMETER_GUIDANCE, ATTEMPT_NUMBER,
    FAILURE_ANALYSIS) are now ALWAYS bound (with neutral defaults), because
    the template references them unconditionally; previously, running without
    a learning engine left unresolved ``{{KEY:...}}`` markers in the prompt.
    """
    # Derive method/action and the action's declared parameter list.
    methodName, actionName = (compoundActionName.split('.', 1) if '.' in compoundActionName else (compoundActionName, ''))
    actionParameterList = getActionParameterList(methodName, actionName, methods)

    def _field(param, *names, default=None):
        # Read the first truthy value among `names` from a dict or an object.
        for n in names:
            value = param.get(n) if isinstance(param, dict) else getattr(param, n, None)
            if value:
                return value
        return default

    def _formatBusinessParameters(params) -> str:
        """Render business parameters as bullet lines, excluding infrastructure
        parameters (documentList, connectionReference)."""
        excluded = {"documentList", "connectionReference"}
        # Case 1: params is a list of dicts or objects with a 'name' field.
        if isinstance(params, (list, tuple)):
            entries = []
            for p in params:
                try:
                    name = _field(p, "name")
                    if not name or name in excluded:
                        continue
                    ptype = _field(p, "type", "dataType", default="")
                    req = _field(p, "required")
                    reqTxt = "required" if (req is True or str(req).lower() == "true") else "optional"
                    desc = _field(p, "description", "desc", default="")
                    entry = f"- {name} ({ptype}, {reqTxt})" + (f": {desc}" if desc else "")
                    entries.append(entry)
                except Exception:
                    # Best-effort: skip malformed parameter entries silently.
                    continue
            return "\n".join(entries)
        # Case 2: params is a string description: drop lines mentioning
        # excluded parameter names.
        if isinstance(params, str):
            lines = [ln for ln in params.splitlines() if not any(ex in ln for ex in excluded)]
            return "\n".join(lines).strip()
        # Fallback: coerce any other shape to its string form.
        try:
            return str(params)
        except Exception:
            return ""

    actionParametersText = _formatBusinessParameters(actionParameterList)
    # Determine this action's objective: explicit context attribute first,
    # then the task step's objective, else fall back to the user prompt.
    if hasattr(context, 'actionObjective') and context.actionObjective:
        actionObjective = context.actionObjective
    elif hasattr(context, 'taskStep') and context.taskStep and getattr(context.taskStep, 'objective', None):
        actionObjective = context.taskStep.objective
    else:
        actionObjective = extractUserPrompt(context)
    # Minimal Stage 2 (no fallback)
    parametersContext = getattr(context, 'parametersContext', None)
    learningsText = ""
    try:
        # If Stage 1 learnings were attached to context, pass them textually.
        if hasattr(context, 'learnings') and context.learnings:
            if isinstance(context.learnings, (list, tuple)):
                learningsText = "\n".join(f"- {str(x)}" for x in context.learnings)
            else:
                learningsText = str(context.learnings)
    except Exception:
        learningsText = ""
    placeholders: List[PromptPlaceholder] = [
        PromptPlaceholder(label="OVERALL_TASK_CONTEXT", content=extractOverallTaskContext(services, context), summaryAllowed=False),
        PromptPlaceholder(label="ACTION_OBJECTIVE", content=actionObjective, summaryAllowed=False),
        PromptPlaceholder(label="SELECTED_ACTION", content=compoundActionName, summaryAllowed=False),
        PromptPlaceholder(label="USER_LANGUAGE", content=extractUserLanguage(services), summaryAllowed=False),
        PromptPlaceholder(label="PARAMETERS_CONTEXT", content=(parametersContext or ""), summaryAllowed=True),
        PromptPlaceholder(label="ACTION_PARAMETERS", content=actionParametersText, summaryAllowed=False),
        PromptPlaceholder(label="LEARNINGS", content=learningsText, summaryAllowed=True),
    ]
    # Adaptive learning context; empty dict when no engine is available or
    # when the engine yields nothing (``or {}`` also guards a None return).
    adaptiveContext = {}
    if learningEngine:
        workflowId = getattr(context, 'workflow_id', 'unknown')
        adaptiveContext = learningEngine.getAdaptiveContextForParameters(workflowId, compoundActionName, parametersContext or "") or {}
    # Always bind the learning-aware placeholders: the template below
    # references them unconditionally, so they need content in every case.
    placeholders.extend([
        PromptPlaceholder(label="PARAMETER_GUIDANCE", content=adaptiveContext.get('parameterGuidance', ''), summaryAllowed=True),
        PromptPlaceholder(label="ATTEMPT_NUMBER", content=str(adaptiveContext.get('attemptNumber', 1)), summaryAllowed=False),
        PromptPlaceholder(label="FAILURE_ANALYSIS", content=json.dumps(adaptiveContext.get('failureAnalysis', {}), indent=2), summaryAllowed=True),
    ])
    template = """You are a parameter generator. Set the parameters for this specific action.
OVERALL TASK CONTEXT:
-----------------
{{KEY:OVERALL_TASK_CONTEXT}}
-----------------
THIS ACTION'S SPECIFIC OBJECTIVE:
-----------------
{{KEY:ACTION_OBJECTIVE}}
-----------------
SELECTED_ACTION:
{{KEY:SELECTED_ACTION}}
LEARNING-BASED PARAMETER GUIDANCE:
{{KEY:PARAMETER_GUIDANCE}}
ATTEMPT NUMBER: {{KEY:ATTEMPT_NUMBER}}
PREVIOUS FAILURE ANALYSIS:
{{KEY:FAILURE_ANALYSIS}}
REPLY (ONLY JSON):
{{
"schema": "parameters_v1",
"userMessage": "User-friendly message in language '{{KEY:USER_LANGUAGE}}' explaining what this action will do (1 sentence, first person, friendly tone)",
"parameters": {{
"paramName": "value"
}}
}}
CONTEXT FOR PARAMETER VALUES:
-----------------
{{KEY:PARAMETERS_CONTEXT}}
-----------------
LEARNINGS (from prior attempts, if any):
{{KEY:LEARNINGS}}
REQUIRED PARAMETERS FOR THIS ACTION (use these exact parameter names):
{{KEY:ACTION_PARAMETERS}}
INSTRUCTIONS:
- Use ONLY the parameter names listed in section REQUIRED PARAMETERS FOR THIS ACTION
- Fill in appropriate values based on the OVERALL TASK CONTEXT and THIS ACTION'S SPECIFIC OBJECTIVE
- Consider the overall task context when setting parameter values to ensure they align with the complete user request
- Do NOT invent new parameters
- Do NOT include: documentList, connectionReference, history, documents, connections
- CRITICAL: Follow the learning-based parameter guidance above
- Learn from previous validation failures and adjust parameters accordingly
RULES:
- Return ONLY JSON (no markdown, no prose)
- Use ONLY the exact parameter names listed in REQUIRED PARAMETERS FOR THIS ACTION
- Do NOT add any parameters not listed above
- Do NOT add nested objects or custom fields
- Apply learning insights to avoid repeated parameter mistakes
"""
    return PromptBundle(prompt=template, placeholders=placeholders)
def generateDynamicRefinementPrompt(services, context: Any, reviewContent: str) -> PromptBundle:
    """Build the refinement-decision prompt (continue vs. stop) as a PromptBundle.

    Placeholders are declared first (as label/content/summary specs), then the
    decision template; both are returned together.
    """
    specs = [
        ("USER_PROMPT", extractUserPrompt(context), False),
        ("USER_LANGUAGE", extractUserLanguage(services), False),
        ("REVIEW_CONTENT", reviewContent, True),
    ]
    placeholders: List[PromptPlaceholder] = [
        PromptPlaceholder(label=label, content=content, summaryAllowed=summarizable)
        for label, content, summarizable in specs
    ]
    template = """TASK DECISION
OBJECTIVE: '{{KEY:USER_PROMPT}}'
DECISION RULES:
1. "continue" = objective NOT fulfilled
2. "stop" = objective fulfilled
3. Return ONLY JSON - no other text
OUTPUT FORMAT (only JSON object to deliver):
{{
"decision": "continue",
"reason": "Brief reason for decision"
}}
OBSERVATION: {{KEY:REVIEW_CONTENT}}
"""
    return PromptBundle(prompt=template, placeholders=placeholders)