"""
|
|
Placeholder-based prompt factory for dynamic AI calls.
|
|
This module provides prompt templates with placeholders that can be filled dynamically.
|
|
"""
|
|
|
|
import json
|
|
from typing import Dict, Any
|
|
from modules.workflows.processing.promptFactory import (
|
|
_getAvailableDocuments,
|
|
_getPreviousRoundContext,
|
|
getMethodsList,
|
|
getEnhancedDocumentContext,
|
|
_getConnectionReferenceList,
|
|
methods
|
|
)
|
|
|
|
|
|
def createTaskPlanningPromptTemplate() -> str:
    """Create the task-planning prompt template.

    Placeholders filled by the caller: {{KEY:USER_PROMPT}},
    {{KEY:AVAILABLE_DOCUMENTS}}, {{KEY:WORKFLOW_HISTORY}}.
    Braces in the JSON example are doubled ('{{' / '}}'), presumably because
    the template goes through a str.format-style pass — TODO confirm.
    """
    return """You are a task planning AI that breaks down user requests into logical, executable task steps.

USER REQUEST:
{{KEY:USER_PROMPT}}

AVAILABLE DOCUMENTS:
{{KEY:AVAILABLE_DOCUMENTS}}

PREVIOUS WORKFLOW ROUNDS:
{{KEY:WORKFLOW_HISTORY}}

TASK PLANNING RULES:
- COMBINE related activities into single tasks to avoid fragmentation
- Focus on business value and meaningful outcomes
- Keep tasks at appropriate abstraction level (not implementation details)
- Each task should produce usable results for subsequent tasks
- If retry request, analyze previous rounds to understand what failed

REQUIRED JSON STRUCTURE:
{{
    "overview": "Brief description of the overall plan",
    "languageUserDetected": "en",
    "userMessage": "User-friendly message explaining the task plan",
    "tasks": [
        {{
            "id": "task_1",
            "objective": "Clear business objective combining related activities",
            "dependencies": ["task_0"],
            "success_criteria": ["measurable criteria 1", "measurable criteria 2"],
            "estimated_complexity": "low|medium|high",
            "userMessage": "What this task will accomplish"
        }}
    ]
}}

RESPONSE: Return ONLY the JSON object."""
def createActionDefinitionPromptTemplate() -> str:
    """Create the action-definition prompt template.

    Placeholders filled by the caller: {{KEY:USER_PROMPT}},
    {{KEY:AVAILABLE_DOCUMENTS}}, {{KEY:WORKFLOW_HISTORY}},
    {{KEY:AVAILABLE_METHODS}}, {{KEY:USER_LANGUAGE}}.
    Single-brace tokens such as {current_round} appear intended for a later
    str.format substitution pass — TODO confirm against the caller.
    """
    return """You are an action planning AI that generates specific, executable actions for task steps.

TASK OBJECTIVE: {{KEY:USER_PROMPT}}

AVAILABLE DOCUMENTS: {{KEY:AVAILABLE_DOCUMENTS}}

WORKFLOW HISTORY: {{KEY:WORKFLOW_HISTORY}}

AVAILABLE METHODS: {{KEY:AVAILABLE_METHODS}}

USER LANGUAGE: {{KEY:USER_LANGUAGE}}

ACTION SELECTION RULES:
- Use document.generateReport for creating formatted documents (Word, PDF, Excel, etc.)
- Use ai.process for text analysis, Q&A, research, brainstorming (plain text only)
- Use web.search for external information gathering
- Use document.extract for analyzing existing documents
- If no documents available, use web actions or create status reports

PARAMETER REQUIREMENTS:
- documentList must be a LIST of references from AVAILABLE DOCUMENTS
- Use specific, detailed prompts for document actions
- Include all necessary parameters for execution
- Reference previous action outputs using: "round{current_round}_task{current_task}_action{action_number}_{label}"

REQUIRED JSON STRUCTURE:
{{
    "actions": [
        {{
            "method": "method_name",
            "action": "action_name",
            "parameters": {{}},
            "resultLabel": "round{current_round}_task{current_task}_action{action_number}_{descriptive_label}",
            "description": "What this action accomplishes",
            "userMessage": "User-friendly message in {{KEY:USER_LANGUAGE}}"
        }}
    ]
}}

RESPONSE: Return ONLY the JSON object."""
def createActionSelectionPromptTemplate() -> str:
    """Create the single-action selection prompt template.

    Placeholders filled by the caller: {{KEY:USER_PROMPT}},
    {{KEY:AVAILABLE_DOCUMENTS}}, {{KEY:USER_LANGUAGE}},
    {{KEY:AVAILABLE_METHODS}}.
    """
    return """Select exactly one action to advance the task.

OBJECTIVE: {{KEY:USER_PROMPT}}
AVAILABLE DOCUMENTS: {{KEY:AVAILABLE_DOCUMENTS}}
USER LANGUAGE: {{KEY:USER_LANGUAGE}}

MINIMAL TOOL CATALOG (method -> action -> [parameterNames]):
{{KEY:AVAILABLE_METHODS}}

BUSINESS RULES:
- Pick exactly one action per step.
- Derive choice from objective and success criteria.
- Prefer user language.
- Keep it minimal; avoid provider specifics.

RESPONSE FORMAT (JSON only):
{{"action":{{"method":"web","name":"search"}}}}"""
def createActionParameterPromptTemplate() -> str:
    """Create the action-parameter prompt template.

    Placeholders filled by the caller: {{KEY:SELECTED_ACTION}},
    {{KEY:ACTION_SIGNATURE}}, {{KEY:USER_PROMPT}},
    {{KEY:AVAILABLE_DOCUMENTS}}, {{KEY:AVAILABLE_CONNECTIONS}},
    {{KEY:USER_LANGUAGE}}.
    """
    return """Provide only the required parameters for this action.

SELECTED ACTION: {{KEY:SELECTED_ACTION}}

ACTION SIGNATURE: {{KEY:ACTION_SIGNATURE}}

OBJECTIVE: {{KEY:USER_PROMPT}}

AVAILABLE DOCUMENTS: {{KEY:AVAILABLE_DOCUMENTS}}

AVAILABLE CONNECTIONS: {{KEY:AVAILABLE_CONNECTIONS}}

USER LANGUAGE: {{KEY:USER_LANGUAGE}}

DOCUMENT REFERENCE TYPES:
- docItem: Reference to a single document (e.g., "docItem:uuid:filename.pdf")
- docList: Reference to a group of documents (e.g., "docList:msg_123:AnalysisResults")
- round{{round_number}}_task{{task_number}}_action{{action_number}}_{{label}}: Reference to resulting document list from previous action

CONNECTION REFERENCE TYPES:
- Use exact connection references from AVAILABLE CONNECTIONS (e.g., "conn_microsoft_123", "conn_sharepoint_456")

CRITICAL RULES:
- ONLY use exact document labels listed in AVAILABLE DOCUMENTS above
- ONLY use exact connection references from AVAILABLE CONNECTIONS
- For documentList parameters: Use docList references when you need multiple documents
- For documentList parameters: Use docItem references when you need specific documents
- For connectionReference parameters: Use the exact connection reference from AVAILABLE CONNECTIONS
- Return only the parameters object as JSON
- Include user language if relevant
- Avoid unnecessary fields; host applies defaults
- Use the ACTION SIGNATURE above to understand what parameters are required
- Convert the objective into appropriate parameter values as needed

RESPONSE FORMAT (JSON only):
{{"parameters":{{}}}}"""
def createRefinementPromptTemplate() -> str:
    """Create the continue/stop refinement prompt template.

    Placeholders filled by the caller: {{KEY:USER_PROMPT}},
    {{KEY:REVIEW_CONTENT}}.
    """
    return """Decide next step based on observation.

OBJECTIVE: {{KEY:USER_PROMPT}}
OBSERVATION:
{{KEY:REVIEW_CONTENT}}

RULES:
- If criteria are met or no further action helps, decide stop.
- Else decide continue.

RESPONSE FORMAT (JSON only):
{{"decision":"continue","reason":"Need more data"}}"""
def createResultReviewPromptTemplate() -> str:
    """Create the result-review/validation prompt template.

    Placeholders filled by the caller: {{KEY:USER_PROMPT}},
    {{KEY:REVIEW_CONTENT}}.

    NOTE(review): the example JSON below contains ``//`` annotations, which
    are not valid JSON even though the prompt demands a JSON-only response;
    presumably the downstream parser tolerates them — confirm before
    tightening response validation.
    """
    return """You are a result validation AI that reviews task execution outcomes and determines success, retry needs, or failure.

TASK OBJECTIVE: {{KEY:USER_PROMPT}}

EXECUTION RESULTS:
{{KEY:REVIEW_CONTENT}}

VALIDATION CRITERIA:
- Review each action's success/failure status
- Check if required documents were produced
- Validate document quality and completeness
- Assess if success criteria were met
- Identify any missing or incomplete outputs
- Determine if retry would help or if task should be marked as failed

REQUIRED JSON STRUCTURE:
{{
    "status": "success|retry|failed",
    "reason": "Detailed explanation of the validation decision",
    "improvements": ["specific improvement 1", "specific improvement 2"],
    "quality_score": 8, // 1-10 scale
    "met_criteria": ["criteria1", "criteria2"],
    "unmet_criteria": ["criteria3", "criteria4"],
    "confidence": 0.85, // 0.0-1.0 scale
    "userMessage": "User-friendly message explaining the validation result"
}}

VALIDATION PRINCIPLES:
- Be thorough but fair in assessment
- Focus on business value and outcomes
- Consider both technical execution and business results
- Provide specific, actionable improvement suggestions
- Use quality scores to track progress across retries
- Clearly identify which success criteria were met vs. unmet
- Set appropriate confidence levels based on evidence quality

NOTE: Respond with ONLY the JSON object. Do not include any explanatory text."""
# Helper functions to extract content for placeholders
|
|
|
|
def extractUserPrompt(context) -> str:
    """Return the user's request text from the context's task step.

    Falls back to 'No request specified' when the context has no truthy
    task_step, or the step carries no objective.
    """
    fallback = 'No request specified'
    step = getattr(context, 'task_step', None)
    if not step:
        return fallback
    return step.objective or fallback
def extractAvailableDocuments(context) -> str:
    """Return a description of the documents available on the workflow.

    Delegates to _getAvailableDocuments when the context carries a truthy
    workflow; otherwise reports that no documents are available.
    """
    workflow = getattr(context, 'workflow', None)
    return _getAvailableDocuments(workflow) if workflow else "No documents available"
def extractWorkflowHistory(service, context) -> str:
    """Return previous-round context for the workflow, or a first-round note.

    The first-round message is also used when _getPreviousRoundContext
    itself returns a falsy value.
    """
    first_round_msg = "No previous workflow rounds - this is the first round."
    workflow = getattr(context, 'workflow', None)
    if not workflow:
        return first_round_msg
    return _getPreviousRoundContext(service, workflow) or first_round_msg
def _extractActionDescription(method_instance, method: str, action: str) -> str:
    """Return an action's main docstring description (text before 'Parameters:').

    Lines starting with '@' are skipped. Falls back to a generic
    "Execute <method>.<action> action" description when the instance, the
    attribute, or a usable docstring is missing.
    """
    if method_instance and hasattr(method_instance, action):
        func = getattr(method_instance, action)
        docstring = getattr(func, '__doc__', None)
        if docstring:
            description_lines = []
            for line in docstring.split('\n'):
                line = line.strip()
                # Everything after 'Parameters:' documents arguments, not the action.
                if line.startswith('Parameters:'):
                    break
                if line and not line.startswith('@'):
                    description_lines.append(line)
            description = ' '.join(description_lines).strip()
            if description:
                return description
    return f"Execute {method}.{action} action"


def extractAvailableMethods(service) -> str:
    """Build a JSON catalog of available methods/actions for action planning.

    Groups signatures from getMethodsList (formatted "method.action(...)") by
    method name and maps each action to a docstring-derived description, e.g.
    {"web": {"search": "Search the web ..."}}. Returns the catalog as a
    pretty-printed JSON string.
    """
    # Group "method.action(...)" signatures by method name.
    method_actions = {}
    for sig in getMethodsList(service):
        if '.' in sig:
            method, rest = sig.split('.', 1)
            action = rest.split('(')[0]
            method_actions.setdefault(method, []).append(action)

    # Structured JSON is easier for the AI to parse than raw signatures.
    available_methods_json = {}
    for method, actions in method_actions.items():
        # The method instance gives access to the action functions' docstrings.
        method_instance = methods.get(method, {}).get('instance') if methods else None
        available_methods_json[method] = {
            action: _extractActionDescription(method_instance, method, action)
            for action in actions
        }

    return json.dumps(available_methods_json, indent=2, ensure_ascii=False)
def extractUserLanguage(service) -> str:
    """Return the user's language code from the service, defaulting to 'en'."""
    if service and service.user:
        return service.user.language
    return 'en'
def _summarizeActionResults(action_results) -> str:
    """Render a per-result text summary: status, error, and document metadata
    (names/ids/sizes only — never document content)."""
    result_summary = ""
    for i, result in enumerate(action_results):
        result_summary += f"\nRESULT {i+1}:\n"
        result_summary += f" Success: {result.success}\n"
        if result.error:
            result_summary += f" Error: {result.error}\n"

        if result.documents:
            result_summary += f" Documents: {len(result.documents)} document(s)\n"
            for doc in result.documents:
                # Collect all available metadata without document content.
                doc_metadata = {
                    "name": getattr(doc, 'documentName', 'Unknown'),
                    "mimeType": getattr(doc, 'mimeType', 'Unknown'),
                    "size": getattr(doc, 'size', 'Unknown'),
                    "created": getattr(doc, 'created', 'Unknown'),
                    "modified": getattr(doc, 'modified', 'Unknown'),
                    "typeGroup": getattr(doc, 'typeGroup', 'Unknown'),
                    "documentId": getattr(doc, 'documentId', 'Unknown'),
                    "reference": getattr(doc, 'reference', 'Unknown')
                }
                # Drop missing ('Unknown') entries to keep the summary clean.
                doc_metadata = {k: v for k, v in doc_metadata.items() if v != 'Unknown'}
                result_summary += f" - {json.dumps(doc_metadata, indent=6, ensure_ascii=False)}\n"
        else:
            result_summary += " Documents: None\n"
    return result_summary


def extractReviewContent(context) -> str:
    """Extract review content from context with full document metadata.

    Returns a text summary of ``context.action_results`` when present;
    otherwise a JSON dump of ``context.observation`` in which preview
    snippets are replaced by a character-count indicator; otherwise a
    fallback message. Never mutates ``context``.
    """
    if hasattr(context, 'action_results') and context.action_results:
        return _summarizeActionResults(context.action_results)
    elif hasattr(context, 'observation') and context.observation:
        # For observation data, show full content but redact document snippets.
        if isinstance(context.observation, dict):
            # Shallow dict copy; nested preview dicts are rebuilt below so the
            # caller's observation is never mutated. (The previous version
            # assigned into the shared preview dicts after a shallow .copy(),
            # which clobbered the caller's snippets.)
            obs_copy = dict(context.observation)
            previews = obs_copy.get('previews')
            if isinstance(previews, list):
                redacted = []
                for preview in previews:
                    if isinstance(preview, dict) and 'snippet' in preview:
                        # Replace raw content with a size indicator.
                        preview = {
                            **preview,
                            'snippet': f"[Content: {len(preview.get('snippet', ''))} characters]",
                        }
                    redacted.append(preview)
                obs_copy['previews'] = redacted
            return json.dumps(obs_copy, indent=2, ensure_ascii=False)
        else:
            return json.dumps(context.observation, ensure_ascii=False)
    else:
        return "No review content available"