241 lines
8.3 KiB
Python
241 lines
8.3 KiB
Python
"""
|
|
Actionplan Mode Prompt Generation
|
|
Handles prompt templates and extraction functions for actionplan mode action handling.
|
|
"""
|
|
|
|
import logging
|
|
from typing import Dict, Any, List
|
|
from modules.datamodels.datamodelChat import PromptBundle, PromptPlaceholder
|
|
from modules.workflows.processing.shared.placeholderFactory import (
|
|
extractUserPrompt,
|
|
extractAvailableDocumentsSummary,
|
|
extractWorkflowHistory,
|
|
extractAvailableMethods,
|
|
extractUserLanguage,
|
|
extractAvailableConnectionsIndex,
|
|
extractReviewContent,
|
|
)
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
def generateActionDefinitionPrompt(services, context: Any) -> PromptBundle:
    """Build the prompt bundle used to define the next workflow action.

    Defines placeholders first, then the template; returns a PromptBundle.
    Each ``{{KEY:LABEL}}`` marker in the template must correspond to one
    PromptPlaceholder label below — the host resolves them at render time,
    and unresolved markers would leak verbatim into the LLM prompt.

    Args:
        services: Service container forwarded to the ``extract*`` helpers
            (documents, connections, methods, user language).
        context: Workflow context (user prompt, history, document state).

    Returns:
        PromptBundle with the raw template and its placeholder definitions.
    """
    placeholders: List[PromptPlaceholder] = [
        PromptPlaceholder(label="USER_PROMPT", content=extractUserPrompt(context), summaryAllowed=False),
        PromptPlaceholder(label="AVAILABLE_DOCUMENTS_SUMMARY", content=extractAvailableDocumentsSummary(services, context), summaryAllowed=True),
        PromptPlaceholder(label="AVAILABLE_CONNECTIONS_INDEX", content=extractAvailableConnectionsIndex(services), summaryAllowed=False),
        PromptPlaceholder(label="WORKFLOW_HISTORY", content=extractWorkflowHistory(services, context), summaryAllowed=True),
        PromptPlaceholder(label="AVAILABLE_METHODS", content=extractAvailableMethods(services), summaryAllowed=False),
        PromptPlaceholder(label="USER_LANGUAGE", content=extractUserLanguage(services), summaryAllowed=False),
    ]

    # NOTE: the Parameter Guidelines previously said "from
    # AVAILABLE_DOCUMENTS_INDEX", a section that does not exist in this
    # prompt; corrected to AVAILABLE_DOCUMENTS_SUMMARY, the actual label
    # defined above and rendered under "### Available Documents".
    template = """# Action Definition

Generate the next action to advance toward completing the task objective.

## 📋 Context

### User Language
{{KEY:USER_LANGUAGE}}

### Task Objective
{{KEY:USER_PROMPT}}

### Available Documents
{{KEY:AVAILABLE_DOCUMENTS_SUMMARY}}

### Available Connections
{{KEY:AVAILABLE_CONNECTIONS_INDEX}}

### Workflow History
{{KEY:WORKFLOW_HISTORY}}

### Available Methods
{{KEY:AVAILABLE_METHODS}}

## ⚠️ RULES

### Action Names
- **Use EXACT compound action names** from AVAILABLE_METHODS (e.g., "ai.process", "document.extract", "web.search")
- **DO NOT create** new action names - only use those listed in AVAILABLE_METHODS
- **DO NOT separate** method and action names - use the full compound name

### Parameter Guidelines
- **Use exact document references** from AVAILABLE_DOCUMENTS_SUMMARY
- **Use exact connection references** from AVAILABLE_CONNECTIONS_INDEX
- **Include user language** if relevant
- **Avoid unnecessary fields** - host applies defaults

## 📊 Required JSON Structure

```json
{
  "actions": [
    {
      "action": "method.action_name",
      "parameters": {},
      "resultLabel": "round{current_round}_task{current_task}_action{action_number}_{descriptive_label}",
      "description": "What this action accomplishes",
      "userMessage": "User-friendly message in language '{{KEY:USER_LANGUAGE}}'"
    }
  ],
  "continuation": null
}
```

## ✅ Correct Example

```json
{
  "actions": [
    {
      "action": "document.extract",
      "parameters": {"documentList": ["docList:msg_123:results"]},
      "resultLabel": "round1_task1_action1_extract_results",
      "description": "Extract data from documents",
      "userMessage": "Extracting data from documents"
    }
  ],
  "continuation": null
}
```

## 🎯 Action Planning Guidelines

### Method Selection
- **Choose appropriate method** based on task requirements
- **Consider available resources** (documents, connections)
- **Match method capabilities** to task objectives

### Parameter Design
- **Use ACTION SIGNATURE** to understand required parameters
- **Convert objective** into appropriate parameter values
- **Include all required parameters** for the action

### Result Labeling
- **Use descriptive labels** that explain what the action produces
- **Follow naming convention**: `round{round}_task{task}_action{action}_{label}`
- **Make labels meaningful** for future reference

### User Messages
- **Write in user language:** '{{KEY:USER_LANGUAGE}}'
- **Explain what's happening** in user-friendly terms
- **Keep messages concise** but informative

## 🚀 Response Format
Return ONLY the JSON object with complete action objects. If you cannot complete the full response, set "continuation" to a brief description of what still needs to be generated. If you can complete the response, keep "continuation" as null.

LOOP_INSTRUCTION
"""

    return PromptBundle(prompt=template, placeholders=placeholders)
|
|
|
|
def generateResultReviewPrompt(context: Any, services: Any = None) -> PromptBundle:
    """Build the prompt bundle used to review and validate task results.

    Defines placeholders first, then the template; returns a PromptBundle.
    Each ``{{KEY:LABEL}}`` marker in the template must correspond to one
    PromptPlaceholder label below — unresolved markers would otherwise leak
    verbatim into the LLM prompt.

    Args:
        context: Workflow context (user prompt, execution results).
        services: Optional service container. When provided, it is used to
            resolve the ``{{KEY:USER_LANGUAGE}}`` marker that appears in the
            template's ``userMessage`` example. Defaults to None so existing
            callers that pass only ``context`` keep working unchanged.

    Returns:
        PromptBundle with the raw template and its placeholder definitions.
    """
    placeholders: List[PromptPlaceholder] = [
        PromptPlaceholder(label="USER_PROMPT", content=extractUserPrompt(context), summaryAllowed=False),
        PromptPlaceholder(label="REVIEW_CONTENT", content=extractReviewContent(context), summaryAllowed=True),
    ]

    # Bug fix: the template references {{KEY:USER_LANGUAGE}}, but the original
    # placeholder list never defined it, so the marker was left unresolved.
    # Resolve it when a services container is available (same pattern as
    # generateActionDefinitionPrompt); without services, behavior is unchanged.
    if services is not None:
        placeholders.append(
            PromptPlaceholder(label="USER_LANGUAGE", content=extractUserLanguage(services), summaryAllowed=False)
        )

    template = """# Result Review & Validation

Review task execution outcomes and determine success, retry needs, or failure.

## 📋 Context

### Task Objective
{{KEY:USER_PROMPT}}

### Execution Results
{{KEY:REVIEW_CONTENT}}

## 🔍 Validation Criteria

### Action Assessment
- **Review each action's success/failure status**
- **Check if required documents were produced**
- **Validate document quality and completeness**
- **Assess if success criteria were met**
- **Identify any missing or incomplete outputs**

### Decision Making
- **Determine if retry would help** or if task should be marked as failed
- **Consider business value** and user satisfaction
- **Evaluate technical execution** and results quality

## 📊 Required JSON Structure

```json
{
  "status": "success|retry|failed",
  "reason": "Detailed explanation of the validation decision",
  "improvements": ["specific improvement 1", "specific improvement 2"],
  "quality_score": 8,
  "met_criteria": ["criteria1", "criteria2"],
  "unmet_criteria": ["criteria3", "criteria4"],
  "confidence": 0.85,
  "userMessage": "User-friendly message explaining the validation result in language '{{KEY:USER_LANGUAGE}}'",
  "continuation": null
}
```

## 🎯 Validation Principles

### Assessment Approach
- **Be thorough but fair** in assessment
- **Focus on business value** and outcomes
- **Consider both technical execution** and business results
- **Provide specific, actionable** improvement suggestions

### Quality Scoring
- **Use quality scores** to track progress across retries
- **Scale 1-10**: 1 = Poor, 5 = Average, 10 = Excellent
- **Consider completeness, accuracy, and usefulness**

### Criteria Evaluation
- **Clearly identify** which success criteria were met vs. unmet
- **List specific criteria** that were achieved
- **Note missing requirements** that need attention

### Confidence Levels
- **Set appropriate confidence levels** based on evidence quality
- **Scale 0.0-1.0**: 0.0 = No confidence, 1.0 = Complete confidence
- **Consider data quality** and result reliability

## 📝 Status Definitions

### Success
- **All objectives met** - User got what they asked for
- **Quality standards met** - Results are complete and accurate
- **No retry needed** - Task is fully complete

### Retry
- **Partial success** - Some but not all objectives met
- **Improvement possible** - Retry could lead to better results
- **Technical issues** - Action failures that can be resolved

### Failed
- **No progress made** - Objectives not achieved
- **Technical limitations** - Cannot be resolved with retry
- **Resource constraints** - Missing required inputs

## 💡 Improvement Suggestions

### Actionable Improvements
- **Be specific** - Don't just say "improve quality"
- **Focus on process** - How to do better next time
- **Consider resources** - What additional inputs might help
- **Technical fixes** - Address specific technical issues

### Examples
- "Use more specific document references from AVAILABLE_DOCUMENTS_INDEX"
- "Include user language parameter for better localization"
- "Break down complex objective into smaller, focused actions"
- "Verify document references before processing"

LOOP_INSTRUCTION
"""

    return PromptBundle(prompt=template, placeholders=placeholders)
|
|
|