"""
Placeholder-based prompt factory for dynamic AI calls.

This module provides prompt templates with placeholders that can be filled dynamically.
"""
|
|
|
|
import json
|
|
import logging
|
|
from typing import Dict, Any
|
|
|
|
logger = logging.getLogger(__name__)
|
|
from modules.workflows.processing.shared.promptFactory import (
|
|
getAvailableDocuments,
|
|
getPreviousRoundContext,
|
|
getMethodsList,
|
|
getEnhancedDocumentContext,
|
|
getConnectionReferenceList,
|
|
methods,
|
|
discoverMethods
|
|
)
|
|
|
|
|
|
def createTaskPlanningPromptTemplate() -> str:
    """Create the task planning prompt template.

    Returns:
        A markdown prompt string with ``{{KEY:USER_PROMPT}}``,
        ``{{KEY:AVAILABLE_DOCUMENTS}}`` and ``{{KEY:WORKFLOW_HISTORY}}``
        placeholders to be substituted by the caller before dispatch.
    """
    # NOTE: the template text below is part of the runtime contract with the
    # model — wording changes alter model behavior; edit with care.
    return """# Task Planning

Break down user requests into logical, executable task steps.

## 📋 Context

### User Request
{{KEY:USER_PROMPT}}

### Available Documents
{{KEY:AVAILABLE_DOCUMENTS}}

### Previous Workflow Rounds
{{KEY:WORKFLOW_HISTORY}}

## 📝 Task Planning Rules

### High-Level Focus
- **Create HIGH-LEVEL tasks** - one topic per task, not detailed implementation steps
- **Focus on DELIVERING** what the user asked for, not how to do it
- **Keep tasks simple** and focused on outcomes, not implementation details
- **Each task should produce** usable results for subsequent tasks

### Request Type Handling
- **DATA requests** (numbers, lists, calculations): Plan to deliver the actual data
- **DOCUMENT requests** (Word, PDF, Excel): Plan to create the formatted document
- **ANALYSIS requests**: Plan to analyze and deliver insights

### Retry Handling
- **If retry request**: Analyze previous rounds to understand what failed
- **Learn from mistakes**: Improve the plan based on previous failures

## 📊 Required JSON Structure

```json
{
  "overview": "Brief description of the overall plan",
  "languageUserDetected": "en",
  "userMessage": "User-friendly message explaining the task plan",
  "tasks": [
    {
      "id": "task_1",
      "objective": "Clear business objective focusing on what to deliver",
      "dependencies": ["task_0"],
      "success_criteria": ["measurable criteria 1", "measurable criteria 2"],
      "estimated_complexity": "low|medium|high",
      "userMessage": "What this task will accomplish"
    }
  ]
}
```

## 🎯 Task Structure Guidelines

### Task ID Format
- Use sequential numbering: `task_1`, `task_2`, `task_3`
- Keep IDs simple and clear

### Objective Writing
- **Focus on business value** - what will be delivered
- **Be specific** about the expected outcome
- **Avoid technical jargon** - use business language

### Dependencies
- **List prerequisite tasks** that must complete first
- **Use task IDs** from the same plan
- **Keep dependencies minimal** - avoid complex chains

### Success Criteria
- **Make them measurable** - specific, quantifiable outcomes
- **Focus on deliverables** - what the user will receive
- **Keep criteria realistic** - achievable within the task scope

### Complexity Estimation
- **Low**: Simple, straightforward tasks (1-2 actions)
- **Medium**: Moderate complexity (3-5 actions)
- **High**: Complex tasks requiring multiple steps (6+ actions)

## 🚀 Response Format
Return ONLY the JSON object."""
def createActionDefinitionPromptTemplate() -> str:
    """Create the action definition prompt template.

    Returns:
        A markdown prompt string with ``{{KEY:USER_PROMPT}}``,
        ``{{KEY:AVAILABLE_DOCUMENTS}}``, ``{{KEY:WORKFLOW_HISTORY}}``,
        ``{{KEY:AVAILABLE_METHODS}}``, ``{{KEY:AVAILABLE_CONNECTIONS}}`` and
        ``{{KEY:USER_LANGUAGE}}`` placeholders to be substituted by the caller.
    """
    # NOTE: the template text below is part of the runtime contract with the
    # model — wording changes alter model behavior; edit with care.
    return """# Action Definition

Generate the next action to advance toward completing the task objective.

## 📋 Context

### Task Objective
{{KEY:USER_PROMPT}}

### Available Documents
{{KEY:AVAILABLE_DOCUMENTS}}

### Workflow History
{{KEY:WORKFLOW_HISTORY}}

### Available Methods
{{KEY:AVAILABLE_METHODS}}

### Available Connections
{{KEY:AVAILABLE_CONNECTIONS}}

### User Language
{{KEY:USER_LANGUAGE}}

## ⚠️ RULES

### Action Names
- **Use EXACT compound action names** from AVAILABLE_METHODS (e.g., "ai.process", "document.extract", "web.search")
- **DO NOT create** new action names - only use those listed in AVAILABLE_METHODS
- **DO NOT separate** method and action names - use the full compound name

### Parameter Guidelines
- **Use exact document references** from AVAILABLE_DOCUMENTS
- **Use exact connection references** from AVAILABLE_CONNECTIONS
- **Include user language** if relevant
- **Avoid unnecessary fields** - host applies defaults

## 📊 Required JSON Structure

```json
{
  "actions": [
    {
      "action": "method.action_name",
      "parameters": {},
      "resultLabel": "round{current_round}_task{current_task}_action{action_number}_{descriptive_label}",
      "description": "What this action accomplishes",
      "userMessage": "User-friendly message in {{KEY:USER_LANGUAGE}}"
    }
  ]
}
```

## ✅ Correct Example

```json
{
  "actions": [
    {
      "action": "document.extract",
      "parameters": {"documentList": ["docList:msg_123:results"]},
      "resultLabel": "round1_task1_action1_extract_results",
      "description": "Extract data from documents",
      "userMessage": "Extracting data from documents"
    }
  ]
}
```


## 🎯 Action Planning Guidelines

### Method Selection
- **Choose appropriate method** based on task requirements
- **Consider available resources** (documents, connections)
- **Match method capabilities** to task objectives

### Parameter Design
- **Use ACTION SIGNATURE** to understand required parameters
- **Convert objective** into appropriate parameter values
- **Include all required parameters** for the action

### Result Labeling
- **Use descriptive labels** that explain what the action produces
- **Follow naming convention**: `round{round}_task{task}_action{action}_{label}`
- **Make labels meaningful** for future reference

### User Messages
- **Write in user language** ({{KEY:USER_LANGUAGE}})
- **Explain what's happening** in user-friendly terms
- **Keep messages concise** but informative

## 🚀 Response Format
Return ONLY the JSON object."""
def createActionSelectionPromptTemplate() -> str:
    """Create the action selection prompt template.

    Returns:
        A markdown prompt string with ``{{KEY:USER_PROMPT}}``,
        ``{{KEY:AVAILABLE_DOCUMENTS}}``, ``{{KEY:USER_LANGUAGE}}`` and
        ``{{KEY:AVAILABLE_METHODS}}`` placeholders; the model is instructed
        to answer with a single ``{"action": ...}`` JSON object.
    """
    # NOTE: the template text below is part of the runtime contract with the
    # model — wording changes alter model behavior; edit with care.
    return """# Action Selection

Select exactly one action to advance the task.

## 📋 Context

### Objective
{{KEY:USER_PROMPT}}

### Available Documents
{{KEY:AVAILABLE_DOCUMENTS}}

### User Language
{{KEY:USER_LANGUAGE}}

### Available Methods
{{KEY:AVAILABLE_METHODS}}

## ⚠️ CRITICAL RULES

### Selection Requirements
- **Return ONLY the compound action name**
- **Do NOT include parameters or prompts**
- **Use EXACT compound action names** from AVAILABLE_METHODS above
- **DO NOT create** new action names

### Action Format
- **Compound action names**: Use exact names from AVAILABLE_METHODS (e.g., "ai.process", "document.extract", "web.search")
- **Single field format**: Use the full compound action name as a single string

## 📝 Required JSON Format

```json
{"action":"method.action_name"}
```

## ✅ Correct Examples

```json
{"action":"ai.process"}
{"action":"document.extract"}
{"action":"web.search"}
```


## 🎯 Selection Guidelines

### Choose Appropriate Action
- **Match action to objective** - select the most relevant action
- **Consider available resources** - ensure required documents/connections are available
- **Think about the next step** - what action will advance the task

### Method Selection
- **AI methods**: For processing, analysis, or generation tasks
- **Document methods**: For document operations (extract, generate, etc.)
- **Web methods**: For web searches or external data retrieval
- **Other methods**: Based on specific requirements

## 🚀 Response Format
Return ONLY the JSON object."""
def createActionParameterPromptTemplate() -> str:
    """Create the action parameter generation prompt template.

    Returns:
        A markdown prompt string with ``{{KEY:SELECTED_ACTION}}``,
        ``{{KEY:USER_PROMPT}}``, ``{{KEY:AVAILABLE_DOCUMENTS}}``,
        ``{{KEY:AVAILABLE_CONNECTIONS}}``, ``{{KEY:USER_LANGUAGE}}`` and
        ``{{KEY:ACTION_SIGNATURE}}`` placeholders.
    """
    # NOTE(review): the second example under "Wrong Format (DO NOT USE)" is
    # byte-identical to the documented CORRECT wrapped format — this looks
    # like a template content bug that may confuse the model; confirm intent
    # before changing the prompt text.
    return """# Action Parameter Generation

You are an AI assistant tasked with generating parameters for a selected action.

## 🎯 Your Goal
Provide the EXACT parameters required by the ACTION SIGNATURE, using information from the OBJECTIVE, AVAILABLE DOCUMENTS, and AVAILABLE CONNECTIONS.

## ⚠️ CRITICAL RULES
- **MUST respond with a JSON object**
- **All parameters MUST be wrapped in a "parameters" object**
- **ONLY include parameters listed in the ACTION SIGNATURE**
- **Do NOT use code blocks or markdown in your response**
- **Return ONLY the JSON object**

## 📋 Document & Connection References
- **Document references**: Copy the EXACT reference string from AVAILABLE DOCUMENTS (e.g., `docList:msg_UUID:label`)
- **Connection references**: Copy the EXACT reference string from AVAILABLE CONNECTIONS (e.g., `connection:msft:user@domain.com:uuid [status:active, token:valid]`)
- **Do NOT invent, shorten, or modify any references**
- **If unsure**: Use "UNCLEAR_REFERENCE" or "UNCLEAR_OBJECTIVE" and explain in a comment

## 📝 Input Context

### Selected Action
{{KEY:SELECTED_ACTION}}

### Objective
{{KEY:USER_PROMPT}}

### Available Documents
{{KEY:AVAILABLE_DOCUMENTS}}

### Available Connections
{{KEY:AVAILABLE_CONNECTIONS}}

### User Language
{{KEY:USER_LANGUAGE}}

### Action Requirements
{{KEY:ACTION_SIGNATURE}}

## 📚 Reference Types

### Document References
- **docItem**: Reference to a single document (e.g., "docItem:uuid:filename.pdf")
- **docList**: Reference to a group of documents (e.g., "docList:msg_123:AnalysisResults")
- **Use EXACT reference strings** shown in AVAILABLE_DOCUMENTS

### Connection References
- **Use exact connection references** from AVAILABLE CONNECTIONS
- **Examples**: "connection:msft:user@domain.com:uuid [status:active, token:valid]", "connection:sp:user@domain.com:uuid [status:active, token:valid]"

## 💡 Basic Examples

```json
{"parameters":{"aiPrompt": "Summarize the document"}}
{"parameters":{"documentList": ["docList:msg_UUID:label"]}}
{"parameters":{"connectionReference": "connection:msft:user@domain.com:uuid [status:active, token:valid]"}}
```

## ❌ Wrong Format (DO NOT USE)

```json
{"aiPrompt": "Your prompt here"}
```

```json
{"parameters":{"aiPrompt": "Your prompt here"}}
```

## 🎯 Parameter Guidelines

### Required Parameters
- **Use ACTION SIGNATURE** to understand what parameters are required
- **Convert objective** into appropriate parameter values
- **Include user language** if relevant
- **Avoid unnecessary fields** - host applies defaults

### Document Reference Rules
- **ONLY use exact document reference strings** from AVAILABLE_DOCUMENTS
- **DO NOT add file paths** or individual filenames to document references
- **For documentList parameters**: Use the EXACT reference strings shown in AVAILABLE_DOCUMENTS

### Connection Reference Rules
- **ONLY use exact connection references** from AVAILABLE CONNECTIONS
- **For connectionReference parameters**: Use the exact connection reference from AVAILABLE CONNECTIONS

## 🚀 Response Format
Return your JSON response immediately after this prompt."""
def createRefinementPromptTemplate() -> str:
    """Create the workflow refinement decision prompt template.

    Returns:
        A markdown prompt string with ``{{KEY:USER_PROMPT}}`` and
        ``{{KEY:REVIEW_CONTENT}}`` placeholders; the model is instructed to
        answer with a ``{"decision": "continue"|"stop", "reason": ...}``
        JSON object.
    """
    # NOTE: the template text below is part of the runtime contract with the
    # model — wording changes alter model behavior; edit with care.
    return """# Workflow Refinement Decision

Decide the next step based on the observation.

## 📋 Context

### Objective
{{KEY:USER_PROMPT}}

### Observation
{{KEY:REVIEW_CONTENT}}

## ⚠️ CRITICAL RULES

### Data Requirements
- **If user wants DATA** (numbers, lists, calculations): Ensure AI delivers the actual data, not code
- **If user wants DOCUMENTS** (Word, PDF, Excel): Ensure appropriate method is used to create the document
- **If user wants ANALYSIS**: Ensure AI analyzes and delivers insights
- **NEVER accept code when user wants data** - demand the actual data
- **NEVER accept algorithms when user wants results** - demand the actual results

## 🤔 Decision Rules

### Continue Conditions
- The objective is **NOT fulfilled** (user didn't get what they asked for)
- More data or processing is needed
- The current result is incomplete

### Stop Conditions
- The objective is **fulfilled** (user got what they asked for)
- All required data has been delivered
- The task is complete

### Focus
- Focus on what the user actually wants, not what was delivered
- Consider the user's original request carefully

## 📝 Response Format

```json
{"decision":"continue","reason":"Need more data"}
```

### Decision Options
- `"continue"` - Keep working on the objective
- `"stop"` - Objective has been fulfilled

### Reason Examples
- `"Need more data"`
- `"Objective fulfilled"`
- `"User got the requested document"`
- `"Analysis complete"`

## 🎯 Decision Guidelines

### When to Continue
- **Incomplete results** - User didn't get what they asked for
- **Missing data** - Need to gather more information
- **Partial success** - Some but not all requirements met
- **Technical issues** - Action failed and needs retry

### When to Stop
- **Complete success** - User got exactly what they asked for
- **All criteria met** - Success criteria have been achieved
- **Document created** - Required document has been generated
- **Data delivered** - All requested data has been provided

### Quality Assessment
- **Check completeness** - Is the result complete?
- **Verify accuracy** - Is the data correct?
- **Assess usefulness** - Does it meet the user's needs?
- **Consider format** - Is it in the requested format?

## 🚀 Response Format
Return your JSON response immediately after this prompt."""
def createResultReviewPromptTemplate() -> str:
    """Create the result review and validation prompt template.

    Returns:
        A markdown prompt string with ``{{KEY:USER_PROMPT}}`` and
        ``{{KEY:REVIEW_CONTENT}}`` placeholders; the model is instructed to
        answer with a JSON object carrying status, reason, improvements,
        quality score, met/unmet criteria, confidence and a user message.
    """
    # NOTE: the template text below is part of the runtime contract with the
    # model — wording changes alter model behavior; edit with care.
    return """# Result Review & Validation

Review task execution outcomes and determine success, retry needs, or failure.

## 📋 Context

### Task Objective
{{KEY:USER_PROMPT}}

### Execution Results
{{KEY:REVIEW_CONTENT}}

## 🔍 Validation Criteria

### Action Assessment
- **Review each action's success/failure status**
- **Check if required documents were produced**
- **Validate document quality and completeness**
- **Assess if success criteria were met**
- **Identify any missing or incomplete outputs**

### Decision Making
- **Determine if retry would help** or if task should be marked as failed
- **Consider business value** and user satisfaction
- **Evaluate technical execution** and results quality

## 📊 Required JSON Structure

```json
{
  "status": "success|retry|failed",
  "reason": "Detailed explanation of the validation decision",
  "improvements": ["specific improvement 1", "specific improvement 2"],
  "quality_score": 8,
  "met_criteria": ["criteria1", "criteria2"],
  "unmet_criteria": ["criteria3", "criteria4"],
  "confidence": 0.85,
  "userMessage": "User-friendly message explaining the validation result"
}
```

## 🎯 Validation Principles

### Assessment Approach
- **Be thorough but fair** in assessment
- **Focus on business value** and outcomes
- **Consider both technical execution** and business results
- **Provide specific, actionable** improvement suggestions

### Quality Scoring
- **Use quality scores** to track progress across retries
- **Scale 1-10**: 1 = Poor, 5 = Average, 10 = Excellent
- **Consider completeness, accuracy, and usefulness**

### Criteria Evaluation
- **Clearly identify** which success criteria were met vs. unmet
- **List specific criteria** that were achieved
- **Note missing requirements** that need attention

### Confidence Levels
- **Set appropriate confidence levels** based on evidence quality
- **Scale 0.0-1.0**: 0.0 = No confidence, 1.0 = Complete confidence
- **Consider data quality** and result reliability

## 📝 Status Definitions

### Success
- **All objectives met** - User got what they asked for
- **Quality standards met** - Results are complete and accurate
- **No retry needed** - Task is fully complete

### Retry
- **Partial success** - Some but not all objectives met
- **Improvement possible** - Retry could lead to better results
- **Technical issues** - Action failures that can be resolved

### Failed
- **No progress made** - Objectives not achieved
- **Technical limitations** - Cannot be resolved with retry
- **Resource constraints** - Missing required inputs

## 💡 Improvement Suggestions

### Actionable Improvements
- **Be specific** - Don't just say "improve quality"
- **Focus on process** - How to do better next time
- **Consider resources** - What additional inputs might help
- **Technical fixes** - Address specific technical issues

### Examples
- "Use more specific document references from AVAILABLE_DOCUMENTS"
- "Include user language parameter for better localization"
- "Break down complex objective into smaller, focused actions"
- "Verify document references before processing"

## 🚀 Response Format
Return ONLY the JSON object. Do not include any explanatory text."""
# Helper functions to extract content for placeholders
|
|
|
|
def extractUserPrompt(context) -> str:
    """Return the task objective carried by *context*.

    Falls back to a placeholder message when the context has no truthy
    ``task_step`` or the step's objective is empty.
    """
    fallback = 'No request specified'
    task_step = getattr(context, 'task_step', None)
    if not task_step:
        return fallback
    return task_step.objective or fallback
|
def extractAvailableDocuments(context) -> str:
    """Return the context's available-documents listing.

    Yields a placeholder message when the context carries no truthy
    ``available_documents`` attribute.
    """
    docs = getattr(context, 'available_documents', None)
    return docs if docs else "No documents available"
|
def extractWorkflowHistory(service, context) -> str:
    """Summarize previous workflow rounds for prompt assembly.

    Delegates to ``getPreviousRoundContext`` when the context carries a
    truthy ``workflow``; otherwise (or when that helper returns nothing)
    yields the first-round placeholder message.
    """
    first_round = "No previous workflow rounds - this is the first round."
    workflow = getattr(context, 'workflow', None)
    if not workflow:
        return first_round
    return getPreviousRoundContext(service, workflow) or first_round
|
def extractAvailableMethods(service) -> str:
    """Render the discovered method catalogue as a JSON string.

    Triggers method discovery lazily, then maps each compound action name
    (``<method>.<action>``, e.g. ``ai.process``) to its description. On any
    failure an empty JSON object is returned so prompt assembly can proceed.
    """
    try:
        # Populate the shared methods registry on first use.
        if not methods:
            discoverMethods(service)

        # Flat {"<method>.<action>": description} map is easiest for the
        # model to parse; "MethodAi" -> "ai", "MethodDocument" -> "document".
        catalogue = {
            f"{method_name.replace('Method', '').lower()}.{action_name}":
                action_info.get('description', f"Execute {action_name} action")
            for method_name, method_info in methods.items()
            for action_name, action_info in method_info['actions'].items()
        }
        return json.dumps(catalogue, indent=2, ensure_ascii=False)
    except Exception as e:
        logger.error(f"Error extracting available methods: {str(e)}")
        return json.dumps({}, indent=2, ensure_ascii=False)
|
def extractUserLanguage(service) -> str:
    """Return the service user's language code, defaulting to 'en'."""
    if service and service.user:
        return service.user.language
    return 'en'
|
def _summarizeObservation(observation) -> str:
    """Serialize an observation to JSON without leaking document content.

    Preview ``snippet`` values are replaced with a ``[Content: N characters]``
    marker. New preview dicts are built instead of mutating in place: the
    original code shallow-copied the observation and then rewrote the nested
    preview dicts, which clobbered the caller's observation data.
    """
    if not isinstance(observation, dict):
        return json.dumps(observation, ensure_ascii=False)

    obs_copy = observation.copy()
    previews = obs_copy.get('previews')
    if isinstance(previews, list):
        redacted = []
        for preview in previews:
            if isinstance(preview, dict) and 'snippet' in preview:
                # Fresh dict so the caller's preview is left untouched.
                preview = dict(preview)
                preview['snippet'] = f"[Content: {len(preview.get('snippet', ''))} characters]"
            redacted.append(preview)
        obs_copy['previews'] = redacted
    return json.dumps(obs_copy, indent=2, ensure_ascii=False)


def extractReviewContent(context) -> str:
    """Extract review content from context with full document metadata.

    Sources are tried in order: ``context.action_results`` (per-result
    success/error plus document metadata, content omitted), then
    ``context.observation``, then an ``observation`` entry inside
    ``context.step_result``. Returns a placeholder when none is present.
    """
    if hasattr(context, 'action_results') and context.action_results:
        # Build a plain-text summary: one stanza per action result.
        result_summary = ""
        for i, result in enumerate(context.action_results):
            result_summary += f"\nRESULT {i+1}:\n"
            result_summary += f" Success: {result.success}\n"
            if result.error:
                result_summary += f" Error: {result.error}\n"

            if result.documents:
                result_summary += f" Documents: {len(result.documents)} document(s)\n"
                for doc in result.documents:
                    # Collect all available metadata without document content;
                    # attributes the document object lacks are dropped below.
                    doc_metadata = {
                        "name": getattr(doc, 'documentName', 'Unknown'),
                        "mimeType": getattr(doc, 'mimeType', 'Unknown'),
                        "size": getattr(doc, 'size', 'Unknown'),
                        "created": getattr(doc, 'created', 'Unknown'),
                        "modified": getattr(doc, 'modified', 'Unknown'),
                        "typeGroup": getattr(doc, 'typeGroup', 'Unknown'),
                        "documentId": getattr(doc, 'documentId', 'Unknown'),
                        "reference": getattr(doc, 'reference', 'Unknown')
                    }
                    # Remove 'Unknown' values to keep the summary clean.
                    doc_metadata = {k: v for k, v in doc_metadata.items() if v != 'Unknown'}
                    result_summary += f" - {json.dumps(doc_metadata, indent=6, ensure_ascii=False)}\n"
            else:
                result_summary += " Documents: None\n"

        return result_summary

    if hasattr(context, 'observation') and context.observation:
        return _summarizeObservation(context.observation)

    if hasattr(context, 'step_result') and context.step_result and 'observation' in context.step_result:
        return _summarizeObservation(context.step_result['observation'])

    return "No review content available"