1098 lines
No EOL
55 KiB
Python
1098 lines
No EOL
55 KiB
Python
# promptFactory.py
|
||
# Contains all prompt creation functions
|
||
|
||
import json
|
||
import logging
|
||
import importlib
|
||
import pkgutil
|
||
import inspect
|
||
from typing import Any, Dict, List
|
||
from modules.datamodels.datamodelWorkflow import TaskContext, ReviewContext, DocumentExchange
|
||
from modules.datamodels.datamodelChat import ChatDocument
|
||
from modules.services.serviceDocument.subDocumentUtility import getFileExtension
|
||
from modules.workflows.methods.methodBase import MethodBase
|
||
|
||
# Set up logger
|
||
logger = logging.getLogger(__name__)
|
||
|
||
# Global methods catalog - moved from serviceCenter
|
||
methods = {}
|
||
|
||
def _discoverMethods(serviceCenter):
    """Dynamically discover all method classes and their actions in the modules methods package.

    Scans ``modules.workflows.methods`` for submodules whose name starts with
    ``method``, instantiates every class derived from MethodBase found there,
    introspects each instance's public coroutine methods, and registers the
    result in the module-level ``methods`` catalog keyed by the instance's
    ``name`` attribute.

    Args:
        serviceCenter: Service container handed to each method class constructor.
    """
    try:
        # Import the methods package
        methodsPackage = importlib.import_module('modules.workflows.methods')

        # Discover all modules in the package
        for _, name, isPkg in pkgutil.iter_modules(methodsPackage.__path__):
            if not isPkg and name.startswith('method'):
                try:
                    # Import the module
                    module = importlib.import_module(f'modules.workflows.methods.{name}')

                    # Find all classes in the module that inherit from MethodBase
                    for itemName, item in inspect.getmembers(module):
                        if (inspect.isclass(item) and
                                issubclass(item, MethodBase) and
                                item is not MethodBase):
                            # Instantiate the method
                            methodInstance = item(serviceCenter)

                            # Discover actions from public coroutine (async) methods only
                            actions = {}
                            for methodName, method in inspect.getmembers(type(methodInstance), predicate=inspect.iscoroutinefunction):
                                if not methodName.startswith('_'):
                                    # Bind the class-level function to the instance so it is directly awaitable
                                    bound_method = method.__get__(methodInstance, type(methodInstance))
                                    sig = inspect.signature(method)
                                    params = {}
                                    for paramName, param in sig.parameters.items():
                                        if paramName != 'self':
                                            # Fall back to Any when no annotation is present.
                                            # Identity checks (`is` / `is not`) are the documented way
                                            # to compare against the inspect sentinel `Parameter.empty`.
                                            paramType = param.annotation if param.annotation is not param.empty else Any

                                            # Get parameter description from the default's docstring.
                                            # NOTE(review): every Python object has a __doc__, so this
                                            # usually yields the docstring of the default value's *type*
                                            # (e.g. `str`) - confirm this is the intended description source.
                                            paramDesc = None
                                            if param.default is not param.empty and hasattr(param.default, '__doc__'):
                                                paramDesc = param.default.__doc__

                                            params[paramName] = {
                                                'type': paramType,
                                                'required': param.default is param.empty,
                                                'description': paramDesc,
                                                'default': param.default if param.default is not param.empty else None
                                            }

                                    actions[methodName] = {
                                        'description': method.__doc__ or '',
                                        'parameters': params,
                                        'method': bound_method
                                    }

                            # Add method instance with discovered actions
                            methods[methodInstance.name] = {
                                'instance': methodInstance,
                                'description': methodInstance.description,
                                'actions': actions
                            }
                            logger.info(f"Discovered method: {methodInstance.name} with {len(actions)} actions")

                except Exception as e:
                    # One broken module must not abort discovery of the others
                    logger.error(f"Error loading method module {name}: {str(e)}", exc_info=True)

    except Exception as e:
        # Best-effort discovery; include the traceback for consistency with the inner handler
        logger.error(f"Error discovering methods: {str(e)}", exc_info=True)
|
||
|
||
def getMethodsList(serviceCenter) -> List[str]:
    """Get list of available methods with their signatures in the required format.

    Lazily populates the module-level ``methods`` catalog on first call.

    Args:
        serviceCenter: Service container used for method discovery.

    Returns:
        List[str]: One formatted signature string per discovered action.
    """
    # Initialize methods if not already done
    if not methods:
        _discoverMethods(serviceCenter)

    methodList = []
    # Keys of `methods` (the method names) are not needed here - iterate values only
    for method in methods.values():
        methodInstance = method['instance']
        for actionName in method['actions']:
            # Delegate formatting to MethodBase's signature builder;
            # actions without a signature are silently skipped
            signature = methodInstance.getActionSignature(actionName)
            if signature:
                methodList.append(signature)
    return methodList
|
||
|
||
def _formatExchangeEntry(serviceCenter, exchange, log_context: str = "") -> str:
    """Format one document exchange as a docList entry with nested docItem lines.

    Resolves the workflow message whose documentsLabel matches the exchange so
    the docList reference can carry the message id; falls back to a label-only
    reference when no matching message exists.

    Args:
        serviceCenter: Service container exposing the current workflow.
        exchange: DocumentExchange-like object with documentsLabel and documents.
        log_context: Extra prefix for the debug log line ("" or "history ").

    Returns:
        str: The formatted entry, terminated by a blank line.
    """
    # Find the message that corresponds to this exchange
    message_id = None
    for message in serviceCenter.workflow.messages:
        if hasattr(message, 'documentsLabel') and message.documentsLabel == exchange.documentsLabel:
            message_id = message.id
            break

    if message_id:
        doc_list_ref = f"docList:{message_id}:{exchange.documentsLabel}"
    else:
        # Fallback to label-only format if message ID not found
        doc_list_ref = f"docList:{exchange.documentsLabel}"

    logger.debug(f"Using {log_context}document label for action planning: {exchange.documentsLabel} (message_id: {message_id})")

    entry = f"- {doc_list_ref} contains:\n"
    # Generate docItem references for each document in the list
    for doc_ref in exchange.documents:
        if doc_ref.startswith("docItem:"):
            entry += f"  - {doc_ref}\n"
        else:
            # Convert to proper docItem format if needed
            entry += f"  - docItem:{doc_ref}\n"
    entry += "\n"
    return entry


def getEnhancedDocumentContext(serviceCenter) -> str:
    """Get enhanced document context formatted for action planning prompts with proper docList and docItem references.

    Args:
        serviceCenter: Service container providing getDocumentReferenceList().

    Returns:
        str: Technical context listing current-round and history documents, or a
             "NO DOCUMENTS AVAILABLE" notice when there is nothing to process
             (also used as the fallback message on errors).
    """
    try:
        document_list = serviceCenter.getDocumentReferenceList()

        # Build technical context string for AI action planning
        context = "AVAILABLE DOCUMENTS:\n\n"

        # Process chat exchanges (current round)
        if document_list["chat"]:
            context += "CURRENT ROUND DOCUMENTS:\n"
            for exchange in document_list["chat"]:
                context += _formatExchangeEntry(serviceCenter, exchange)

        # Process history exchanges (previous rounds) - same layout, history-flavored log line
        if document_list["history"]:
            context += "WORKFLOW HISTORY DOCUMENTS:\n"
            for exchange in document_list["history"]:
                context += _formatExchangeEntry(serviceCenter, exchange, "history ")

        if not document_list["chat"] and not document_list["history"]:
            context += "NO DOCUMENTS AVAILABLE - This workflow has no documents to process.\n"

        return context

    except Exception as e:
        logger.error(f"Error generating enhanced document context: {str(e)}")
        return "NO DOCUMENTS AVAILABLE - Error generating document context."
|
||
|
||
# Prompt creation helpers
|
||
|
||
def _getAvailableDocuments(workflow) -> str:
|
||
"""
|
||
Get simple description of available documents for task planning.
|
||
|
||
Args:
|
||
workflow: ChatWorkflow object
|
||
|
||
Returns:
|
||
str: Simple description of document availability
|
||
"""
|
||
total_documents = 0
|
||
document_types = set()
|
||
|
||
for message in workflow.messages:
|
||
if message.documents:
|
||
total_documents += len(message.documents)
|
||
for doc in message.documents:
|
||
try:
|
||
file_extension = getFileExtension(doc.fileName)
|
||
if file_extension:
|
||
document_types.add(file_extension.upper())
|
||
except:
|
||
pass
|
||
|
||
if total_documents == 0:
|
||
return "No documents available"
|
||
elif len(document_types) == 0:
|
||
return f"{total_documents} document(s) available"
|
||
else:
|
||
types_str = ", ".join(sorted(document_types))
|
||
return f"{total_documents} document(s) available ({types_str} files)"
|
||
|
||
def _getConnectionReferenceList(service) -> List[str]:
|
||
"""Get list of all UserConnection objects as references with enhanced state information"""
|
||
connections = []
|
||
# Get user connections
|
||
user_connections = service.interfaceDbApp.getUserConnections(service.user.id)
|
||
|
||
refreshed_count = 0
|
||
for conn in user_connections:
|
||
# Get enhanced connection reference with state information
|
||
enhanced_ref = service.getConnectionReferenceFromUserConnection(conn)
|
||
connections.append(enhanced_ref)
|
||
|
||
# Count refreshed tokens
|
||
if "refreshed" in enhanced_ref:
|
||
refreshed_count += 1
|
||
|
||
# Sort by connection reference
|
||
if refreshed_count > 0:
|
||
logger.info(f"Refreshed {refreshed_count} connection tokens while building action planning prompt")
|
||
return sorted(connections)
|
||
|
||
def _getPreviousRoundContext(service, workflow) -> str:
|
||
"""Get context from previous workflow rounds to help understand follow-up prompts"""
|
||
try:
|
||
if not workflow or not hasattr(workflow, 'messages') or not workflow.messages:
|
||
return ""
|
||
|
||
# Get current round number
|
||
current_round = getattr(workflow, 'currentRound', 0)
|
||
|
||
# If this is round 0 or 1, there's no previous context
|
||
if current_round <= 1:
|
||
return ""
|
||
|
||
# Find messages from previous rounds (rounds before current)
|
||
previous_messages = []
|
||
for message in workflow.messages:
|
||
message_round = getattr(message, 'roundNumber', 0)
|
||
if message_round > 0 and message_round < current_round:
|
||
previous_messages.append(message)
|
||
|
||
if not previous_messages:
|
||
return ""
|
||
|
||
# Sort by round number and sequence to get chronological order
|
||
previous_messages.sort(key=lambda msg: (getattr(msg, 'roundNumber', 0), getattr(msg, 'sequenceNr', 0)))
|
||
|
||
# Build context summary
|
||
context_parts = []
|
||
current_round_context = {}
|
||
|
||
for message in previous_messages:
|
||
round_num = getattr(message, 'roundNumber', 0)
|
||
if round_num not in current_round_context:
|
||
current_round_context[round_num] = {
|
||
'user_inputs': [],
|
||
'assistant_responses': [],
|
||
'task_outcomes': [],
|
||
'documents_processed': []
|
||
}
|
||
|
||
# Categorize messages
|
||
if message.role == 'user':
|
||
current_round_context[round_num]['user_inputs'].append(message.message)
|
||
elif message.role == 'assistant':
|
||
# Check if it's a task completion or error message
|
||
if 'task' in message.message.lower() and ('completed' in message.message.lower() or 'failed' in message.message.lower() or 'error' in message.message.lower()):
|
||
current_round_context[round_num]['task_outcomes'].append(message.message)
|
||
else:
|
||
current_round_context[round_num]['assistant_responses'].append(message.message)
|
||
|
||
# Check for document processing
|
||
if hasattr(message, 'documents') and message.documents:
|
||
doc_names = [doc.fileName for doc in message.documents if hasattr(doc, 'fileName')]
|
||
if doc_names:
|
||
current_round_context[round_num]['documents_processed'].extend(doc_names)
|
||
|
||
# Build context summary
|
||
for round_num in sorted(current_round_context.keys()):
|
||
round_data = current_round_context[round_num]
|
||
context_parts.append(f"ROUND {round_num} CONTEXT:")
|
||
|
||
if round_data['user_inputs']:
|
||
context_parts.append(f" User requests: {'; '.join(round_data['user_inputs'])}")
|
||
|
||
if round_data['task_outcomes']:
|
||
context_parts.append(f" Task outcomes: {'; '.join(round_data['task_outcomes'])}")
|
||
|
||
if round_data['documents_processed']:
|
||
context_parts.append(f" Documents processed: {', '.join(set(round_data['documents_processed']))}")
|
||
|
||
if context_parts:
|
||
return "\n".join(context_parts)
|
||
else:
|
||
return ""
|
||
|
||
except Exception as e:
|
||
logger.error(f"Error getting previous round context: {str(e)}")
|
||
return ""
|
||
|
||
def createTaskPlanningPrompt(context: TaskContext, service) -> str:
    """Create enhanced prompt for task planning with user-friendly message generation and language detection.

    Args:
        context: TaskContext carrying the task step (user request) and the workflow.
        service: Service container used to derive previous-round context.

    Returns:
        str: Fully rendered prompt instructing the model to emit a JSON task plan.
    """
    # NOTE: language detection is delegated to the model itself (it fills the
    # languageUserDetected field), so no server-side user language lookup is needed here.

    # Extract user request from context - use Pydantic model directly
    user_request = context.task_step.objective if context.task_step else 'No request specified'

    # Get available documents using generic function
    available_documents = _getAvailableDocuments(context.workflow) if context.workflow else "No documents available"

    # Get previous workflow round context for better understanding of follow-up prompts
    previous_round_context = _getPreviousRoundContext(service, context.workflow)

    return f"""You are a task planning AI that analyzes user requests and creates structured, self-contained task plans with user-friendly feedback messages.

USER REQUEST: {user_request}

AVAILABLE DOCUMENTS: {available_documents}

PREVIOUS WORKFLOW ROUNDS CONTEXT:
{previous_round_context if previous_round_context else "No previous workflow rounds - this is the first round."}

INSTRUCTIONS:
1. Analyze the user request, available documents, and previous workflow rounds context
2. If the user request appears to be a follow-up (like "try again", "versuche es nochmals", "retry", etc.),
   use the PREVIOUS WORKFLOW ROUNDS CONTEXT to understand what the user wants to retry or continue
3. Group related topics and sequential steps into single, comprehensive tasks
4. Focus on business outcomes, not technical operations
5. Make each task self-contained: clearly state what to do and what outputs are expected
6. Ensure proper handover between tasks (later actions will use your task outputs)
7. Detect the language of the user request and include it in languageUserDetected
8. Generate user-friendly messages for each task in the user's request language
9. Return a JSON object with the exact structure shown below

TASK GROUPING PRINCIPLES:
- COMBINE RELATED TOPICS: Group related subjects, sequential steps, or workflow-structured activities into single tasks
- SEQUENTIAL WORKFLOWS: If the user says "first do this, then that, then that" → create ONE task that handles the entire sequence
- SIMILAR CONTENT: If multiple items deal with the same subject matter → combine into ONE comprehensive task
- ONLY SPLIT WHEN DIFFERENT: Create separate tasks ONLY when the user explicitly wants different, independent things

EXAMPLES OF GOOD TASK GROUPING:

COMBINE INTO ONE TASK:
- "Analyze the documents, extract key insights, and create a summary report" → ONE task: "Analyze documents and create comprehensive summary report"
- "First check my emails, then respond to urgent ones, then organize my inbox" → ONE task: "Process and organize email inbox with priority responses"
- "Review the budget, analyze spending patterns, and suggest cost-cutting measures" → ONE task: "Comprehensive budget analysis with optimization recommendations"
- "Create a business strategy, develop marketing plan, and prepare presentation" → ONE task: "Develop complete business strategy with marketing plan and presentation"

SPLIT INTO MULTIPLE TASKS:
- "Create a business strategy for Q4" AND "Check my emails for messages from my assistant" → TWO separate tasks (different subjects)
- "Analyze customer feedback" AND "Prepare quarterly financial report" → TWO separate tasks (different business areas)
- "Review project timeline" AND "Update employee handbook" → TWO separate tasks (unrelated activities)

TASK PLANNING PRINCIPLES:
- Break down complex requests into logical, sequential steps
- Focus on business value and outcomes
- Keep tasks at a meaningful level of abstraction (not implementation details)
- Each task should produce results that can be used by subsequent tasks
- Ensure clear dependencies and handovers between tasks
- Provide clear, actionable user messages in the user's request language
- Group related activities to minimize task fragmentation
- Only create multiple tasks when dealing with truly different, independent objectives
- Make task objectives action-oriented and specific (include scope, data sources to consider, and output intent at high level)
- Write success_criteria as measurable acceptance criteria focusing on outputs (what artifacts or insights will exist and how they are validated)

FOLLOW-UP PROMPT HANDLING:
- If the user request is a follow-up (e.g., "try again", "versuche es nochmals", "retry", "continue", "proceed"),
  analyze the PREVIOUS WORKFLOW ROUNDS CONTEXT to understand what failed or was incomplete
- Use the previous round's user requests and task outcomes to determine what the user wants to retry
- If previous rounds failed due to missing documents, and documents are now available,
  create tasks that use the newly available documents to accomplish the original request
- Maintain the same business objective from previous rounds but adapt to current available resources

SPECIFIC SCENARIO HANDLING:
- If previous round failed with "documents missing" error and current round has documents available,
  the user likely wants to retry the same operation with the newly provided documents
- Example: Previous round "speichere mir die 3 dokumente im sharepoint unter xxx" failed due to missing documents,
  current round "versuche es nochmals" with documents should retry the SharePoint save operation
- Always check if the current request is a retry by looking for retry keywords and previous round context

REQUIRED JSON STRUCTURE:
{{
    "overview": "Brief description of the overall plan",
    "languageUserDetected": "en", // Language code detected from user request (en, de, fr, it, es, etc.)
    "userMessage": "User-friendly message explaining the task plan in user's request language",
    "tasks": [
        {{
            "id": "task_1",
            "objective": "Clear business objective this task accomplishes (combining related activities)",
            "dependencies": ["task_0"], // IDs of tasks that must complete first
            "success_criteria": ["criteria1", "criteria2"],
            "estimated_complexity": "low|medium|high",
            "userMessage": "User-friendly message explaining what this task will accomplish in user's request language"
        }}
    ]
}}

EXAMPLES OF GOOD TASK OBJECTIVES (COMBINING RELATED ACTIVITIES):
- "Analyze documents and extract key insights for business communication"
- "Create professional business communication incorporating analyzed information"
- "Execute business communication using specified channels and document outcomes"
- "Develop comprehensive business strategy with implementation roadmap and success metrics"

EXAMPLES OF WELL-FORMED SUCCESS CRITERIA (OUTPUT-FOCUSED):
- "Deliver a prioritized list of 10–20 candidates with justification"
- "Provide a structured JSON with fields: company, ticker, rationale, metrics"
- "Produce a presentation outline with 5 sections and bullet points per section"
- "Include data sources and date stamped references for traceability"

EXAMPLES OF GOOD SUCCESS CRITERIA:
- "Key insights extracted and ready for business use"
- "Professional communication created with clear business value"
- "Business communication successfully delivered and documented"
- "All outcomes properly documented and accessible"

EXAMPLES OF BAD TASK OBJECTIVES:
- "Read the PDF file" (too granular - should be "Analyze document content")
- "Convert data to CSV" (implementation detail - should be "Structure data for analysis")
- "Send email" (too specific - should be "Deliver business communication")

LANGUAGE DETECTION:
- Analyze the user request text to identify the language
- Use standard language codes: en (English), de (German), fr (French), it (Italian), es (Spanish), etc.
- If the language cannot be determined, use "en" as default
- Include the detected language in the languageUserDetected field

NOTE: Respond with ONLY the JSON object. Do not include any explanatory text."""
|
||
|
||
async def createActionDefinitionPrompt(context: TaskContext, service) -> str:
|
||
"""Create enhanced prompt for action generation with user-friendly messages and enhanced document context"""
|
||
methodList = getMethodsList(service)
|
||
method_actions = {}
|
||
for sig in methodList:
|
||
if '.' in sig:
|
||
method, rest = sig.split('.', 1)
|
||
action = rest.split('(')[0]
|
||
method_actions.setdefault(method, []).append((action, sig))
|
||
|
||
messageSummary = await service.methodService.summarizeChat(context.workflow.messages) if context.workflow else ""
|
||
|
||
# Get enhanced document context using the new method
|
||
available_documents_str = getEnhancedDocumentContext(service)
|
||
|
||
# Get available documents and connections using generic functions
|
||
available_docs_summary = _getAvailableDocuments(context.workflow)
|
||
connRefs = _getConnectionReferenceList(service)
|
||
|
||
# Create a structured JSON format for better AI parsing
|
||
# This replaces the old hard-to-read format with a clean JSON structure
|
||
# that the AI can easily parse and understand
|
||
available_methods_json = {}
|
||
for method, actions in method_actions.items():
|
||
available_methods_json[method] = {}
|
||
# Get the method instance for accessing docstrings
|
||
method_instance = methods.get(method, {}).get('instance') if methods else None
|
||
|
||
for action, sig in actions:
|
||
# Parse the signature to extract parameters
|
||
if '(' in sig and ')' in sig:
|
||
# Extract parameters from signature
|
||
params_start = sig.find('(')
|
||
params_end = sig.find(')')
|
||
params_str = sig[params_start+1:params_end]
|
||
|
||
# Parse parameters directly from the docstring - much simpler and more reliable!
|
||
parameters = []
|
||
|
||
# Get the actual function's docstring
|
||
if method_instance and hasattr(method_instance, action):
|
||
func = getattr(method_instance, action)
|
||
if hasattr(func, '__doc__') and func.__doc__:
|
||
docstring = func.__doc__
|
||
|
||
# Parse Parameters section from docstring
|
||
lines = docstring.split('\n')
|
||
in_parameters = False
|
||
for i, line in enumerate(lines):
|
||
original_line = line
|
||
line = line.strip()
|
||
|
||
if line == 'Parameters:':
|
||
in_parameters = True
|
||
continue
|
||
elif in_parameters and line and not original_line.startswith(' ') and not original_line.startswith('\t'):
|
||
# End of parameters section
|
||
break
|
||
elif in_parameters and (original_line.startswith(' ') or original_line.startswith('\t')):
|
||
# This is a parameter line - already stripped
|
||
# Format: "paramName (type): description"
|
||
if ':' in line:
|
||
# Find the colon that separates param from description
|
||
colon_pos = line.find(':')
|
||
param_part = line[:colon_pos].strip()
|
||
description = line[colon_pos+1:].strip()
|
||
|
||
# Parse parameter name and type
|
||
if '(' in param_part and ')' in param_part:
|
||
param_name = param_part.split('(')[0].strip()
|
||
type_part = param_part[param_part.find('(')+1:param_part.find(')')].strip()
|
||
|
||
# Check if optional
|
||
is_optional = 'optional' in type_part
|
||
param_type = type_part.replace('optional', '').strip().rstrip(',').strip()
|
||
|
||
parameters.append({
|
||
"name": param_name,
|
||
"type": param_type,
|
||
"description": description,
|
||
"required": not is_optional
|
||
})
|
||
|
||
available_methods_json[method][action] = {
|
||
"signature": sig,
|
||
"parameters": parameters,
|
||
"description": f"{method}.{action} action"
|
||
}
|
||
|
||
# Convert to a compact, AI-friendly format
|
||
available_methods_str = f"""
|
||
AVAILABLE ACTIONS (JSON format for better AI parsing):
|
||
{json.dumps(available_methods_json, indent=1, separators=(',', ':'))}
|
||
"""
|
||
retry_context = ""
|
||
if context.retry_count and context.retry_count > 0:
|
||
retry_context = f"""
|
||
RETRY CONTEXT (Attempt {context.retry_count}):
|
||
Previous action results that failed or were incomplete:
|
||
"""
|
||
for i, result in enumerate(context.previous_action_results or []):
|
||
retry_context += f"- Action {i+1}: ActionResult\n"
|
||
retry_context += f" Status: {result.success and 'success' or 'failed'}\n"
|
||
retry_context += f" Error: {result.error or 'None'}\n"
|
||
# Check if result has documents and show document info
|
||
if result.documents:
|
||
doc_info = f"Documents: {len(result.documents)} document(s)"
|
||
if result.documents[0].documentName:
|
||
doc_info += f" - {result.documents[0].documentName}"
|
||
retry_context += f" {doc_info}\n"
|
||
else:
|
||
retry_context += f" Documents: None\n"
|
||
|
||
if context.previous_review_result:
|
||
retry_context += f"""
|
||
Previous review feedback:
|
||
- Status: {context.previous_review_result.status or 'unknown'}
|
||
- Reason: {context.previous_review_result.reason or 'No reason provided'}
|
||
- Quality Score: {context.previous_review_result.quality_score or 0}/10
|
||
- Unmet Criteria: {', '.join(context.previous_review_result.unmet_criteria or [])}
|
||
"""
|
||
|
||
# Use Pydantic model directly - no need for getattr
|
||
success_criteria_str = ', '.join(context.task_step.success_criteria) if context.task_step and context.task_step.success_criteria else 'No criteria specified'
|
||
previous_results_str = ', '.join(context.previous_results) if context.previous_results else 'None'
|
||
improvements_str = str(context.improvements) if context.improvements else 'None'
|
||
available_connections_str = '\n'.join(f"- {conn}" for conn in connRefs)
|
||
|
||
# Get user language from service - this is the correct way
|
||
user_language = service.user.language if service and service.user else 'en'
|
||
|
||
# Get current workflow context for dynamic examples
|
||
workflow_context = service.methodService.getWorkflowContext()
|
||
current_round = workflow_context.get('currentRound', 0)
|
||
current_task = workflow_context.get('currentTask', 1)
|
||
|
||
prompt = f"""
|
||
You are an action generation AI that creates specific actions to accomplish a task step with user-friendly messages.
|
||
|
||
DOCUMENT REFERENCE TYPES:
|
||
- docItem: Reference to a single document
|
||
- docList: Reference to a group of documents
|
||
- round{{round_number}}_task{{task_number}}_action{{action_number}}_{{context}}: Reference to resulting document list from previous action
|
||
|
||
USAGE GUIDE:
|
||
- Use docItem when you need a specific document: "docItem:doc_123:component_diagram.pdf"
|
||
- Use docList when you need all documents in a group: "docList:msg_456:AnalysisResults"
|
||
- Use round/task/action format when referencing outputs from previous actions: "round{current_round}_task{current_task}_action2_AnalysisResults"
|
||
|
||
CRITICAL DOCUMENT REFERENCE RULES:
|
||
- ONLY use the exact labels listed in AVAILABLE DOCUMENTS below, or result labels from previous actions
|
||
- When generating multiple actions, you may only use as input documents those that are already present in AVAILABLE DOCUMENTS or produced by actions that come earlier in the list. Do NOT use as input any document label that will be produced by a later action.
|
||
- If there are no documents available, you CANNOT create document extraction actions. Instead, prefer using web actions (web.search, web.scrape, web.crawl) when external information can satisfy the request; only generate a status/information report if the task truly requires user-provided documents.
|
||
|
||
CURRENT WORKFLOW CONTEXT:
|
||
- Current Round: {current_round}
|
||
- Current Task: {current_task}
|
||
- Use these values when creating resultLabel references
|
||
|
||
TASK STEP: {context.task_step.objective if context.task_step else 'No task step specified'} (ID: {context.task_step.id if context.task_step else 'unknown'})
|
||
|
||
SUCCESS CRITERIA: {success_criteria_str}
|
||
|
||
CONTEXT - Chat History:
|
||
{messageSummary}
|
||
|
||
WORKFLOW CONTEXT - Previous Messages Summary:
|
||
The following summarizes key information from previous workflow interactions to provide context for continued workflows:
|
||
- Previous user inputs and their outcomes
|
||
- Key decisions and findings from earlier tasks
|
||
- Document processing results and insights
|
||
- User preferences and requirements established
|
||
|
||
This context helps ensure your actions build upon previous work and maintain consistency with the overall workflow objectives.
|
||
|
||
AVAILABLE METHODS AND ACTIONS (with signatures):
|
||
{available_methods_str}
|
||
|
||
AVAILABLE CONNECTIONS:
|
||
{available_connections_str}
|
||
|
||
AVAILABLE DOCUMENTS:
|
||
{available_documents_str}
|
||
|
||
DOCUMENT REFERENCE EXAMPLES:
|
||
✅ CORRECT: Use exact references from AVAILABLE DOCUMENTS above or result labels from previous actions
|
||
- "docList:msg_456:diagram_analysis_results" (access all documents in a list)
|
||
- "docItem:doc_123:component_diagram.pdf" (access specific document)
|
||
- "round{current_round}_task{current_task}_action3_contextinfo" (document list from previous action)
|
||
|
||
❌ INCORRECT: These will cause errors
|
||
- "msg_xxx:documents" (invalid format - missing docList/docItem prefix)
|
||
- "task_2_results" (not a valid reference - use exact references from AVAILABLE DOCUMENTS)
|
||
- Inventing document IDs not produces from a preceeding action
|
||
|
||
PREVIOUS RESULTS: {previous_results_str}
|
||
IMPROVEMENTS NEEDED: {improvements_str}
|
||
|
||
PREVIOUS TASK HANDOVER CONTEXT:
|
||
{context.previous_handover.workflowSummary if context.previous_handover and context.previous_handover.workflowSummary else 'No previous task handover available'}
|
||
|
||
{retry_context}
|
||
|
||
ACTION GENERATION PRINCIPLES:
|
||
- Create meaningful actions per task step
|
||
- Focus on business outcomes, not technical operations
|
||
- Combine related operations into single actions when possible
|
||
- Select the method that best fulfills the objective based on context (do not default to any specific method).
|
||
- Each action must be self-contained and executable with the provided parameters
|
||
- For document extraction, ensure prompts are specific and detailed
|
||
- Include validation steps in extraction prompts where relevant
|
||
- If this is a retry, learn from previous failures and improve the approach
|
||
- Address specific issues mentioned in previous review feedback
|
||
- When specifying expectedDocumentFormats, ensure AI prompts explicitly request pure data without markdown formatting
|
||
- Generate user-friendly messages for each action in the user's language ({user_language})
|
||
|
||
PARAMETER COMPLETENESS REQUIREMENTS:
|
||
- Every parameter must contain all information needed to execute without implicit context
|
||
- Use explicit, concrete values (units, languages, formats, limits, date ranges, IDs) when applicable
|
||
- For search-like parameters (if any method requires a query), derive the query from the task objective AND ALL success criteria dimensions. Include:
|
||
- Key entities and domain terms from the objective
|
||
- All distinct facets from success_criteria (e.g., valuation AND AI potential AND know-how needs)
|
||
- Geography/localization (e.g., Schweiz/Suisse/Switzerland; use multilingual synonyms when helpful)
|
||
- Time horizon or recency if relevant
|
||
- Boolean operators and synonyms to increase precision (use AND/OR, quotes, parentheses)
|
||
- Avoid single-topic or generic queries focused only on one facet (e.g., pure valuation metrics)
|
||
- When facets are truly distinct, create 1–3 focused actions with precise queries rather than one vague catch-all
|
||
- Document list parameters must reference only existing labels or prior action outputs; do not reference future outputs
|
||
|
||
USER LANGUAGE: {user_language} - All user messages must be generated in this language.
|
||
|
||
DOCUMENT ROUTING GUIDANCE:
|
||
- Each action should produce documents with a clear resultLabel for routing
|
||
- Use consistent naming: "round{current_round}_task{{task_id}}_action{{action_number}}_{{descriptive_label}}"
|
||
- Ensure document flow: Action A produces documents that Action B can consume
|
||
- Document labels should be descriptive of content, not just "results" or "output"
|
||
- Consider what subsequent actions will need and structure outputs accordingly
|
||
|
||
INSTRUCTIONS:
|
||
- Generate actions to accomplish this task step using available documents, connections, and previous results
|
||
- Use docItem for single documents and docList for groups of documents as shown in AVAILABLE DOCUMENTS
|
||
- If there are no documents available, do not create document extraction actions. Select methods strictly based on the task objective; choose web actions when external information is required. Otherwise, generate a status/information report requesting needed inputs.
|
||
- Always pass documentList as a LIST of references (docItem and/or docList) - this list CANNOT be empty for document extraction actions
|
||
- For referencing documents from previous actions, use the format "round{{round_number}}_task{{task_number}}_action{{action_number}}_{{context}}"
|
||
- For resultLabel, use the format: "round{current_round}_task{{task_id}}_action{{action_number}}_{{short_label}}" where:
|
||
- {{round_number}} = the current round number ({current_round})
|
||
- {{task_id}} = the current task's id ({current_task})
|
||
- {{action_number}} = the sequence number of the action within the task (e.g., 1, 2, 3)
|
||
- {{short_label}} = a short, descriptive label for the output (e.g., "AnalysisResults")
|
||
Example: "round{current_round}_task{current_task}_action1_AnalysisResults"
|
||
- If this is a retry, ensure the new actions address the specific issues from previous attempts
|
||
- Follow the JSON structure below. All fields are required.
|
||
|
||
REQUIRED JSON STRUCTURE:
|
||
{{
|
||
"actions": [
|
||
{{
|
||
"method": "method_name", // Use only the method name (e.g., "document")
|
||
"action": "action_name", // Use only the action name (e.g., "extract")
|
||
"parameters": {{
|
||
"documentList": ["docItem:doc_abc:round{current_round}_task{current_task}_action1_AnalysisResults", "round{current_round}_task{current_task}_action1_input"],
|
||
"aiPrompt": "Comprehensive AI prompt describing what to accomplish"
|
||
}},
|
||
"resultLabel": "round{current_round}_task{current_task}_action2_AnalysisResults",
|
||
"expectedDocumentFormats": [ // OPTIONAL: Specify expected document formats when needed
|
||
{{
|
||
"extension": ".txt",
|
||
"mimeType": "text/plain",
|
||
"description": "Structured data output"
|
||
}}
|
||
],
|
||
"description": "What this action accomplishes (business outcome)",
|
||
"userMessage": "User-friendly message explaining what this action will do in the user's language"
|
||
}}
|
||
]
|
||
}}
|
||
|
||
FIELD REQUIREMENTS:
|
||
- "method": Must be from AVAILABLE METHODS
|
||
- "action": Must be valid for the method
|
||
- "parameters": Method-specific, must include documentList as a list if required by the signature
|
||
- "resultLabel": Must follow the format above (e.g., "round{current_round}_task{current_task}_action3_AnalysisResults")
|
||
- "expectedDocumentFormats": OPTIONAL - Only specify when you need to control output format
|
||
- Use when you need specific file types (e.g., CSV for data, JSON for structured output)
|
||
- Omit when format is flexible (e.g., folder queries with mixed file types)
|
||
- Each format should specify: extension, mimeType, description
|
||
- When using expectedDocumentFormats, ensure the aiPrompt explicitly requests pure data without markdown formatting
|
||
- "description": Clear summary of the business outcome
|
||
- "userMessage": User-friendly message explaining what the action will accomplish in the user's language
|
||
|
||
EXAMPLES OF GOOD ACTIONS:
|
||
|
||
1. Document analysis with specific output format and user message:
|
||
{{
|
||
"method": "document",
|
||
"action": "extract",
|
||
"parameters": {{
|
||
"documentList": ["docItem:doc_57520394-6b6d-41c2-b641-bab3fc6d7f4b:candidate_profile.txt"],
|
||
"aiPrompt": "Extract and analyze the candidate's qualifications, experience, skills, and suitability for the product designer position. Identify key strengths, relevant experience, technical skills, and any areas of concern. Provide a comprehensive assessment that can be used for evaluation."
|
||
}},
|
||
"resultLabel": "round{current_round}_task{current_task}_action2_candidate_analysis",
|
||
"expectedDocumentFormats": [
|
||
{{
|
||
"extension": ".json",
|
||
"mimeType": "application/json",
|
||
"description": "Structured candidate analysis data"
|
||
}}
|
||
],
|
||
"description": "Comprehensive analysis of candidate profile for evaluation",
|
||
"userMessage": "Ich analysiere das Kandidatenprofil und extrahiere alle wichtigen Informationen für die Bewertung."
|
||
}}
|
||
|
||
2. Multi-document processing with user message:
|
||
{{
|
||
"method": "document",
|
||
"action": "extract",
|
||
"parameters": {{
|
||
"documentList": ["docList:msg_456:candidate_analysis_results"],
|
||
"aiPrompt": "Compare all candidate profiles and create an evaluation matrix. Rate each candidate on technical skills, experience level, cultural fit, portfolio quality, and communication skills. Provide clear rankings and recommendations for the product designer position."
|
||
}},
|
||
"resultLabel": "round{current_round}_task{current_task}_action5_evaluation_matrix",
|
||
"description": "Create comprehensive evaluation matrix comparing all candidates",
|
||
"userMessage": "Ich vergleiche alle Kandidatenprofile und erstelle eine umfassende Bewertungsmatrix mit klaren Empfehlungen."
|
||
}}
|
||
|
||
3. Data extraction with specific CSV format and user message:
|
||
{{
|
||
"method": "document",
|
||
"action": "extract",
|
||
"parameters": {{
|
||
"documentList": ["docItem:doc_abc:table_data.pdf"],
|
||
"aiPrompt": "Extract all table data and convert to structured CSV format with proper headers and data types. IMPORTANT: Deliver pure CSV data without any markdown formatting, code blocks, or additional text. Output only the CSV content with proper headers and data rows."
|
||
}},
|
||
"resultLabel": "round{current_round}_task{current_task}_action2_structured_data",
|
||
"expectedDocumentFormats": [
|
||
{{
|
||
"extension": ".csv",
|
||
"mimeType": "text/csv",
|
||
"description": "Structured table data in CSV format"
|
||
}}
|
||
],
|
||
"description": "Extract and structure table data for analysis",
|
||
"userMessage": "Ich extrahiere alle Tabellendaten und konvertiere sie in ein strukturiertes CSV-Format für die weitere Analyse."
|
||
}}
|
||
|
||
4. Comprehensive summary report with user message:
|
||
{{
|
||
"method": "document",
|
||
"action": "generateReport",
|
||
"parameters": {{
|
||
"documentList": ["docList:msg_456:candidate_analysis_results"],
|
||
"title": "Comprehensive Candidate Evaluation Report"
|
||
}},
|
||
"resultLabel": "round{current_round}_task{current_task}_action6_summary_report",
|
||
"description": "Generate a comprehensive, professional HTML report consolidating all candidate analyses and findings",
|
||
"userMessage": "Ich erstelle einen umfassenden, professionellen Bericht, der alle Kandidatenanalysen und Erkenntnisse zusammenfasst."
|
||
}}
|
||
|
||
5. Correct chaining of actions within a task:
|
||
{{
|
||
"actions": [
|
||
{{
|
||
"method": "document",
|
||
"action": "extract",
|
||
"parameters": {{
|
||
"documentList": ["docItem:doc_abc:round{current_round}_task{current_task}_action1_file1.txt"],
|
||
"aiPrompt": "Extract data from file1."
|
||
}},
|
||
"resultLabel": "round{current_round}_task{current_task}_action1_extracted_data",
|
||
"description": "Extract data from file1.",
|
||
"userMessage": "Ich extrahiere die Daten aus der Datei."
|
||
}},
|
||
{{
|
||
"method": "document",
|
||
"action": "generateReport",
|
||
"parameters": {{
|
||
"documentList": ["round{current_round}_task{current_task}_action1_extracted_data"],
|
||
"title": "Report"
|
||
}},
|
||
"resultLabel": "round{current_round}_task{current_task}_action2_report",
|
||
"description": "Generate report from extracted data.",
|
||
"userMessage": "Ich erstelle einen Bericht basierend auf den extrahierten Daten."
|
||
}}
|
||
]
|
||
}}
|
||
|
||
IMPORTANT NOTES:
|
||
- Respond with ONLY the JSON object. Do not include any explanatory text.
|
||
- Before creating any document extraction action, verify that AVAILABLE DOCUMENTS contains actual document references.
|
||
- Always include a user-friendly userMessage for each action in the user's language ({user_language}).
|
||
- The examples above show German user messages as reference - adapt the language to match the USER LANGUAGE specified above."""
|
||
|
||
# Removed sensitive data from debug logging
|
||
logging.debug(f"[ACTION PLAN PROMPT] Document context and methods prepared")
|
||
|
||
return prompt
|
||
|
||
def createResultReviewPrompt(context: ReviewContext, service) -> str:
    """Create enhanced prompt for result review with user-friendly messages and document context.

    Args:
        context: Review context holding the executed task step, its actions,
            the action results, and any previous results.
        service: Service center; used for document context and the user's language.

    Returns:
        The fully rendered review prompt string (model must answer JSON only).
    """
    # Build comprehensive action and result summary
    action_summary = ""
    for i, action in enumerate(context.task_actions or []):
        action_summary += f"\nACTION {i+1}: {action.execMethod}.{action.execAction}\n"
        action_summary += f" Status: {action.status}\n"
        if action.error:
            action_summary += f" Error: {action.error}\n"
        if action.resultDocuments:
            action_summary += f" Documents: {len(action.resultDocuments)} document(s)\n"
            for doc in action.resultDocuments:
                # Use Pydantic model properties directly
                fileName = doc.fileName
                fileSize = doc.fileSize
                mimeType = doc.mimeType

                action_summary += f" - {fileName} ({fileSize} bytes, {mimeType})\n"
        else:
            action_summary += " Documents: None\n"

    # Build result summary with SIMPLE DOCUMENT VALIDATION
    result_summary = ""
    document_validation_summary = ""
    document_access_warnings = []

    if context.action_results:
        for i, result in enumerate(context.action_results):
            result_summary += f"\nRESULT {i+1}:\n"
            result_summary += f" Success: {result.success}\n"
            if result.error:
                result_summary += f" Error: {result.error}\n"

            if result.documents:
                result_summary += f" Documents: {len(result.documents)} document(s)\n"
                for doc in result.documents:
                    # Use correct ActionDocument attributes
                    doc_name = getattr(doc, 'documentName', 'Unknown')
                    doc_mime = getattr(doc, 'mimeType', 'Unknown')
                    doc_data = getattr(doc, 'documentData', None)

                    result_summary += f" - {doc_name} ({doc_mime})\n"

                    # SIMPLE VALIDATION: Check if documents exist and have basic properties
                    validation_status = "✅ Valid"
                    if not doc_name or str(doc_name).strip() == "":
                        validation_status = "❌ Missing document name"
                    elif not doc_mime or str(doc_mime).strip() == "":
                        validation_status = "❌ Missing MIME type"
                    elif doc_data is None:
                        validation_status = "⚠️ No document data"
                    elif hasattr(doc_data, '__len__') and len(doc_data) == 0:
                        validation_status = "⚠️ Empty document data"

                    # FIX: previously nothing ever populated document_access_warnings,
                    # so the warnings section below was always empty dead code.
                    # Record every non-valid document so the reviewer sees it.
                    if validation_status != "✅ Valid":
                        document_access_warnings.append(f"{doc_name}: {validation_status}")

                    document_validation_summary += f" - {doc_name}: {validation_status}\n"
            else:
                result_summary += " Documents: None\n"
                document_validation_summary += " - No documents produced\n"

    # Get enhanced document context using the new method
    document_context = getEnhancedDocumentContext(service)

    # Get user language from service
    user_language = service.user.language if service and service.user else 'en'

    # Build warnings section (only for critical issues)
    warnings_section = ""
    if document_access_warnings:
        warnings_section = f"""
⚠️ DOCUMENT VALIDATION ISSUES:
{chr(10).join(f"- {warning}" for warning in document_access_warnings)}
"""

    prompt = f"""
You are a result review AI that evaluates task execution results and provides feedback with user-friendly messages.

TASK OBJECTIVE: {context.task_step.objective if context.task_step else 'No task objective specified'}
SUCCESS CRITERIA: {', '.join(context.task_step.success_criteria) if context.task_step and context.task_step.success_criteria else 'No success criteria specified'}

EXECUTION SUMMARY:
{action_summary}

RESULT SUMMARY:
{result_summary}

{warnings_section}

DOCUMENT VALIDATION SUMMARY:
{document_validation_summary if document_validation_summary else "No documents to validate"}

DOCUMENT CONTEXT (Available Documents):
{document_context}

PREVIOUS RESULTS: {', '.join(context.previous_results) if context.previous_results else 'None'}

REVIEW INSTRUCTIONS:
1. Evaluate if the task step was completed successfully
2. Check if all success criteria were met
3. Assess the quality and completeness of outputs
4. Identify any missing or incomplete results
5. Provide specific improvement suggestions
6. Generate user-friendly messages explaining the results
7. Return a JSON object with the exact structure shown below

DOCUMENT VALIDATION FOCUS:
- Check if the agreed result documents label is correct (matches expected format)
- Verify that documents are actually present and have basic properties
- Do NOT attempt to analyze document content deeply
- Focus on document existence and basic metadata validation

REQUIRED JSON STRUCTURE:
{{
"status": "success|retry|failed",
"reason": "Brief explanation of the status",
"improvements": ["improvement1", "improvement2"],
"quality_score": 8, // 1-10 scale
"missing_outputs": ["missing_output1", "missing_output2"],
"met_criteria": ["criteria1", "criteria2"],
"unmet_criteria": ["criteria3", "criteria4"],
"confidence": 0.85, // 0.0-1.0 confidence level in this assessment
"userMessage": "User-friendly message explaining the review results in the user's language"
}}

FIELD REQUIREMENTS:
- "status": Overall task completion status
- "success": All criteria met, high-quality outputs
- "retry": Some criteria met, outputs need improvement and retry
- "failed": Most criteria unmet, significant issues
- "reason": Clear explanation of why this status was assigned
- "improvements": List of specific, actionable improvements
- "quality_score": 1-10 rating of output quality
- "missing_outputs": List of expected outputs that were not produced
- "met_criteria": List of success criteria that were fully met
- "unmet_criteria": List of success criteria that were not met
- "confidence": 0.0-1.0 confidence level in this assessment
- "userMessage": User-friendly explanation of results in the user's language

EXAMPLES OF GOOD IMPROVEMENTS:
- "Increase AI prompt specificity for better data extraction"
- "Add validation steps to ensure output completeness"
- "Improve error handling for failed document processing"
- "Enhance document format specifications for better output quality"

EXAMPLES OF GOOD MISSING OUTPUTS:
- "Structured analysis report in JSON format"
- "Comparison matrix of candidate profiles"
- "Data validation summary with quality metrics"
- "Professional business communication document"

QUALITY SCORE GUIDELINES:
- 9-10: Exceptional quality, exceeds expectations
- 7-8: Good quality, meets all requirements
- 5-6: Acceptable quality, minor issues
- 3-4: Poor quality, significant issues
- 1-2: Very poor quality, major problems

USER LANGUAGE: {user_language} - All user messages must be generated in this language.

NOTE: Respond with ONLY the JSON object. Do not include any explanatory text."""

    return prompt
|
||
|
||
# ===== New compact prompts for React-style workflow =====
|
||
|
||
def _build_tiny_catalog(service) -> str:
|
||
"""Return minimal tool catalog: method -> { action -> [paramNames] }"""
|
||
try:
|
||
method_signatures = getMethodsList(service)
|
||
except Exception:
|
||
method_signatures = []
|
||
catalog: Dict[str, Dict[str, List[str]]] = {}
|
||
for sig in method_signatures:
|
||
if '.' not in sig or '(' not in sig or ')' not in sig:
|
||
continue
|
||
method, rest = sig.split('.', 1)
|
||
action = rest.split('(')[0]
|
||
params_str = rest[rest.find('(')+1:rest.find(')')].strip()
|
||
param_names = []
|
||
if params_str:
|
||
for p in params_str.split(','):
|
||
name = p.strip().split(':')[0].split('=')[0].strip()
|
||
if name:
|
||
param_names.append(name)
|
||
catalog.setdefault(method, {})[action] = param_names
|
||
return json.dumps(catalog, separators=(',', ':'), ensure_ascii=False)
|
||
|
||
def createActionSelectionPrompt(context: TaskContext, service) -> str:
    """Build the compact prompt asking the model to pick exactly one next action.

    Expected model response shape: {"action":{"method":"..","name":".."}}
    """
    # Resolve the user's language, defaulting to English when no user is attached.
    lang = 'en'
    if service and service.user:
        lang = service.user.language

    # Minimal method -> action -> [paramNames] catalog as compact JSON.
    catalog_json = _build_tiny_catalog(service)

    goal = ''
    if context and context.task_step:
        goal = context.task_step.objective

    docs_text = "No documents available"
    if context and context.workflow:
        docs_text = _getAvailableDocuments(context.workflow)

    return f"""Select exactly one action to advance the task.

OBJECTIVE: {goal}
AVAILABLE DOCUMENTS: {docs_text}
USER LANGUAGE: {lang}

MINIMAL TOOL CATALOG (method -> action -> [parameterNames]):
{catalog_json}

BUSINESS RULES:
- Pick exactly one action per step.
- Derive choice from objective and success criteria.
- Prefer user language.
- Keep it minimal; avoid provider specifics.

RESPONSE FORMAT (JSON only):
{{"action":{{"method":"web","name":"search"}}}}
"""
|
||
|
||
def createActionParameterPrompt(context: TaskContext, selected_action: Dict[str, str], service=None) -> str:
    """Build the prompt that requests only the parameters for a selected action.

    Expected model response shape: {"parameters":{...}}
    """
    # Default to English when no user is attached to the service.
    lang = 'en'
    if service and service.user:
        lang = service.user.language

    exec_method = ''
    exec_name = ''
    if selected_action:
        exec_method = selected_action.get('method', '')
        exec_name = selected_action.get('name', '')

    docs_text = "No documents available"
    if context and context.workflow:
        docs_text = _getAvailableDocuments(context.workflow)

    goal = ''
    if context and context.task_step:
        goal = context.task_step.objective

    # Look up the concrete action signature from the methods catalog so the
    # model knows which parameters are required.
    signature_text = ""
    if service and exec_method in methods:
        holder = methods[exec_method]['instance']
        signature_text = holder.getActionSignature(exec_name)

    return f"""Provide only the required parameters for this action.

SELECTED ACTION: {exec_method}.{exec_name}
ACTION SIGNATURE: {signature_text}
OBJECTIVE: {goal}
AVAILABLE DOCUMENTS: {docs_text}
USER LANGUAGE: {lang}

RULES:
- Return only the parameters object.
- Include user language if relevant.
- Reference documents only by exact labels available.
- Avoid unnecessary fields; host applies defaults.
- Use the ACTION SIGNATURE above to understand what parameters are required.
- Convert the objective into appropriate parameter values as needed.

RESPONSE FORMAT (JSON only):
{{"parameters":{{}}}}
"""
|
||
|
||
def createRefinementPrompt(context: TaskContext, observation: Dict[str, Any]) -> str:
    """Build the prompt deciding whether to continue or stop after an observation.

    Expected model response shape: {"decision":"continue|stop","reason":".."}

    Args:
        context: Current task context; only the task step's objective is used.
        observation: Raw observation from the last executed action, embedded
            in the prompt as JSON.

    Returns:
        The rendered prompt string (model must answer JSON only).
    """
    # FIX: removed a dead "... if False else ..." computation of an unused
    # user_language local; it dereferenced context.workflow without a None
    # guard and could raise AttributeError when context was None.
    objective = context.task_step.objective if context and context.task_step else ''
    return f"""Decide next step based on observation.

OBJECTIVE: {objective}
OBSERVATION:
{json.dumps(observation, ensure_ascii=False)}

RULES:
- If criteria are met or no further action helps, decide stop.
- Else decide continue.

RESPONSE FORMAT (JSON only):
{{"decision":"continue","reason":"Need more data"}}
"""