gateway/modules/chat/handling/promptFactory.py
2025-08-29 11:22:49 +02:00

607 lines
30 KiB
Python

# promptFactory.py
# Contains all prompt creation functions extracted from managerChat.py
import json
import logging
from typing import Any, Dict
from modules.interfaces.interfaceChatModel import TaskContext, ReviewContext
# Set up logger
logger = logging.getLogger(__name__)
# Prompt creation helpers extracted from managerChat.py
def createTaskPlanningPrompt(context: "TaskContext", service) -> str:
    """Create the task-planning prompt for the LLM.

    Asks the model to analyse the user's request plus the available document
    labels and reply with a structured JSON task plan, including a per-task
    user-facing message localised to the user's language and a detected
    request-language field.

    Args:
        context: Task context; only ``task_step.objective`` and
            ``available_documents`` are read here.
        service: Chat service; ``service.user.language`` supplies the target
            language for user messages.

    Returns:
        The complete prompt string (the model is instructed to reply with
        JSON only).
    """
    # Resolve the user's language. Fall back to 'en' not only when service/user
    # is missing but also when the stored language is empty/None -- previously
    # a falsy language value leaked into the prompt verbatim (e.g. "in None").
    user_language = (service.user.language if service and service.user else None) or 'en'
    # Extract user request from context - use Pydantic model directly
    user_request = context.task_step.objective if context.task_step else 'No request specified'
    # Extract available documents from context - use Pydantic model directly
    available_documents = context.available_documents or []
    return f"""You are a task planning AI that analyzes user requests and creates structured task plans with user-friendly feedback messages.
USER REQUEST: {user_request}
AVAILABLE DOCUMENTS: {', '.join(available_documents)}
INSTRUCTIONS:
1. Analyze the user request and available documents
2. Group related topics and sequential steps into single, comprehensive tasks
3. Focus on business outcomes, not technical operations
4. Each task should produce meaningful, usable outputs
5. Ensure proper handover between tasks using result labels
6. Generate user-friendly messages for each task in the user's language ({user_language})
7. Detect the language of the user request and include it in languageUserDetected
8. Return a JSON object with the exact structure shown below
TASK GROUPING PRINCIPLES:
- COMBINE RELATED TOPICS: Group related subjects, sequential steps, or workflow-structured activities into single tasks
- SEQUENTIAL WORKFLOWS: If the user says "first do this, then that, then that" → create ONE task that handles the entire sequence
- SIMILAR CONTENT: If multiple items deal with the same subject matter → combine into ONE comprehensive task
- ONLY SPLIT WHEN DIFFERENT: Create separate tasks ONLY when the user explicitly wants different, independent things
EXAMPLES OF GOOD TASK GROUPING:
COMBINE INTO ONE TASK:
- "Analyze the documents, extract key insights, and create a summary report" → ONE task: "Analyze documents and create comprehensive summary report"
- "First check my emails, then respond to urgent ones, then organize my inbox" → ONE task: "Process and organize email inbox with priority responses"
- "Review the budget, analyze spending patterns, and suggest cost-cutting measures" → ONE task: "Comprehensive budget analysis with optimization recommendations"
- "Create a business strategy, develop marketing plan, and prepare presentation" → ONE task: "Develop complete business strategy with marketing plan and presentation"
SPLIT INTO MULTIPLE TASKS:
- "Create a business strategy for Q4" AND "Check my emails for messages from my assistant" → TWO separate tasks (different subjects)
- "Analyze customer feedback" AND "Prepare quarterly financial report" → TWO separate tasks (different business areas)
- "Review project timeline" AND "Update employee handbook" → TWO separate tasks (unrelated activities)
TASK PLANNING PRINCIPLES:
- Break down complex requests into logical, sequential steps
- Focus on business value and outcomes
- Keep tasks at a meaningful level of abstraction
- Each task should produce results that can be used by subsequent tasks
- Ensure clear dependencies and handovers between tasks
- Provide clear, actionable user messages in the user's language ({user_language})
- Group related activities to minimize task fragmentation
- Only create multiple tasks when dealing with truly different, independent objectives
REQUIRED JSON STRUCTURE:
{{
"overview": "Brief description of the overall plan",
"userMessage": "User-friendly message explaining the task plan in {user_language}",
"languageUserDetected": "en", // Language code detected from user request (en, de, fr, it, es, etc.)
"tasks": [
{{
"id": "task_1",
"objective": "Clear business objective this task accomplishes (combining related activities)",
"dependencies": ["task_0"], // IDs of tasks that must complete first
"success_criteria": ["criteria1", "criteria2"],
"estimated_complexity": "low|medium|high",
"userMessage": "User-friendly message explaining what this task will accomplish in {user_language}"
}}
]
}}
EXAMPLES OF GOOD TASK OBJECTIVES (COMBINING RELATED ACTIVITIES):
- "Analyze documents and extract key insights for business communication"
- "Create professional business communication incorporating analyzed information"
- "Execute business communication using specified channels and document outcomes"
- "Develop comprehensive business strategy with implementation roadmap and success metrics"
EXAMPLES OF GOOD SUCCESS CRITERIA:
- "Key insights extracted and ready for business use"
- "Professional communication created with clear business value"
- "Business communication successfully delivered and documented"
- "All outcomes properly documented and accessible"
EXAMPLES OF BAD TASK OBJECTIVES:
- "Read the PDF file" (too granular - should be "Analyze document content")
- "Convert data to CSV" (implementation detail - should be "Structure data for analysis")
- "Send email" (too specific - should be "Deliver business communication")
LANGUAGE DETECTION:
- Analyze the user request text to identify the language
- Use standard language codes: en (English), de (German), fr (French), it (Italian), es (Spanish), etc.
- If the language cannot be determined, use "en" as default
- Include the detected language in the languageUserDetected field
NOTE: Respond with ONLY the JSON object. Do not include any explanatory text."""
def _buildRetryContext(context: "TaskContext") -> str:
    """Format the RETRY CONTEXT section of the action prompt.

    Returns an empty string on the first attempt; otherwise summarises the
    previous action results (status / error / produced documents) and, when
    available, the previous review feedback so the model can address the
    specific failures on retry.
    """
    if not (context.retry_count and context.retry_count > 0):
        return ""
    retry_context = f"""
RETRY CONTEXT (Attempt {context.retry_count}):
Previous action results that failed or were incomplete:
"""
    for i, result in enumerate(context.previous_action_results or []):
        retry_context += f"- Action {i+1}: ActionResult\n"
        # Fixed: was the broken `result.success and 'success' or 'failed'`
        # and/or idiom -- use a real conditional expression.
        retry_context += f" Status: {'success' if result.success else 'failed'}\n"
        retry_context += f" Error: {result.error or 'None'}\n"
        # Check if result has documents and show document info
        if result.documents:
            doc_info = f"Documents: {len(result.documents)} document(s)"
            if result.documents[0].documentName:
                doc_info += f" - {result.documents[0].documentName}"
            retry_context += f" {doc_info}\n"
        else:
            retry_context += f" Documents: None\n"
    if context.previous_review_result:
        retry_context += f"""
Previous review feedback:
- Status: {context.previous_review_result.get('status', 'unknown') or 'unknown'}
- Reason: {context.previous_review_result.get('reason', 'No reason provided') or 'No reason provided'}
- Quality Score: {context.previous_review_result.get('quality_score', 0) or 0}/10
- Unmet Criteria: {', '.join(context.previous_review_result.get('unmet_criteria', []) or [])}
"""
    return retry_context


async def createActionDefinitionPrompt(context: "TaskContext", service) -> str:
    """Create the action-generation prompt for the current task step.

    Gathers everything the model needs -- the available methods/actions,
    user connections, enhanced document context, a chat-history summary,
    retry feedback, and the current round/task position -- and embeds it
    in a single instruction prompt requesting a JSON action list.

    Args:
        context: Task context (task step, retry info, previous results,
            optional workflow whose messages get summarised).
        service: Chat service; provides method/connection/document lookups,
            async chat summarisation, workflow context and the user's language.

    Returns:
        The complete prompt string (the model is instructed to reply with
        JSON only).
    """
    # Named logger, consistent with the module-level `logger` convention
    # (previously these debug calls went to the root logger via logging.debug).
    log = logging.getLogger(__name__)
    # Group "method.action(signature)" entries by method name.
    methodList = service.getMethodsList()
    method_actions = {}
    for sig in methodList:
        if '.' in sig:
            method, rest = sig.split('.', 1)
            action = rest.split('(')[0]
            method_actions.setdefault(method, []).append((action, sig))
    messageSummary = await service.summarizeChat(context.workflow.messages) if context.workflow else ""
    # Get enhanced document context using the new method
    available_documents_str = service.getEnhancedDocumentContext()
    connRefs = service.getConnectionReferenceList()
    # Debug logging for connections (lazy %-args: formatted only if enabled).
    log.debug("Connection references retrieved: %s", connRefs)
    log.debug("Connection references type: %s", type(connRefs))
    log.debug("Connection references length: %s", len(connRefs) if connRefs else 0)
    # Log document availability for debugging
    log.debug("Enhanced document context length: %s", len(available_documents_str))
    # Render the grouped methods as an indented bullet list.
    method_lines = []
    for method, actions in method_actions.items():
        method_lines.append(f"- {method}:\n")
        for action, sig in actions:
            method_lines.append(f" - {action}: {sig}\n")
    available_methods_str = ''.join(method_lines)
    # Retry feedback section (empty string on the first attempt).
    retry_context = _buildRetryContext(context)
    # Use Pydantic model directly - no need for getattr
    success_criteria_str = ', '.join(context.task_step.success_criteria) if context.task_step and context.task_step.success_criteria else 'No criteria specified'
    previous_results_str = ', '.join(context.previous_results) if context.previous_results else 'None'
    improvements_str = str(context.improvements) if context.improvements else 'None'
    available_connections_str = '\n'.join(f"- {conn}" for conn in connRefs)
    # User language with 'en' fallback (also covers an empty/None stored value).
    user_language = (service.user.language if service and service.user else None) or 'en'
    # Current round/task drive the resultLabel examples in the prompt.
    workflow_context = service.getWorkflowContext()
    current_round = workflow_context.get('currentRound', 1)
    current_task = workflow_context.get('currentTask', 1)
    prompt = f"""
You are an action generation AI that creates specific actions to accomplish a task step with user-friendly messages.
DOCUMENT REFERENCE TYPES:
- docItem: Reference to a single document
- docList: Reference to a group of documents
- round{{round_number}}_task{{task_number}}_action{{action_number}}_{{context}}: Reference to resulting document list from previous action
USAGE GUIDE:
- Use docItem when you need a specific document: "docItem:doc_123:component_diagram.pdf"
- Use docList when you need all documents in a group: "docList:msg_456:AnalysisResults"
- Use round/task/action format when referencing outputs from previous actions: "round{current_round}_task{current_task}_action2_AnalysisResults"
CRITICAL DOCUMENT REFERENCE RULES:
- ONLY use the exact labels listed in AVAILABLE DOCUMENTS below, or result labels from previous actions
- When generating multiple actions, you may only use as input documents those that are already present in AVAILABLE DOCUMENTS or produced by actions that come earlier in the list. Do NOT use as input any document label that will be produced by a later action.
- If AVAILABLE DOCUMENTS shows "NO DOCUMENTS AVAILABLE", you CANNOT create document extraction actions. Instead, create actions that generate new content or inform the user that documents are needed, if you miss something.
CURRENT WORKFLOW CONTEXT:
- Current Round: {current_round}
- Current Task: {current_task}
- Use these values when creating resultLabel references
TASK STEP: {context.task_step.objective if context.task_step else 'No task step specified'} (ID: {context.task_step.id if context.task_step else 'unknown'})
SUCCESS CRITERIA: {success_criteria_str}
CONTEXT - Chat History:
{messageSummary}
WORKFLOW CONTEXT - Previous Messages Summary:
The following summarizes key information from previous workflow interactions to provide context for continued workflows:
- Previous user inputs and their outcomes
- Key decisions and findings from earlier tasks
- Document processing results and insights
- User preferences and requirements established
This context helps ensure your actions build upon previous work and maintain consistency with the overall workflow objectives.
AVAILABLE METHODS AND ACTIONS (with signatures):
{available_methods_str}
AVAILABLE CONNECTIONS:
{available_connections_str}
AVAILABLE DOCUMENTS:
{available_documents_str}
DOCUMENT REFERENCE EXAMPLES:
✅ CORRECT: Use exact references from AVAILABLE DOCUMENTS above or result labels from previous actions
- "docList:msg_456:diagram_analysis_results" (access all documents in a list)
- "docItem:doc_123:component_diagram.pdf" (access specific document)
- "round{current_round}_task{current_task}_action3_contextinfo" (document list from previous action)
❌ INCORRECT: These will cause errors
- "msg_xxx:documents" (invalid format - missing docList/docItem prefix)
- "task_2_results" (not a valid reference - use exact references from AVAILABLE DOCUMENTS)
- Inventing document IDs not produces from a preceeding action
PREVIOUS RESULTS: {previous_results_str}
IMPROVEMENTS NEEDED: {improvements_str}
PREVIOUS TASK HANDOVER CONTEXT:
{context.previous_handover.workflowSummary if context.previous_handover and context.previous_handover.workflowSummary else 'No previous task handover available'}
{retry_context}
ACTION GENERATION PRINCIPLES:
- Create meaningful actions per task step
- Use comprehensive AI prompts for document processing
- Focus on business outcomes, not technical operations
- Combine related operations into single actions when possible
- Use the task's AI prompt if provided, or create a comprehensive one
- Each action should produce meaningful, usable outputs
- For document extraction, ensure prompts are specific and detailed
- Include validation steps in extraction prompts
- If this is a retry, learn from previous failures and improve the approach
- Address specific issues mentioned in previous review feedback
- When specifying expectedDocumentFormats, ensure AI prompts explicitly request pure data without markdown formatting
- Generate user-friendly messages for each action in the user's language ({user_language})
USER LANGUAGE: {user_language} - All user messages must be generated in this language.
DOCUMENT ROUTING GUIDANCE:
- Each action should produce documents with a clear resultLabel for routing
- Use consistent naming: "round{current_round}_task{{task_id}}_action{{action_number}}_{{descriptive_label}}"
- Ensure document flow: Action A produces documents that Action B can consume
- Document labels should be descriptive of content, not just "results" or "output"
- Consider what subsequent actions will need and structure outputs accordingly
INSTRUCTIONS:
- Generate actions to accomplish this task step using available documents, connections, and previous results
- Use docItem for single documents and docList for groups of documents as shown in AVAILABLE DOCUMENTS
- If AVAILABLE DOCUMENTS shows "NO DOCUMENTS AVAILABLE", you cannot create document extraction actions. Instead, create actions that generate new content or inform the user that documents are needed.
- Always pass documentList as a LIST of references (docItem and/or docList) - this list CANNOT be empty for document extraction actions
- For referencing documents from previous actions, use the format "round{{round_number}}_task{{task_number}}_action{{action_number}}_{{context}}"
- For resultLabel, use the format: "round{current_round}_task{{task_id}}_action{{action_number}}_{{short_label}}" where:
- {{round_number}} = the current round number ({current_round})
- {{task_id}} = the current task's id ({current_task})
- {{action_number}} = the sequence number of the action within the task (e.g., 1, 2, 3)
- {{short_label}} = a short, descriptive label for the output (e.g., "AnalysisResults")
Example: "round{current_round}_task{current_task}_action1_AnalysisResults"
- If this is a retry, ensure the new actions address the specific issues from previous attempts
- Follow the JSON structure below. All fields are required.
REQUIRED JSON STRUCTURE:
{{
"actions": [
{{
"method": "method_name", // Use only the method name (e.g., "document")
"action": "action_name", // Use only the action name (e.g., "extract")
"parameters": {{
"documentList": ["docItem:doc_abc:round{current_round}_task{current_task}_action1_AnalysisResults", "round{current_round}_task{current_task}_action1_input"],
"aiPrompt": "Comprehensive AI prompt describing what to accomplish"
}},
"resultLabel": "round{current_round}_task{current_task}_action2_AnalysisResults",
"expectedDocumentFormats": [ // OPTIONAL: Specify expected document formats when needed
{{
"extension": ".txt",
"mimeType": "text/plain",
"description": "Structured data output"
}}
],
"description": "What this action accomplishes (business outcome)",
"userMessage": "User-friendly message explaining what this action will do in the user's language"
}}
]
}}
FIELD REQUIREMENTS:
- "method": Must be from AVAILABLE METHODS
- "action": Must be valid for the method
- "parameters": Method-specific, must include documentList as a list if required by the signature
- "resultLabel": Must follow the format above (e.g., "round{current_round}_task{current_task}_action3_AnalysisResults")
- "expectedDocumentFormats": OPTIONAL - Only specify when you need to control output format
- Use when you need specific file types (e.g., CSV for data, JSON for structured output)
- Omit when format is flexible (e.g., folder queries with mixed file types)
- Each format should specify: extension, mimeType, description
- When using expectedDocumentFormats, ensure the aiPrompt explicitly requests pure data without markdown formatting
- "description": Clear summary of the business outcome
- "userMessage": User-friendly message explaining what the action will accomplish in the user's language
EXAMPLES OF GOOD ACTIONS:
1. Document analysis with specific output format and user message:
{{
"method": "document",
"action": "extract",
"parameters": {{
"documentList": ["docItem:doc_57520394-6b6d-41c2-b641-bab3fc6d7f4b:candidate_profile.txt"],
"aiPrompt": "Extract and analyze the candidate's qualifications, experience, skills, and suitability for the product designer position. Identify key strengths, relevant experience, technical skills, and any areas of concern. Provide a comprehensive assessment that can be used for evaluation."
}},
"resultLabel": "round{current_round}_task{current_task}_action2_candidate_analysis",
"expectedDocumentFormats": [
{{
"extension": ".json",
"mimeType": "application/json",
"description": "Structured candidate analysis data"
}}
],
"description": "Comprehensive analysis of candidate profile for evaluation",
"userMessage": "Ich analysiere das Kandidatenprofil und extrahiere alle wichtigen Informationen für die Bewertung."
}}
2. Multi-document processing with user message:
{{
"method": "document",
"action": "extract",
"parameters": {{
"documentList": ["docList:msg_456:candidate_analysis_results"],
"aiPrompt": "Compare all candidate profiles and create an evaluation matrix. Rate each candidate on technical skills, experience level, cultural fit, portfolio quality, and communication skills. Provide clear rankings and recommendations for the product designer position."
}},
"resultLabel": "round{current_round}_task{current_task}_action5_evaluation_matrix",
"description": "Create comprehensive evaluation matrix comparing all candidates",
"userMessage": "Ich vergleiche alle Kandidatenprofile und erstelle eine umfassende Bewertungsmatrix mit klaren Empfehlungen."
}}
3. Data extraction with specific CSV format and user message:
{{
"method": "document",
"action": "extract",
"parameters": {{
"documentList": ["docItem:doc_abc:table_data.pdf"],
"aiPrompt": "Extract all table data and convert to structured CSV format with proper headers and data types. IMPORTANT: Deliver pure CSV data without any markdown formatting, code blocks, or additional text. Output only the CSV content with proper headers and data rows."
}},
"resultLabel": "round{current_round}_task{current_task}_action2_structured_data",
"expectedDocumentFormats": [
{{
"extension": ".csv",
"mimeType": "text/csv",
"description": "Structured table data in CSV format"
}}
],
"description": "Extract and structure table data for analysis",
"userMessage": "Ich extrahiere alle Tabellendaten und konvertiere sie in ein strukturiertes CSV-Format für die weitere Analyse."
}}
4. Comprehensive summary report with user message:
{{
"method": "document",
"action": "generateReport",
"parameters": {{
"documentList": ["docList:msg_456:candidate_analysis_results"],
"title": "Comprehensive Candidate Evaluation Report"
}},
"resultLabel": "round{current_round}_task{current_task}_action6_summary_report",
"description": "Generate a comprehensive, professional HTML report consolidating all candidate analyses and findings",
"userMessage": "Ich erstelle einen umfassenden, professionellen Bericht, der alle Kandidatenanalysen und Erkenntnisse zusammenfasst."
}}
5. Correct chaining of actions within a task:
{{
"actions": [
{{
"method": "document",
"action": "extract",
"parameters": {{
"documentList": ["docItem:doc_abc:round{current_round}_task{current_task}_action1_file1.txt"],
"aiPrompt": "Extract data from file1."
}},
"resultLabel": "round{current_round}_task{current_task}_action1_extracted_data",
"description": "Extract data from file1.",
"userMessage": "Ich extrahiere die Daten aus der Datei."
}},
{{
"method": "document",
"action": "generateReport",
"parameters": {{
"documentList": ["round{current_round}_task{current_task}_action1_extracted_data"],
"title": "Report"
}},
"resultLabel": "round{current_round}_task{current_task}_action2_report",
"description": "Generate report from extracted data.",
"userMessage": "Ich erstelle einen Bericht basierend auf den extrahierten Daten."
}}
]
}}
6. When no documents are available (NO DOCUMENTS AVAILABLE scenario):
{{
"method": "document",
"action": "generateReport",
"parameters": {{
"documentList": [],
"title": "Workflow Status Report"
}},
"resultLabel": "round{current_round}_task{current_task}_action1_status_report",
"description": "Generate a status report informing the user that no documents are available for processing and requesting document upload or alternative input.",
"userMessage": "Ich erstelle einen Statusbericht, der Sie darüber informiert, dass keine Dokumente zur Verarbeitung verfügbar sind und um Dokumente oder alternative Eingaben bittet."
}}
IMPORTANT NOTES:
- Respond with ONLY the JSON object. Do not include any explanatory text.
- Before creating any document extraction action, verify that AVAILABLE DOCUMENTS contains actual document references.
- If AVAILABLE DOCUMENTS shows "NO DOCUMENTS AVAILABLE", use example 6 above to create a status report action instead of document extraction.
- Always include a user-friendly userMessage for each action in the user's language ({user_language}).
- The examples above show German user messages as reference - adapt the language to match the USER LANGUAGE specified above."""
    log.debug(
        "[ACTION PLAN PROMPT] Enhanced Document Context:\n%s\nUser Connections Section:\n%s\nAvailable Methods (detailed):\n%s",
        available_documents_str, available_connections_str, available_methods_str,
    )
    return prompt
def createResultReviewPrompt(context: "ReviewContext", service) -> str:
    """Create the result-review prompt for the LLM.

    Summarises the executed actions and their results (including a shallow
    document validation pass) and asks the model to grade the task step
    against its success criteria, replying with a JSON review object.

    Args:
        context: Review context; reads ``task_actions``, ``action_results``,
            ``task_step`` and ``previous_results``.
        service: Chat service; provides the enhanced document context and the
            user's language.

    Returns:
        The complete prompt string (the model is instructed to reply with
        JSON only).
    """
    # Build comprehensive action and result summary
    action_summary = ""
    for i, action in enumerate(context.task_actions or []):
        action_summary += f"\nACTION {i+1}: {action.execMethod}.{action.execAction}\n"
        action_summary += f" Status: {action.status}\n"
        if action.error:
            action_summary += f" Error: {action.error}\n"
        if action.resultDocuments:
            action_summary += f" Documents: {len(action.resultDocuments)} document(s)\n"
            for doc in action.resultDocuments:
                # Use Pydantic model properties directly
                fileName = doc.fileName
                fileSize = doc.fileSize
                mimeType = doc.mimeType
                action_summary += f" - {fileName} ({fileSize} bytes, {mimeType})\n"
        else:
            action_summary += f" Documents: None\n"
    # Build result summary with SIMPLE DOCUMENT VALIDATION
    result_summary = ""
    document_validation_summary = ""
    document_access_warnings = []
    if context.action_results:
        for i, result in enumerate(context.action_results):
            result_summary += f"\nRESULT {i+1}:\n"
            result_summary += f" Success: {result.success}\n"
            if result.error:
                result_summary += f" Error: {result.error}\n"
            if result.documents:
                result_summary += f" Documents: {len(result.documents)} document(s)\n"
                for doc in result.documents:
                    # Use correct ActionDocument attributes (defensive getattr:
                    # these docs may be partially populated).
                    doc_name = getattr(doc, 'documentName', 'Unknown')
                    doc_mime = getattr(doc, 'mimeType', 'Unknown')
                    doc_data = getattr(doc, 'documentData', None)
                    result_summary += f" - {doc_name} ({doc_mime})\n"
                    # SIMPLE VALIDATION: Check if documents exist and have basic properties
                    validation_status = "✅ Valid"
                    if not doc_name or str(doc_name).strip() == "":
                        validation_status = "❌ Missing document name"
                    elif not doc_mime or str(doc_mime).strip() == "":
                        validation_status = "❌ Missing MIME type"
                    elif doc_data is None:
                        validation_status = "⚠️ No document data"
                    elif hasattr(doc_data, '__len__') and len(doc_data) == 0:
                        validation_status = "⚠️ Empty document data"
                    # FIX: surface critical (❌) failures in the warnings
                    # section -- previously document_access_warnings was never
                    # populated, so the "DOCUMENT VALIDATION ISSUES" block was
                    # dead code and never appeared in the prompt.
                    if validation_status.startswith("❌"):
                        document_access_warnings.append(f"Result {i+1}: {doc_name or 'unnamed document'} - {validation_status}")
                    document_validation_summary += f" - {doc_name}: {validation_status}\n"
            else:
                result_summary += f" Documents: None\n"
                document_validation_summary += f" - No documents produced\n"
    # Get enhanced document context using the new method
    document_context = service.getEnhancedDocumentContext()
    # User language with 'en' fallback (also covers an empty/None stored value).
    user_language = (service.user.language if service and service.user else None) or 'en'
    # Build warnings section (only for critical issues)
    warnings_section = ""
    if document_access_warnings:
        warnings_section = f"""
⚠️ DOCUMENT VALIDATION ISSUES:
{chr(10).join(f"- {warning}" for warning in document_access_warnings)}
"""
    prompt = f"""
You are a result review AI that evaluates task execution results and provides feedback with user-friendly messages.
TASK OBJECTIVE: {context.task_step.objective if context.task_step else 'No task objective specified'}
SUCCESS CRITERIA: {', '.join(context.task_step.success_criteria) if context.task_step and context.task_step.success_criteria else 'No success criteria specified'}
EXECUTION SUMMARY:
{action_summary}
RESULT SUMMARY:
{result_summary}
{warnings_section}
DOCUMENT VALIDATION SUMMARY:
{document_validation_summary if document_validation_summary else "No documents to validate"}
DOCUMENT CONTEXT (Available Documents):
{document_context}
PREVIOUS RESULTS: {', '.join(context.previous_results) if context.previous_results else 'None'}
REVIEW INSTRUCTIONS:
1. Evaluate if the task step was completed successfully
2. Check if all success criteria were met
3. Assess the quality and completeness of outputs
4. Identify any missing or incomplete results
5. Provide specific improvement suggestions
6. Generate user-friendly messages explaining the results
7. Return a JSON object with the exact structure shown below
DOCUMENT VALIDATION FOCUS:
- Check if the agreed result documents label is correct (matches expected format)
- Verify that documents are actually present and have basic properties
- Do NOT attempt to analyze document content deeply
- Focus on document existence and basic metadata validation
REQUIRED JSON STRUCTURE:
{{
"status": "success|retry|failed",
"reason": "Brief explanation of the status",
"improvements": ["improvement1", "improvement2"],
"quality_score": 8, // 1-10 scale
"missing_outputs": ["missing_output1", "missing_output2"],
"met_criteria": ["criteria1", "criteria2"],
"unmet_criteria": ["criteria3", "criteria4"],
"confidence": 0.85, // 0.0-1.0 confidence level in this assessment
"userMessage": "User-friendly message explaining the review results in the user's language"
}}
FIELD REQUIREMENTS:
- "status": Overall task completion status
- "success": All criteria met, high-quality outputs
- "retry": Some criteria met, outputs need improvement and retry
- "failed": Most criteria unmet, significant issues
- "reason": Clear explanation of why this status was assigned
- "improvements": List of specific, actionable improvements
- "quality_score": 1-10 rating of output quality
- "missing_outputs": List of expected outputs that were not produced
- "met_criteria": List of success criteria that were fully met
- "unmet_criteria": List of success criteria that were not met
- "confidence": 0.0-1.0 confidence level in this assessment
- "userMessage": User-friendly explanation of results in the user's language
EXAMPLES OF GOOD IMPROVEMENTS:
- "Increase AI prompt specificity for better data extraction"
- "Add validation steps to ensure output completeness"
- "Improve error handling for failed document processing"
- "Enhance document format specifications for better output quality"
EXAMPLES OF GOOD MISSING OUTPUTS:
- "Structured analysis report in JSON format"
- "Comparison matrix of candidate profiles"
- "Data validation summary with quality metrics"
- "Professional business communication document"
QUALITY SCORE GUIDELINES:
- 9-10: Exceptional quality, exceeds expectations
- 7-8: Good quality, meets all requirements
- 5-6: Acceptable quality, minor issues
- 3-4: Poor quality, significant issues
- 1-2: Very poor quality, major problems
USER LANGUAGE: {user_language} - All user messages must be generated in this language.
NOTE: Respond with ONLY the JSON object. Do not include any explanatory text."""
    return prompt