# promptFactory.py
# Contains all prompt creation functions extracted from managerChat.py

import json
import logging
from typing import Any, Dict


# Prompt creation helpers extracted from managerChat.py

def createTaskPlanningPrompt(context: Dict[str, Any]) -> str:
    """Build the LLM prompt that asks for a structured task plan.

    Args:
        context: Mapping with two required keys:
            - 'user_request' (str): the user's original request text.
            - 'available_documents' (iterable of str): names of documents
              the planner may draw on; joined with ", " into the prompt.

    Returns:
        A prompt string instructing the model to reply with ONLY a JSON
        object describing 2-4 high-level task steps.

    Raises:
        KeyError: if either required key is missing from ``context``.
    """
    # Doubled braces ({{ / }}) render as literal braces in the f-string so the
    # JSON template survives formatting intact.
    return f"""You are a task planning AI that analyzes user requests and creates structured task plans.

USER REQUEST: {context['user_request']}

AVAILABLE DOCUMENTS: {', '.join(context['available_documents'])}

INSTRUCTIONS:
1. Analyze the user request and available documents
2. Break down the request into 2-4 meaningful high-level task steps
3. Focus on business outcomes, not technical operations
4. Each task should produce meaningful, usable outputs
5. Ensure proper handover between tasks using result labels
6. Return a JSON object with the exact structure shown below

TASK PLANNING PRINCIPLES:
- Break down complex requests into logical, sequential steps
- Focus on business value and outcomes
- Keep tasks at a meaningful level of abstraction
- Each task should produce results that can be used by subsequent tasks
- Ensure clear dependencies and handovers between tasks

REQUIRED JSON STRUCTURE:
{{
    "overview": "Brief description of the overall plan",
    "tasks": [
        {{
            "id": "task_1",
            "objective": "Clear business objective this task accomplishes",
            "dependencies": ["task_0"], // IDs of tasks that must complete first
            "success_criteria": ["criteria1", "criteria2"],
            "estimated_complexity": "low|medium|high"
        }}
    ]
}}

EXAMPLES OF GOOD TASK OBJECTIVES:
- "Extract key information from documents for email preparation"
- "Draft professional email incorporating analyzed information"
- "Send email using specified email account"
- "Store email draft and confirmation in system"

EXAMPLES OF GOOD SUCCESS CRITERIA:
- "Document analysis completed with key points identified"
- "Email draft created with professional tone and clear structure"
- "Email successfully sent with delivery confirmation"
- "All outputs properly stored and accessible for future use"

EXAMPLES OF BAD TASK OBJECTIVES:
- "Open and read the PDF file" (too granular)
- "Identify table structure" (technical detail)
- "Convert data to CSV format" (implementation detail)

NOTE: Respond with ONLY the JSON object. Do not include any explanatory text."""


async def createActionDefinitionPrompt(context, service) -> str:
    """Create prompt for action generation with enhanced document extraction guidance and retry context"""
    # NOTE(review): the source file was line-mangled and this definition is
    # truncated in the visible region; only the head below has been
    # reconstructed. The remainder continues (still mangled) further down
    # the file and must be reconstructed against the original source.
    task_step = context.task_step
    workflow = context.workflow
    available_docs = context.available_documents or []
    previous_results = context.previous_results or []
    improvements = context.improvements or []
    retry_count = context.retry_count or 0
    previous_action_results = context.previous_action_results or []
    previous_review_result = context.previous_review_result
    methodList = service.getMethodsList()
    method_actions = {}
    for sig in methodList:
        if '.' in sig:
            method, rest = sig.split('.', 1)
            action = rest.split('(')[0]
            method_actions.setdefault(method, []).append((action, sig))
    messageSummary = await service.summarizeChat(workflow.messages)
    # Get ALL documents from the entire workflow, not just current round
    docRefs = service.getDocumentReferenceList()
    connRefs = service.getConnectionReferenceList()
    # Get documents from current round (chat) and entire workflow history
    current_round_docs = docRefs.get('chat', [])
    workflow_history_docs = docRefs.get('history', [])
    # Combine all documents, prioritizing current round first, then workflow history
    all_doc_refs = current_round_docs + workflow_history_docs
    # Log document availability for debugging
    logging.debug(f"Document references - Current round: {len(current_round_docs)}, Workflow history: {len(workflow_history_docs)}, Total: {len(all_doc_refs)}")
    available_methods_str = ''
    for method, actions in method_actions.items():
        available_methods_str += f"- {method}:\n"
        for action, sig in actions:
            available_methods_str += f"  - {action}: {sig}\n"
available_methods_str += f" - {action}: {sig}\n" retry_context = "" if retry_count > 0: retry_context = f""" RETRY CONTEXT (Attempt {retry_count}): Previous action results that failed or were incomplete: """ for i, result in enumerate(previous_action_results): retry_context += f"- Action {i+1}: {result.actionMethod or 'unknown'}.{result.actionName or 'unknown'}\n" retry_context += f" Status: {result.success and 'success' or 'failed'}\n" retry_context += f" Error: {result.error or 'None'}\n" retry_context += f" Result: {(result.data.get('result', '') if result.data else '')[:100]}...\n" if previous_review_result: retry_context += f""" Previous review feedback: - Status: {previous_review_result.status or 'unknown'} - Reason: {previous_review_result.reason or 'No reason provided'} - Quality Score: {previous_review_result.quality_score or 0}/10 - Unmet Criteria: {', '.join(previous_review_result.unmet_criteria or [])} """ success_criteria_str = ', '.join(task_step.success_criteria or []) previous_results_str = ', '.join(previous_results) if previous_results else 'None' improvements_str = str(improvements) if improvements else 'None' available_connections_str = '\n'.join(f"- {conn}" for conn in connRefs) # Build comprehensive document list showing both current round and workflow history if all_doc_refs: available_documents_str = "CURRENT ROUND DOCUMENTS:\n" if current_round_docs: for doc in current_round_docs: available_documents_str += f"- {doc.documentsLabel} contains {', '.join(doc.documents)}\n" else: available_documents_str += "- No documents in current round\n" available_documents_str += "\nWORKFLOW HISTORY DOCUMENTS:\n" if workflow_history_docs: for doc in workflow_history_docs: available_documents_str += f"- {doc.documentsLabel} contains {', '.join(doc.documents)}\n" else: available_documents_str += "- No documents in workflow history\n" else: available_documents_str = "NO DOCUMENTS AVAILABLE - This workflow has no documents to process." 
# Debug logging for document availability logging.debug(f"Available documents string length: {len(available_documents_str)}") logging.debug(f"Current round docs count: {len(current_round_docs)}") logging.debug(f"Workflow history docs count: {len(workflow_history_docs)}") logging.debug(f"Total doc refs: {len(all_doc_refs)}") prompt = f""" You are an action generation AI that creates specific actions to accomplish a task step. DOCUMENT REFERENCE TYPES: - docItem: Reference to a single document. Format: "docItem::" - docList: Reference to a group of documents under a label. Format: