"""
Refactored architecture for the Agentservice multi-agent system.

This module defines the revised workflow execution with improved agent handovers.
"""
|
|
|
|
import os
|
|
import logging
|
|
import asyncio
|
|
import uuid
|
|
from datetime import datetime
|
|
from typing import List, Dict, Any, Optional, Tuple, Union
|
|
import json
|
|
import re
|
|
|
|
# Module-level logger for this module's own messages.
logger = logging.getLogger(__name__)

# Raise matplotlib's font-manager logger to INFO — presumably to silence its
# very chatty DEBUG output during font scanning (matplotlib appears to be a
# transitive dependency of this service; confirm).
logging.getLogger('matplotlib.font_manager').setLevel(logging.INFO)
|
|
|
|
class WorkflowExecution:
|
|
"""
|
|
Handles the execution of workflows with improved agent collaboration.
|
|
Integrates planning and execution phases for better context awareness.
|
|
"""
|
|
|
|
def __init__(self, workflow_manager, workflow_id: str, mandate_id: int, user_id: int, ai_service, lucydom_interface):
    """Wire up all collaborators needed to run a single workflow."""
    # Stash the constructor arguments as attributes of the same name.
    for attr_name, attr_value in (
        ("workflow_manager", workflow_manager),
        ("workflow_id", workflow_id),
        ("mandate_id", mandate_id),
        ("user_id", user_id),
        ("ai_service", ai_service),
        ("lucydom_interface", lucydom_interface),
    ):
        setattr(self, attr_name, attr_value)

    # Deferred imports: these modules are only needed once an execution
    # object is actually constructed.
    from modules.agentservice_utils import WorkflowUtils, MessageUtils, LoggingUtils
    from modules.agentservice_registry import AgentRegistry
    from modules.agentservice_filemanager import get_workflow_file_manager

    # Per-workflow helper utilities.
    self.workflow_utils = WorkflowUtils(workflow_id)
    self.message_utils = MessageUtils()
    self.logging_utils = LoggingUtils(workflow_id, self._add_log)

    # Shared (singleton) agent registry.
    self.agent_registry = AgentRegistry.get_instance()

    # File manager scoped to this workflow.
    self.file_manager = get_workflow_file_manager(workflow_id, lucydom_interface)

    # Document handler (imported here, mirroring the deferred-import pattern).
    from modules.agentservice_document_handler import get_document_handler
    self.document_handler = get_document_handler(workflow_id, lucydom_interface, ai_service)

    # Hand the shared services to every registered agent.
    self.agent_registry.set_dependencies(
        ai_service=ai_service,
        document_handler=self.document_handler,
        lucydom_interface=lucydom_interface
    )
|
|
|
|
async def execute(self, message: Dict[str, Any], workflow: Dict[str, Any], files: List[Dict[str, Any]] = None, is_user_input: bool = False):
    """
    Execute the workflow with integrated planning and agent selection.

    Pipeline: process the incoming message (and files), build an
    agent-aware work plan, execute it, create a summary, then persist the
    final workflow state.

    Args:
        message: The initiating message (prompt or user input)
        workflow: The workflow object (mutated: status, last_activity, messages)
        files: Optional list of file metadata
        is_user_input: Flag indicating if this is user input (not read by
            this method; kept for interface compatibility)

    Returns:
        Dict with workflow status and result: on success "completed" plus
        the workflow messages, on any exception "failed" plus the error text.
    """
    try:
        # 1. Initialize workflow logging
        self.logging_utils.info("Starting workflow execution", "workflow", "Workflow initialized")

        # 2. Process user message and files
        user_message = await self._process_user_message(workflow, message, files)
        self.logging_utils.info("User message processed", "workflow", "User input added to workflow")

        # 3. Create agent-aware work plan
        work_plan = await self._create_agent_aware_work_plan(workflow, user_message)
        self.logging_utils.info(f"Created agent-aware work plan with {len(work_plan)} activities", "planning")
        self.logging_utils.debug(f"{work_plan}.", "planning")

        # 4. Execute the activities in the work plan
        results = await self._execute_work_plan(workflow, work_plan)

        # 5. Create summary (side effect only; the return value is unused)
        await self._create_summary(workflow, results)
        self.logging_utils.info("Created workflow summary", "summary")

        # Mark success and persist the final state.
        workflow["status"] = "completed"
        workflow["last_activity"] = datetime.now().isoformat()
        self.workflow_manager._save_workflow(workflow)

        return {
            "workflow_id": self.workflow_id,
            "status": "completed",
            "messages": workflow.get("messages", [])
        }

    except Exception as e:
        # Log with full traceback so failures are diagnosable from the logs
        # (the workflow log only carries the exception text).
        logging.getLogger(__name__).exception("Workflow execution failed")
        self.logging_utils.error(f"Workflow execution failed: {str(e)}", "error")

        # Keep the failure path consistent with the success path: stamp the
        # activity time before persisting (previously only done on success).
        workflow["status"] = "failed"
        workflow["last_activity"] = datetime.now().isoformat()
        self.workflow_manager._save_workflow(workflow)

        return {
            "workflow_id": self.workflow_id,
            "status": "failed",
            "error": str(e)
        }
|
|
|
|
async def _process_user_message(self, workflow: Dict[str, Any], message: Dict[str, Any], files: List[Dict[str, Any]] = None) -> Dict[str, Any]:
    """
    Append the incoming user message (plus any file attachments) to the
    workflow and persist the updated state.

    Args:
        workflow: The workflow object (mutated: "messages" list)
        message: The user message; "role" defaults to "user", "content" to ""
        files: Optional list of file metadata dicts (each with an 'id')

    Returns:
        The message dict that was appended to the workflow
    """
    # Build the message skeleton and fill in the user's text.
    msg = self._create_message(workflow, message.get("role", "user"))
    msg["content"] = message.get("content", "")

    # Attach any uploaded files through the file manager.
    if files:
        self.logging_utils.info(f"Processing {len(files)} files", "files")
        file_ids = [entry.get('id') for entry in files]
        msg = await self.file_manager.add_files_to_message(msg, file_ids, self._add_log)

    # Record the message and save the workflow state.
    workflow.setdefault("messages", []).append(msg)
    self.workflow_manager._save_workflow(workflow)

    return msg
|
|
|
|
|
|
|
|
def _single_activity_plan(self, title: str, description: str, agents: List[str], prompt: str,
                          document_requirements: str = "", expected_output: str = "Text") -> List[Dict[str, Any]]:
    """Build a one-activity work plan (shared shape for all fast-path and fallback plans)."""
    return [{
        "title": title,
        "description": description,
        "assigned_agents": agents,
        "agent_prompts": [prompt],
        "document_requirements": document_requirements,
        "expected_output": expected_output,
        "dependencies": []
    }]

async def _create_agent_aware_work_plan(self, workflow: Dict[str, Any], message: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Create an agent-aware work plan that integrates agent selection during planning.

    Strategy:
      1. Fast path for the "poweron" keyword (no AI call needed).
      2. Ask the AI to classify the task; several classifications map
         directly to single-activity plans (web research, document
         processing, complex computation).
      3. For formatting/transformation of already-extracted data, emit an
         optimized single-step plan.
      4. Otherwise ask the AI for a full multi-activity plan, post-process
         it (document work is forced onto the creative agent, redundant
         extraction steps are removed), and fall back to simple
         single-activity plans when no JSON plan can be parsed.

    Args:
        workflow: The workflow object (not read here; kept for interface parity)
        message: The initiating message

    Returns:
        List of structured activities with agent assignments
    """
    # Note: json, re and os come from the module-level imports.
    task = message.get("content", "")

    # Direct check for PowerOn keyword as an additional safeguard (works
    # even when the AI analysis below fails entirely).
    if "poweron" in task.lower():
        self.logging_utils.info("PowerOn keyword directly detected, creating specialized plan with creative agent", "planning")
        return self._single_activity_plan(
            "PowerOn Response", "Generate specialized PowerOn response", ["creative"], task)

    # All available agents and their capabilities (for the planning prompt).
    agent_infos = self.agent_registry.get_agent_infos()

    # Analyze the attached documents without language-specific criteria.
    documents = message.get("documents", [])
    document_info = []
    has_documents = len(documents) > 0
    already_extracted_docs = []

    for doc in documents:
        source = doc.get("source", {})
        doc_name = source.get("name", "unnamed")
        doc_type = source.get("type", "unknown")
        content_type = source.get("content_type", "unknown")

        # Summary entry used in the planning prompt.
        document_info.append({
            "id": doc.get("id"),
            "name": doc_name,
            "type": doc_type,
            "content_type": content_type
        })

        # Documents whose content is already extracted need no extraction step.
        if doc.get("contents") or (source and source.get("extracted_content")):
            already_extracted_docs.append(doc_name)

    # Detailed per-document properties used by the task-analysis prompt.
    detailed_document_info = []
    for doc in documents:
        source = doc.get("source", {})
        doc_name = source.get("name", "unnamed")
        doc_properties = {
            "id": doc.get("id", "unknown_id"),
            "name": doc_name,
            "type": source.get("type", "unknown"),
            "content_type": source.get("content_type", "unknown")
        }
        # Add the file extension if present to help in matching.
        if "." in doc_name:
            doc_properties["file_extension"] = os.path.splitext(doc_name)[1].lower()
        detailed_document_info.append(doc_properties)

    # Serialize safely BEFORE building the prompt: a json.dumps failure here
    # must degrade to the fallback marker, not crash the whole planning step.
    detailed_docs_json = "No documents provided"
    if detailed_document_info:
        try:
            detailed_docs_json = json.dumps(detailed_document_info, indent=2)
        except Exception as e:
            self.logging_utils.warning(f"Error converting document info to JSON: {str(e)}", "planning")

    # Task-analysis prompt: classify the task so the cheap fast paths below
    # can skip the expensive full planning call.
    task_analysis_prompt = f"""
Analyze the following user task and classify it.
This analysis will be used internally by the system to optimize the workflow.

TASK: {task}

AVAILABLE DOCUMENTS:
{detailed_docs_json}

Please determine:
1. The primary type of operation requested (extraction, transformation, formatting, analysis, creation)
2. Whether the task appears to be primarily about:
   - Extracting information from documents
   - Transforming existing information
   - Analyzing available information
   - Creating new content
3. The documents relevant to this task (any documents that might be needed)
4. The expected output format or presentation style
5. Whether the task involves any kind of document processing (such as extracting information,
   transforming data, creating tables, summarizing text, or analyzing document contents)
6. Whether the task requires online information retrieval
7. Whether the task requires complex computational algorithms or repetitive calculations
8. Whether the task contains the keyword "poweron" in any form

Return your analysis as a JSON object with these properties:
- primaryOperationType: string (extraction, transformation, formatting, analysis, creation)
- isUsingExistingData: boolean (true if primarily using already available data)
- mentionedDocuments: array of document IDs or names that are relevant to this task
- expectedOutputFormat: string (html, text, table, etc. or "unspecified")
- involvesDocumentProcessing: boolean (true if task involves any document extraction, transformation, summarization, etc.)
- requiresWebResearch: boolean (true if task requires online information)
- requiresComplexComputation: boolean (true if task requires complex algorithms or repetitive calculations)
- containsPowerOnKeyword: boolean (true if the keyword "poweron" is found in any form)
"""

    self.logging_utils.info("Analyzing task to determine optimal planning approach", "planning")

    # Defaults used when the analysis call fails or returns no JSON.
    operation_type = ""
    is_using_existing_data = False
    expected_output = "unspecified"
    contains_poweron = False
    requires_web_research = False
    requires_complex_computation = False
    involves_document_processing = False
    can_use_optimized_plan = False
    task_analysis = {}

    try:
        task_analysis_response = await self.ai_service.call_api([{"role": "user", "content": task_analysis_prompt}])

        # Extract the JSON object from the response.
        json_match = re.search(r'\{.*\}', task_analysis_response, re.DOTALL)
        if json_match:
            task_analysis = json.loads(json_match.group(0))

            try:
                self.logging_utils.info(f"Task analysis: {json.dumps(task_analysis)}", "planning")
            except Exception as e:
                self.logging_utils.warning(f"Error logging task analysis: {str(e)}", "planning")

            # Extract all analysis criteria from the response.
            operation_type = task_analysis.get("primaryOperationType", "").lower()
            is_using_existing_data = task_analysis.get("isUsingExistingData", False)
            expected_output = task_analysis.get("expectedOutputFormat", "").lower()
            contains_poweron = task_analysis.get("containsPowerOnKeyword", False)
            requires_web_research = task_analysis.get("requiresWebResearch", False)
            requires_complex_computation = task_analysis.get("requiresComplexComputation", False)
            involves_document_processing = task_analysis.get("involvesDocumentProcessing", False)

            adjusted_output = expected_output if expected_output != "unspecified" else "Text"

            # Fast paths, highest priority first. PowerOn handling wins.
            if contains_poweron:
                self.logging_utils.info("PowerOn keyword detected, creating specialized plan with creative agent", "planning")
                return self._single_activity_plan(
                    "PowerOn Response", "Generate specialized PowerOn response", ["creative"], task)

            # Web research tasks get a simple plan with the webcrawler agent.
            if requires_web_research:
                self.logging_utils.info("Web research task detected, creating specialized plan with webcrawler agent", "planning")
                return self._single_activity_plan(
                    "Web Research", "Perform web research to answer the query", ["webcrawler"], task)

            # Document processing with documents available: creative agent.
            if has_documents and involves_document_processing:
                self.logging_utils.info("Document processing task detected with available documents, using creative agent", "planning")
                return self._single_activity_plan(
                    "Document Processing", "Process documents according to requirements", ["creative"], task,
                    document_requirements="All available documents",
                    expected_output=adjusted_output)

            # Document processing even without documents: still creative.
            if involves_document_processing and not requires_complex_computation:
                self.logging_utils.info("Document processing task detected, using creative agent", "planning")
                return self._single_activity_plan(
                    "Document Processing", "Process content according to requirements", ["creative"], task,
                    expected_output=adjusted_output)

            # Only use coder for complex computation tasks.
            if requires_complex_computation:
                self.logging_utils.info("Complex computation task detected, using coder agent", "planning")
                return self._single_activity_plan(
                    "Complex Computation", "Perform complex calculations or processing", ["coder"], task,
                    document_requirements="All available documents may be needed",
                    expected_output=adjusted_output)

            # Flag for optimized planning (formatting/transformation of data
            # that is already extracted from available documents).
            can_use_optimized_plan = (
                (operation_type in ["formatting", "transformation"]) and
                is_using_existing_data and
                has_documents
            )

    except Exception as e:
        self.logging_utils.warning(f"Error analyzing task: {str(e)}, proceeding with standard planning", "planning")

    # Create the base planning prompt.
    plan_prompt = f"""
As an AI workflow manager, create a detailed agent-aware work plan for the following task:

TASK: {task}

AVAILABLE AGENTS:
{self._format_agent_info(agent_infos)}

AVAILABLE DOCUMENTS:
{json.dumps(document_info, indent=2) if document_info else "No documents provided"}

"""

    # Add context about already-extracted documents if any exist.
    if already_extracted_docs:
        plan_prompt += f"""
IMPORTANT CONTEXT:
The following documents already have extracted content ready to use: {', '.join(already_extracted_docs)}
This means NO extraction step is needed for these documents - the data is ALREADY AVAILABLE.
"""

    # Add task-specific context based on the AI analysis (language-agnostic).
    if task_analysis:
        if operation_type and is_using_existing_data and has_documents:
            plan_prompt += f"""
CRITICAL INSTRUCTION FOR THIS TASK:
Based on analysis, this task involves {operation_type} of data that is ALREADY AVAILABLE.
The system has identified this as primarily working with existing data, not requiring new extraction.
If this task involves structured data, that data has already been parsed and is immediately available.
DO NOT create separate extraction tasks - go directly to creating the requested output.
"""

    plan_prompt += """
DOCUMENT HANDLING REQUIREMENTS:
1. When a task involves document analysis, focus on WHAT information is needed, not HOW to extract it
2. The document handler automatically extracts and processes all document components including:
   - Text content from documents
   - Images embedded within documents
   - Charts and graphics
   - Structured data and tables
3. Each document's content is pre-processed and made available to any agent that needs it
4. For document extraction specifications, simply state what information is needed from which document
5. The system will handle conversion between formats, extraction, and specialized processing

AGENT SELECTION GUIDELINES:
1. The creative agent should handle:
   - All document processing tasks (extraction, summarization, analysis)
   - All content creation and knowledge-based tasks
   - All tasks involving documents and text transformation
   - All document descriptions and data extraction
   - All table creation and data representation
   - All tasks with PowerOn keyword

2. The webcrawler agent should ONLY handle:
   - Tasks explicitly requiring online information retrieval
   - Tasks needing current information from the web

3. The coder agent should ONLY handle:
   - Tasks requiring complex computational algorithms
   - Tasks involving repetitive mathematical calculations
   - Tasks requiring specialized programming logic

IMPORTANT DOCUMENT HANDLING PRIORITIES:
- For tasks involving document processing, ALWAYS use the creative agent even for structured data
- For tasks to extract information from documents, ALWAYS use the creative agent
- For tasks to describe or summarize document content, ALWAYS use the creative agent
- For tasks to transform data from documents, ALWAYS use the creative agent
- Only use the coder agent when complex computational logic is the primary requirement
"""

    # Task-specific optimization advice, driven by the AI analysis.
    if can_use_optimized_plan:
        plan_prompt += """
TASK-SPECIFIC OPTIMIZATION:
This task appears to be primarily about formatting or transforming ALREADY EXTRACTED data.
The most efficient approach is:
1. DO NOT include any extraction activities - the document data is already parsed and available
2. Use a SINGLE activity with an appropriate agent to create the requested output format
3. Focus on specifying the desired output format in detail, not on how to extract the data
"""

    plan_prompt += """
The work plan should include a structured list of activities. Each activity should have:
1. title - A short descriptive title for the activity
2. description - What needs to be done in this activity
3. assigned_agents - List of agent IDs that should handle this activity (can be multiple in sequence)
4. agent_prompts - Specific instructions for each agent (matched by index to assigned_agents)
5. document_requirements - Description of WHAT information is needed from which documents (not HOW to extract it)
6. expected_output - The expected output format and content
7. dependencies - List of previous activities this depends on (by index)

IMPORTANT GUIDELINES:
- Optimize agent assignments based on their specialized capabilities
- Create a logical sequence of activities that builds toward the final output
- DO NOT create activities solely for document extraction - specify needed information in document_requirements
- DO NOT assign extraction tasks to specific agents - the system handles this automatically
- When a document contains both text and images, both will be processed automatically
- If a task requires analyzing images, specify what to look for in the images
- Create detailed agent_prompts that clearly explain what each agent should accomplish
- ELIMINATE redundant steps - if data is already extracted, go directly to generating the desired output format

Return the work plan as a JSON array of activity objects, each with the above properties.
"""

    self.logging_utils.info("Creating agent-aware work plan", "planning")

    # For formatting/transformation tasks with extracted data, generate an
    # optimized 1-step plan directly without another AI call.
    if can_use_optimized_plan:
        self.logging_utils.info("Using optimized single-step plan based on task analysis", "planning")

        # Use the specific output format from the task analysis.
        expected_format = task_analysis.get("expectedOutputFormat", "HTML").upper()
        if expected_format.lower() == "unspecified":
            expected_format = "Text"

        # Prefer the creative agent for document processing tasks.
        agent_id = "creative" if involves_document_processing else "coder"

        optimized_plan = self._single_activity_plan(
            "Process and Format Data",
            f"Process the existing data and format it as {expected_format}",
            [agent_id],
            f"The data from the documents has already been extracted and is available. "
            f"Create a well-formatted {expected_format} representation of this data. "
            f"No extraction is needed - focus only on proper formatting and presentation.",
            document_requirements="Use the already extracted data from the available documents",
            expected_output=expected_format)

        self.logging_utils.info(f"Created optimized single-step plan with agent: {agent_id}", "planning")
        return optimized_plan

    # For more complex tasks, use the AI to generate a plan.
    try:
        plan_response = await self.ai_service.call_api([{"role": "user", "content": plan_prompt}])

        # Extract the JSON array of activities.
        json_match = re.search(r'\[\s*\{.*\}\s*\]', plan_response, re.DOTALL)
        if json_match:
            work_plan = json.loads(json_match.group(0))
            self.logging_utils.info(f"Work plan created with {len(work_plan)} activities", "planning")

            # Post-process: force document-related activities onto the
            # creative agent (mirrors the AGENT SELECTION GUIDELINES above).
            for activity in work_plan:
                doc_requirements = activity.get("document_requirements", "")
                activity_description = activity.get("description", "").lower()
                if (doc_requirements or
                        "document" in activity_description or
                        "extract" in activity_description or
                        "summarize" in activity_description):
                    if "creative" not in activity.get("assigned_agents", []):
                        activity["assigned_agents"] = ["creative"]
                        self.logging_utils.info("Changed agent assignment for document activity to creative agent", "planning")

            # Remove redundant extraction steps when working with existing data.
            if task_analysis and task_analysis.get("isUsingExistingData", False):
                work_plan = self._optimize_work_plan(work_plan, task_analysis)
                self.logging_utils.info(f"Post-processed work plan now has {len(work_plan)} activities", "planning")

            # Log the final plan in detail.
            for i, activity in enumerate(work_plan):
                activity_title = activity.get("title", f"Activity {i+1}")
                activity_agents = ", ".join(activity.get("assigned_agents", ["unknown"]))
                self.logging_utils.info(f"Activity {i+1}: {activity_title} (Agents: {activity_agents})", "planning")
                if activity.get("document_requirements"):
                    self.logging_utils.info(f"  Document requirements: {activity.get('document_requirements')}", "planning")
                if activity.get("dependencies"):
                    deps = [str(d + 1) for d in activity.get("dependencies")]
                    self.logging_utils.info(f"  Dependencies: Activities {', '.join(deps)}", "planning")

            return work_plan

        self.logging_utils.warning("Could not extract JSON from AI response", "planning")

        # No parsable plan: fall back based on the earlier analysis flags.
        if requires_web_research:
            return self._single_activity_plan(
                "Web Research", "Perform web research to answer the query", ["webcrawler"], task)
        if involves_document_processing:
            return self._single_activity_plan(
                "Document Processing", "Process documents or content according to requirements", ["creative"], task,
                document_requirements="All available documents may be needed")
        if requires_complex_computation:
            return self._single_activity_plan(
                "Complex Computation", "Perform complex calculations or processing", ["coder"], task,
                document_requirements="All available documents may be needed")
        # Default fallback: creative agent processes the request directly.
        return self._single_activity_plan(
            "Process Task", "Process the request directly", ["creative"], task,
            document_requirements="All available documents may be needed")

    except Exception as e:
        self.logging_utils.error(f"Error creating work plan: {str(e)}", "planning")

        # Check for PowerOn directly in the error-recovery fallback too.
        if "poweron" in task.lower():
            return self._single_activity_plan(
                "PowerOn Response (Fallback)", "Generate specialized PowerOn response after planning error",
                ["creative"], task)

        # Minimal fallback plan with the creative agent.
        return self._single_activity_plan(
            "Process Task (Error Recovery)", "Process the request after planning error", ["creative"], task,
            document_requirements="All available documents may be needed")
|
|
|
|
|
|
|
|
|
|
# Language-agnostic optimization function using task analysis instead of keywords
|
|
def _optimize_work_plan(self, work_plan: List[Dict[str, Any]], task_analysis: Dict[str, Any]) -> List[Dict[str, Any]]:
|
|
"""
|
|
Optimize a work plan based on task analysis, not language-specific keywords.
|
|
|
|
Args:
|
|
work_plan: The original work plan
|
|
task_analysis: Analysis of the task
|
|
|
|
Returns:
|
|
Optimized work plan
|
|
"""
|
|
# Check if plan has multiple activities
|
|
if len(work_plan) <= 1:
|
|
return work_plan
|
|
|
|
# Only optimize when the task is about using existing data
|
|
if not task_analysis.get("isUsingExistingData", False):
|
|
return work_plan
|
|
|
|
# For tasks that use existing data, try to identify and remove redundant extraction steps
|
|
operation_type = task_analysis.get("primaryOperationType", "").lower()
|
|
if operation_type in ["formatting", "transformation"]:
|
|
# Use AI to identify extraction vs formatting activities instead of keywords
|
|
activities_analyzed = []
|
|
|
|
for activity in work_plan:
|
|
title = activity.get("title", "")
|
|
description = activity.get("description", "")
|
|
|
|
# Create an activity object with classification
|
|
activity_info = {
|
|
"original_activity": activity,
|
|
"is_extraction": False,
|
|
"is_formatting": False
|
|
}
|
|
|
|
# Use simple heuristics to classify (can be replaced with AI classification)
|
|
# These are pattern-based, not language-dependent
|
|
if any(x in title.lower() or x in description.lower() for x in ["extract", "parse", "read"]):
|
|
activity_info["is_extraction"] = True
|
|
|
|
if any(x in title.lower() or x in description.lower() for x in ["format", "convert", "transform"]):
|
|
activity_info["is_formatting"] = True
|
|
|
|
activities_analyzed.append(activity_info)
|
|
|
|
# Check if we have both extraction and formatting activities
|
|
has_extraction = any(a["is_extraction"] for a in activities_analyzed)
|
|
has_formatting = any(a["is_formatting"] for a in activities_analyzed)
|
|
|
|
if has_extraction and has_formatting:
|
|
# Create a new optimized plan
|
|
self.logging_utils.info("Optimizing plan by removing redundant extraction steps", "planning")
|
|
|
|
# First, separate formatting and non-extraction activities
|
|
formatting_activities = [a["original_activity"] for a in activities_analyzed if a["is_formatting"]]
|
|
other_activities = [a["original_activity"] for a in activities_analyzed
|
|
if not a["is_extraction"] and not a["is_formatting"]]
|
|
|
|
# Combine into a new optimized plan
|
|
optimized_plan = []
|
|
|
|
# Add formatting activities first
|
|
for activity in formatting_activities:
|
|
# Enhance the prompt to indicate that data is already available
|
|
prompt = activity.get("agent_prompts", [""])[0]
|
|
activity["agent_prompts"] = [
|
|
f"IMPORTANT: The data from the documents has already been extracted and is available. "
|
|
f"You do not need to perform any extraction steps.\n\n{prompt}"
|
|
]
|
|
|
|
# Reset dependencies since we're removing extraction activities
|
|
activity["dependencies"] = []
|
|
optimized_plan.append(activity)
|
|
|
|
# Add other non-extraction activities
|
|
for activity in other_activities:
|
|
# Reset dependencies
|
|
activity["dependencies"] = []
|
|
optimized_plan.append(activity)
|
|
|
|
return optimized_plan
|
|
|
|
# If no optimization possible, return original plan
|
|
return work_plan
|
|
|
|
|
|
async def _execute_work_plan(self, workflow: Dict[str, Any], work_plan: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Execute all activities in the work plan with proper agent handovers.

    Activities run in plan order. Each activity's result is cached by its
    index so later activities can consume it through their ``dependencies``
    list. The workflow is persisted after every activity so progress is not
    lost if a later step fails.

    Args:
        workflow: The workflow object (mutated: agent messages are appended
            and intermediate state is saved after each activity)
        work_plan: The work plan with activities

    Returns:
        Results from all activities
    """
    results = []
    activity_outputs = {}  # activity_index -> result, for dependency resolution

    for activity_index, activity in enumerate(work_plan):
        # Extract activity info
        title = activity.get("title", f"Activity {activity_index+1}")
        description = activity.get("description", "")
        assigned_agents = activity.get("assigned_agents", ["assistant"])
        # FIX: an explicitly empty "agent_prompts" list previously slipped
        # through .get()'s default and caused an IndexError below at
        # agent_prompts[-1]; treat empty the same as missing.
        agent_prompts = activity.get("agent_prompts") or [description]
        doc_requirements = activity.get("document_requirements", "")
        expected_output = activity.get("expected_output", "Text")
        dependencies = activity.get("dependencies", [])

        self.logging_utils.info(f"Starting activity: {title}", "execution")

        # Keep assigned_agents and agent_prompts the same length
        if len(assigned_agents) > len(agent_prompts):
            # Duplicate the last prompt for additional agents
            agent_prompts.extend([agent_prompts[-1]] * (len(assigned_agents) - len(agent_prompts)))
        elif len(agent_prompts) > len(assigned_agents):
            # Truncate excess prompts
            agent_prompts = agent_prompts[:len(assigned_agents)]

        # Resolve dependencies: only earlier, already-executed activities count
        dependency_context = {}
        for dep_index in dependencies:
            if dep_index < activity_index and dep_index in activity_outputs:
                dep_output = activity_outputs[dep_index]
                dependency_context[f"activity_{dep_index+1}"] = dep_output

        # Extract required documents if the activity declares requirements
        document_content = ""
        if doc_requirements:
            extracted_data = await self._extract_required_documents(workflow, doc_requirements)
            if extracted_data and "extracted_content" in extracted_data:
                # Format document content for the prompt
                document_content = "\n\n=== EXTRACTED DOCUMENT CONTENT ===\n\n"
                for item in extracted_data.get("extracted_content", []):
                    doc_name = item.get("name", "Unnamed document")
                    doc_content = item.get("content", "No content available")
                    document_content += f"--- {doc_name} ---\n{doc_content}\n\n"

        # Execute the activity with the assigned agents
        activity_result = await self._execute_agent_sequence(
            workflow,
            assigned_agents,
            agent_prompts,
            document_content,
            dependency_context,
            expected_output
        )

        # Store the result (for dependents and for the caller)
        activity_outputs[activity_index] = activity_result
        results.append({
            "title": title,
            "description": description,
            "agents": assigned_agents,
            "result": activity_result.get("content", ""),
            "output_format": activity_result.get("format", "Text")
        })

        self.logging_utils.info(f"Completed activity: {title}", "execution")

        # Save intermediate state
        self.workflow_manager._save_workflow(workflow)

    return results
|
|
|
|
async def _execute_agent_sequence(
    self,
    workflow: Dict[str, Any],
    agent_ids: List[str],
    prompts: List[str],
    document_content: str,
    dependency_context: Dict[str, Any],
    expected_output: str
) -> Dict[str, Any]:
    """
    Execute a sequence of agents with proper handovers.

    Agents run one after another: each receives the previous agent's text
    output embedded in its prompt, and any documents the previous agent
    produced attached to its message. Every agent response is appended to
    ``workflow["messages"]`` as a side effect.

    Args:
        workflow: The workflow object (mutated: response messages appended)
        agent_ids: List of agent IDs to execute in sequence
        prompts: List of prompts for each agent
        document_content: Extracted document content
        dependency_context: Context from dependent activities
        expected_output: Expected output format

    Returns:
        Result of the agent sequence execution: the last agent's result,
        or a placeholder dict when no agent produced a response.
    """
    # Shared context passed to every agent's process_message call
    context = {
        "workflow_id": self.workflow_id,
        "expected_format": expected_output,
        "dependency_outputs": dependency_context,
        "include_chat_history": True  # Flag to indicate chat history should be included
    }

    last_result = None    # result of the most recent agent, feeds the next prompt
    last_documents = []   # documents produced by the most recent agent

    for i, agent_id in enumerate(agent_ids):
        # Get the agent from the registry
        agent = self.agent_registry.get_agent(agent_id)
        if agent:
            # Ensure dependencies are set (agents may be lazily configured)
            if hasattr(agent, 'set_dependencies'):
                agent.set_dependencies(
                    ai_service=self.ai_service,
                    document_handler=self.document_handler,
                    lucydom_interface=self.lucydom_interface
                )

            # Set document handler if agent supports it
            if hasattr(agent, 'set_document_handler') and hasattr(self, 'document_handler'):
                agent.set_document_handler(self.document_handler)

        if not agent:
            # Fall back to the generic assistant agent
            self.logging_utils.warning(f"Agent '{agent_id}' not found, using assistant instead", "agents")
            agent = self.agent_registry.get_agent("assistant")
            if not agent:
                # No assistant either: skip this step entirely (no response
                # message is recorded for it)
                continue

        # Get this agent's prompt; reuse the last prompt if fewer prompts
        # than agents were supplied
        base_prompt = prompts[i] if i < len(prompts) else prompts[-1]

        # Enhance the prompt with document, dependency and history context
        enhanced_prompt = self._enhance_prompt(
            base_prompt,
            document_content,
            dependency_context,
            last_result.get("content", "") if last_result else "",
            i > 0,  # is_continuation flag
            workflow  # Pass the workflow parameter
        )

        if document_content and "Image Analysis" not in document_content:
            # Instead of trying to access message or documents directly,
            # we can use what we know about the workflow we're currently processing
            workflow_id = self.workflow_id

            # Log a warning that might help identify the issue
            self.logging_utils.warning(
                f"Document content available but no image analysis found - PDF image extraction may have failed for workflow {workflow_id}",
                "agents"
            )

        # Create the user-role message carrying the prompt for this agent
        agent_message = self._create_message(workflow, "user")
        agent_message["content"] = enhanced_prompt

        # IMPORTANT FIX: Document handling logic
        # First, check if we have documents from previous agent if this is a continuation
        if last_documents and i > 0:
            agent_message["documents"] = last_documents
        # For the first agent, make sure we pass any documents from the most recent user message
        elif i == 0:
            # Find the most recent user message with documents
            for msg in reversed(workflow.get("messages", [])):
                if msg.get("role") == "user" and msg.get("documents"):
                    agent_message["documents"] = msg.get("documents", [])
                    self.logging_utils.info(f"Passing {len(agent_message['documents'])} documents from user message to {agent_id}", "agents")
                    break

        # Log agent execution
        self.logging_utils.info(f"Executing agent: {agent_id}", "agents")

        # Execute the agent
        agent_response = await agent.process_message(agent_message, context)

        # Create the assistant-role response message and record agent identity
        response_message = self._create_message(workflow, "assistant")
        response_message["content"] = agent_response.get("content", "")
        response_message["agent_type"] = agent_id
        response_message["agent_id"] = agent_id
        response_message["agent_name"] = agent.name
        response_message["result_format"] = agent_response.get("result_format", expected_output)

        # Capture documents from response for the next agent in the chain
        if "documents" in agent_response:
            response_message["documents"] = agent_response["documents"]
            last_documents = agent_response["documents"]
            self.logging_utils.info(f"Agent {agent_id} produced {len(last_documents)} documents", "agents")

        # Add to workflow history
        workflow["messages"].append(response_message)

        # Update last result (handed to the next agent as prior output)
        last_result = {
            "content": agent_response.get("content", ""),
            "format": agent_response.get("result_format", expected_output),
            "agent_id": agent_id,
            "documents": agent_response.get("documents", [])
        }

    return last_result or {
        "content": "No agent response was generated.",
        "format": "Text"
    }
|
|
|
|
|
|
async def _extract_required_documents(self, workflow: Dict[str, Any], doc_requirements: str) -> Dict[str, Any]:
    """
    Extract required documents based on requirements description with enhanced image extraction.

    Delegates the actual extraction to ``data_extraction`` with an enriched
    prompt, then logs per-document extraction details (method, preview,
    presence of image analysis).

    Args:
        workflow: The workflow object
        doc_requirements: Description of document requirements

    Returns:
        Extracted document data (the value returned by ``data_extraction``;
        may be falsy when extraction produced nothing)
    """
    # Local import: keeps the extraction module off the import path until needed
    from modules.agentservice_dataextraction import data_extraction

    # Get all files from the workflow
    files = self.workflow_utils.get_files(workflow)

    # Get all messages from the workflow (extraction may use chat context)
    workflow_messages = workflow.get("messages", [])

    # Log document requirements
    self.logging_utils.info(f"Document requirements: {doc_requirements}", "extraction")
    self.logging_utils.info(f"Found {len(files)} files in workflow", "extraction")

    # Create enhanced extraction prompt
    enhanced_prompt = f"""
Extract the following information from the available documents:

REQUIRED INFORMATION: {doc_requirements}

For all documents, please:
1. Extract relevant text portions matching the requirements
2. Identify and analyze any embedded images or charts
3. Provide structured data from tables or spreadsheets
4. Summarize key information in context of the requirements

Handle multi-format documents comprehensively (text, images, charts, tables)
For images, include detailed descriptions of visual content
"""

    # Extract data using the dataextraction module with enhanced prompt
    self.logging_utils.info("Starting document extraction process", "extraction")

    extracted_data = await data_extraction(
        prompt=enhanced_prompt,
        files=files,
        messages=workflow_messages,
        ai_service=self.ai_service,
        lucydom_interface=self.lucydom_interface,
        workflow_id=self.workflow_id,
        add_log_func=self._add_log,
        document_handler=self.document_handler  # Pass document handler for better extraction
    )

    # Log extraction results
    if extracted_data:
        extracted_content = extracted_data.get("extracted_content", [])
        self.logging_utils.info(f"Extracted content from {len(extracted_content)} documents", "extraction")

        # Log details for each extracted document with more detail
        for doc in extracted_content:
            doc_name = doc.get("name", "Unnamed document")
            extraction_method = doc.get("extraction_method", "unknown")
            is_extracted = doc.get("is_extracted", False)
            # Truncate long content to a 100-char preview for the log
            content_preview = doc.get("content", "")[:100] + "..." if len(doc.get("content", "")) > 100 else doc.get("content", "")

            self.logging_utils.info(
                f"Document: {doc_name}, Method: {extraction_method}, Extracted: {is_extracted}",
                "extraction"
            )
            self.logging_utils.info(
                f"Content preview: {content_preview}",
                "extraction"
            )

            # Specifically check for image content (downstream agents warn on its absence)
            if "Image Analysis:" in doc.get("content", ""):
                self.logging_utils.info(f"Image content found in {doc_name}", "extraction")
            else:
                self.logging_utils.warning(f"No image content found in {doc_name} - check PDF extraction", "extraction")

    return extracted_data
|
|
|
|
|
|
async def _create_summary(self, workflow: Dict[str, Any], results: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Create a summary of the workflow results for the user.

    Builds a prompt from the (truncated) activity results, asks the AI
    service for a summary, and appends the resulting summary message
    (flagged ``workflow_complete``) to the workflow.

    Args:
        workflow: The workflow object (mutated: summary message appended)
        results: Results from activity executions

    Returns:
        Summary message
    """
    # Create a summary prompt
    summary_prompt = "Create a clear, concise summary of the following workflow results:\n\n"

    for i, result in enumerate(results, 1):
        title = result.get("title", f"Activity {i}")
        description = result.get("description", "")
        content = result.get("result", "")
        agents = ", ".join(result.get("agents", ["unknown"]))

        # Limit content length for the summary prompt (keep token usage bounded)
        content_preview = content[:500] + "..." if len(content) > 500 else content

        summary_prompt += f"""
ACTIVITY {i}: {title}
Description: {description}
Executed by: {agents}

{content_preview}

---
"""

    summary_prompt += """
Provide a well-structured summary that:
1. Highlights the key findings and results
2. Connects the results to the original task
3. Presents any conclusions or recommendations

Make sure the summary is clear, concise, and useful to the user.
"""

    # Call AI to generate summary
    summary_content = await self.ai_service.call_api([{"role": "user", "content": summary_prompt}])

    # Create summary message with a dedicated pseudo-agent identity
    summary_message = self._create_message(workflow, "assistant")
    summary_message["content"] = summary_content
    summary_message["agent_type"] = "summary"
    summary_message["agent_id"] = "workflow_summary"
    summary_message["agent_name"] = "Workflow Summary"
    summary_message["result_format"] = "Text"
    summary_message["workflow_complete"] = True  # signals workflow completion to consumers

    # Add to workflow
    workflow["messages"].append(summary_message)

    return summary_message
|
|
|
|
def _create_message(self, workflow: Dict[str, Any], role: str) -> Dict[str, Any]:
|
|
"""Create a new message object for the workflow"""
|
|
message_id = f"msg_{uuid.uuid4()}"
|
|
current_time = datetime.now().isoformat()
|
|
|
|
# Determine sequence number
|
|
sequence_no = 1
|
|
if "messages" in workflow and workflow["messages"]:
|
|
sequence_no = len(workflow["messages"]) + 1
|
|
|
|
# Create message object
|
|
message = {
|
|
"id": message_id,
|
|
"workflow_id": self.workflow_id,
|
|
"parent_message_id": None,
|
|
"started_at": current_time,
|
|
"finished_at": None,
|
|
"sequence_no": sequence_no,
|
|
|
|
"status": "pending",
|
|
"role": role,
|
|
|
|
"data_stats": {
|
|
"processing_time": 0.0,
|
|
"token_count": 0,
|
|
"bytes_sent": 0,
|
|
"bytes_received": 0
|
|
},
|
|
|
|
"documents": [],
|
|
"content": None,
|
|
"agent_type": None
|
|
}
|
|
|
|
return message
|
|
|
|
def _add_log(self, workflow_id: str, message: str, log_type: str, agent_id: Optional[str] = None, agent_name: Optional[str] = None) -> None:
    """Add a log entry to the workflow.

    Thin delegation: forwards all arguments unchanged to the owning
    workflow manager's ``_add_log``. Also used as the ``add_log_func``
    callback handed to helper modules.
    """
    # This calls back to the workflow manager's log function
    self.workflow_manager._add_log(workflow_id, message, log_type, agent_id, agent_name)
|
|
|
|
def _format_agent_info(self, agent_infos: List[Dict[str, Any]]) -> str:
    """Format agent information for the planning prompt.

    Renders each agent's id, name, type, description, capabilities and
    result format as a plain-text bullet block. Missing fields default to
    empty strings ('unknown' for the id, 'Text' for the result format).
    """
    formatted_info = ""
    for agent in agent_infos:
        formatted_info += f"""
- ID: {agent.get('id', 'unknown')}
Name: {agent.get('name', '')}
Type: {agent.get('type', '')}
Description: {agent.get('description', '')}
Capabilities: {agent.get('capabilities', '')}
Result Format: {agent.get('result_format', 'Text')}
"""
    return formatted_info
|
|
|
|
def _enhance_prompt(
    self,
    base_prompt: str,
    document_content: str,
    dependency_context: Dict[str, Any],
    previous_result: str,
    is_continuation: bool,
    workflow: Dict[str, Any] = None  # Add workflow parameter
) -> str:
    """
    Enhance a prompt with context information.

    Appends, in order: the previous agent's output (continuations only),
    extracted document content, outputs from dependent activities, and the
    last five user/assistant messages of the workflow's conversation.

    Args:
        base_prompt: The original prompt
        document_content: Extracted document content
        dependency_context: Context from dependent activities
        previous_result: Result from previous agent in sequence
        is_continuation: Flag indicating if this is a continuation
        workflow: Optional workflow object; when provided, recent chat
            history is appended to the prompt

    Returns:
        Enhanced prompt
    """
    enhanced_prompt = base_prompt

    # Add continuation context if this is a continuation
    if is_continuation and previous_result:
        enhanced_prompt = f"""
{enhanced_prompt}

=== PREVIOUS AGENT OUTPUT ===
{previous_result}
"""
    # Add document content if available
    if document_content:
        enhanced_prompt += f"\n\n{document_content}"

    # Add dependency context if available
    if dependency_context:
        dependency_section = "\n\n=== OUTPUTS FROM PREVIOUS ACTIVITIES ===\n\n"
        for name, value in dependency_context.items():
            if isinstance(value, dict) and "content" in value:
                # Extract content if it's in the standard result format
                dependency_section += f"--- {name} ---\n{value['content']}\n\n"
            else:
                # Use the value directly
                dependency_section += f"--- {name} ---\n{str(value)}\n\n"

        enhanced_prompt += dependency_section

    # Add chat history from workflow if available
    if workflow and "messages" in workflow:
        chat_history = "\n\n=== CONVERSATION HISTORY ===\n\n"
        relevant_messages = []

        # Collect relevant messages (user and assistant interactions)
        for msg in workflow.get("messages", []):
            if msg.get("role") in ["user", "assistant"] and msg.get("content"):
                relevant_messages.append(msg)

        # Add up to the last 5 messages for context
        if relevant_messages:
            for msg in relevant_messages[-5:]:
                role = msg.get("role", "").upper()
                content = msg.get("content", "")
                if content:
                    chat_history += f"{role}: {content}\n\n"

            enhanced_prompt += chat_history

    return enhanced_prompt
|