Redesigned all AI agents around target-driven task processing
This commit is contained in:
parent
f90483e3fd
commit
5c066feb19
7 changed files with 1495 additions and 3743 deletions
|
|
@ -1,814 +0,0 @@
|
|||
"""
|
||||
Coder agent for development and execution of Python code.
|
||||
Optimized for the new task-based processing.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import json
|
||||
import re
|
||||
import uuid
|
||||
import os
|
||||
import subprocess
|
||||
import tempfile
|
||||
import shutil
|
||||
import sys
|
||||
from typing import Dict, Any, List, Optional, Tuple
|
||||
|
||||
from modules.chat_registry import AgentBase
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AgentCoder(AgentBase):
|
||||
"""Agent for development and execution of Python code"""
|
||||
|
||||
def __init__(self):
    """Set up the coder agent's identity, capabilities, and tuning knobs."""
    super().__init__()

    # Registry identity.
    self.name = "coder"
    self.description = "Develops and executes Python code for data processing and automation"
    self.capabilities = [
        "code_development",
        "data_processing",
        "file_processing",
        "automation",
        "code_execution",
    ]

    # Sandbox limits applied to every code run.
    self.executor_timeout = 60  # seconds
    self.executor_memory_limit = 512  # MB

    # Low temperature keeps code generation deterministic.
    self.ai_temperature = 0.1

    # Upper bound on the fix-and-retry loop.
    self.max_correction_attempts = 3
||||
def set_dependencies(self, ai_service=None):
    """Inject the AI service this agent calls for code generation."""
    self.ai_service = ai_service
||||
async def process_task(self, task: Dict[str, Any]) -> Dict[str, Any]:
    """
    Process a standardized task: generate Python code with the AI service,
    execute it in a sandbox with auto-correction, and package the results.

    Args:
        task: A dictionary containing:
            - task_id: Unique ID for this task
            - prompt: The main instruction for the agent
            - input_documents: List of documents to process
            - output_specifications: List of required output documents
            - context: Additional contextual information

    Returns:
        A dictionary containing:
            - feedback: Text response explaining the code execution
            - documents: List of created document objects (always includes
              the generated code itself as "generated_code.py")
    """
    try:
        # Unpack the task; every field is optional with a safe default.
        prompt = task.get("prompt", "")
        input_documents = task.get("input_documents", [])
        output_specs = task.get("output_specifications", [])
        context_info = task.get("context", {})

        # Without an AI service we cannot generate code at all.
        if not self.ai_service:
            logger.error("No AI service configured for the Coder agent")
            return {
                "feedback": "The Coder agent is not properly configured.",
                "documents": []
            }

        # Textual content of the input documents, used as generation context.
        document_context = self._extract_document_context(input_documents)

        # Ask the AI for code plus its "# REQUIREMENTS:" package list.
        logger.info("Generating code based on the task")
        code_to_execute, requirements = await self._generate_code_from_prompt(prompt, document_context)

        if not code_to_execute:
            logger.warning("AI couldn't generate any code")
            return {
                "feedback": "I couldn't generate executable code based on the task. Please provide more detailed instructions.",
                "documents": []
            }

        logger.info(f"Code generated with AI ({len(code_to_execute)} characters)")

        # Collected output documents; the generated source always comes first.
        generated_documents = []
        code_doc = {
            "label": "generated_code.py",
            "content": code_to_execute
        }
        generated_documents.append(code_doc)

        # Context handed to the sandboxed execution.
        execution_context = {
            "input_documents": input_documents,
            "task": task
        }

        # Run the code; on failure the helper regenerates and retries up to
        # self.max_correction_attempts times.
        result, attempts_info = await self._execute_with_auto_correction(
            code_to_execute,
            requirements,
            execution_context,
            prompt  # Original prompt/message
        )

        if result.get("success", False):
            output = result.get("output", "")
            execution_result = result.get("result")
            logger.info("Code executed successfully")

            # Map the execution artifacts onto the requested output specs.
            # attempts_info[-1]["code"] is the code version that succeeded.
            result_docs = self._generate_result_documents(
                attempts_info[-1]["code"],  # Last successful code
                output,
                execution_result,
                output_specs
            )
            generated_documents.extend(result_docs)

            feedback = f"I successfully executed the code and generated {len(result_docs)} output files."
            if attempts_info and len(attempts_info) > 1:
                feedback += f" (This required {len(attempts_info)-1} correction attempts)"

        else:
            # All correction attempts failed; surface the error as a document.
            error = result.get("error", "Unknown error")
            logger.error(f"Error in code execution after all correction attempts: {error}")
            error_doc = {
                "label": "execution_error.txt",
                "content": f"Execution error:\n\n{error}"
            }
            generated_documents.append(error_doc)
            feedback = f"An error occurred during code execution after {len(attempts_info)} correction attempts."

        # No explicit output specs: expose stdout (and the parsed result, if
        # any) as default documents. Only reachable on success, so `output`
        # and `execution_result` are bound by the branch above.
        if not output_specs and result.get("success", False):
            output_doc = {
                "label": "execution_output.txt",
                "content": output
            }
            generated_documents.append(output_doc)

            if execution_result:
                result_json = json.dumps(execution_result, indent=2) if isinstance(execution_result, (dict, list)) else str(execution_result)
                result_doc = {
                    "label": "execution_result.json",
                    "content": result_json
                }
                generated_documents.append(result_doc)

        return {
            "feedback": feedback,
            "documents": generated_documents
        }

    except Exception as e:
        # Last-resort guard: report the failure instead of propagating.
        error_msg = f"Error during processing by the Coder agent: {str(e)}"
        logger.error(error_msg)
        return {
            "feedback": f"An error occurred during code processing: {str(e)}",
            "documents": []
        }
||||
def _extract_document_context(self, documents: List[Dict[str, Any]]) -> str:
|
||||
"""
|
||||
Extract context from input documents for code generation.
|
||||
|
||||
Args:
|
||||
documents: List of document objects
|
||||
|
||||
Returns:
|
||||
Extracted context as text
|
||||
"""
|
||||
context_parts = []
|
||||
|
||||
for doc in documents:
|
||||
doc_name = doc.get("name", "Unnamed document")
|
||||
context_parts.append(f"--- {doc_name} ---")
|
||||
|
||||
for content in doc.get("contents", []):
|
||||
if content.get("metadata", {}).get("is_text", False):
|
||||
context_parts.append(content.get("data", ""))
|
||||
|
||||
return "\n\n".join(context_parts)
|
||||
|
||||
def _generate_result_documents(self, code: str, output: str, execution_result: Any,
|
||||
output_specs: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Generate output documents based on execution results and specifications.
|
||||
|
||||
Args:
|
||||
code: Executed code
|
||||
output: Text output of the execution
|
||||
execution_result: Result object from execution
|
||||
output_specs: Output specifications
|
||||
|
||||
Returns:
|
||||
List of generated document objects
|
||||
"""
|
||||
documents = []
|
||||
|
||||
# If no specific outputs requested
|
||||
if not output_specs:
|
||||
return documents
|
||||
|
||||
# Generate appropriate document for each requested output
|
||||
for spec in output_specs:
|
||||
output_label = spec.get("label", "")
|
||||
output_description = spec.get("description", "")
|
||||
|
||||
# Determine output type based on file extension
|
||||
format_type = self._determine_format_type(output_label)
|
||||
|
||||
# Generate document content based on format and output
|
||||
if "code" in output_label.lower() or format_type in ["py", "js", "html", "css"]:
|
||||
# Code document
|
||||
documents.append({
|
||||
"label": output_label,
|
||||
"content": code
|
||||
})
|
||||
elif "output" in output_label.lower() or format_type == "txt":
|
||||
# Output document
|
||||
documents.append({
|
||||
"label": output_label,
|
||||
"content": output
|
||||
})
|
||||
elif format_type in ["json", "yml", "yaml"] and execution_result:
|
||||
# JSON result document
|
||||
if isinstance(execution_result, (dict, list)):
|
||||
content = json.dumps(execution_result, indent=2)
|
||||
else:
|
||||
content = str(execution_result)
|
||||
|
||||
documents.append({
|
||||
"label": output_label,
|
||||
"content": content
|
||||
})
|
||||
else:
|
||||
# Generic result document (fallback)
|
||||
result_str = ""
|
||||
if execution_result:
|
||||
if isinstance(execution_result, (dict, list)):
|
||||
result_str = json.dumps(execution_result, indent=2)
|
||||
else:
|
||||
result_str = str(execution_result)
|
||||
|
||||
documents.append({
|
||||
"label": output_label,
|
||||
"content": f"Code output:\n\n{output}\n\nResult:\n\n{result_str}"
|
||||
})
|
||||
|
||||
return documents
|
||||
|
||||
def _determine_format_type(self, output_label: str) -> str:
|
||||
"""
|
||||
Determine the format type based on the filename.
|
||||
|
||||
Args:
|
||||
output_label: Output filename
|
||||
|
||||
Returns:
|
||||
Format type (py, js, json, txt, etc.)
|
||||
"""
|
||||
if not '.' in output_label:
|
||||
return "txt" # Default format
|
||||
|
||||
extension = output_label.split('.')[-1].lower()
|
||||
return extension
|
||||
|
||||
async def _execute_with_auto_correction(
    self,
    initial_code: str,
    requirements: List[str],
    context: Dict[str, Any],
    original_prompt: str
) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]:
    """
    Execute code, and on failure ask the AI to fix it and retry, up to
    self.max_correction_attempts total attempts.

    Args:
        initial_code: The initial Python code.
        requirements: List of required packages (copied, never mutated).
        context: Additional context passed through to execution.
        original_prompt: The original user request, given to the fixer so
            corrections preserve the intended behavior.

    Returns:
        Tuple of (final execution result, list of per-attempt dicts with
        keys: attempt, code, error, success). attempts_info always has at
        least one entry.
    """
    # Working state for the retry loop; the caller's list is not mutated.
    current_code = initial_code
    current_requirements = requirements.copy() if requirements else []
    attempts_info = []

    for attempt in range(1, self.max_correction_attempts + 1):
        if attempt == 1:
            logger.info(f"Executing code (attempt {attempt}/{self.max_correction_attempts})")
        else:
            logger.info(f"Executing corrected code (attempt {attempt}/{self.max_correction_attempts})")

        # Run the current code version in the sandbox.
        result = await self._execute_code(current_code, current_requirements, context)

        # Record this attempt (used by the caller to find the winning code).
        attempts_info.append({
            "attempt": attempt,
            "code": current_code,
            "error": result.get("error", ""),
            "success": result.get("success", False)
        })

        if result.get("success", False):
            # Success — stop retrying.
            return result, attempts_info

        if attempt >= self.max_correction_attempts:
            logger.warning(f"Maximum correction attempts ({self.max_correction_attempts}) reached")
            break

        error_message = result.get("error", "Unknown error")
        logger.info(f"Attempting to fix code error: {error_message[:200]}...")

        # Ask the AI for a corrected version (and possibly new packages).
        corrected_code, new_requirements = await self._generate_code_correction(
            current_code,
            error_message,
            original_prompt,
            current_requirements
        )

        if corrected_code:
            current_code = corrected_code
            # Merge newly requested packages, avoiding duplicates.
            if new_requirements:
                for req in new_requirements:
                    if req not in current_requirements:
                        current_requirements.append(req)
                        logger.info(f"Added new requirement: {req}")
        else:
            # The fixer itself failed; further retries would be identical.
            logger.warning("Couldn't generate code correction")
            break

    # All attempts failed — return the last (failed) result and the history.
    return result, attempts_info
||||
async def _generate_code_correction(
    self,
    code: str,
    error_message: str,
    original_prompt: str,
    current_requirements: Optional[List[str]] = None
) -> Tuple[Optional[str], List[str]]:
    """
    Generate a corrected version of code based on error messages.

    Fixes: logging now goes through the module-level `logger` (the original
    called the root `logging` module directly, bypassing this module's
    logger configuration), and the return annotation admits the documented
    None failure value.

    Args:
        code: The code that generated errors.
        error_message: The error message to fix.
        original_prompt: The original task/requirements.
        current_requirements: List of currently required packages.

    Returns:
        Tuple of (corrected code, new requirements list). The code is None
        when correction failed, signalling the caller to stop retrying.
    """
    try:
        # The prompt carries task, failing code, and error so the model can
        # fix the error without losing the intended behavior.
        correction_prompt = f"""You need to fix an error in Python code. The code was written for this task:

ORIGINAL TASK:
{original_prompt}

CURRENT CODE:
```python
{code}
```

ERROR MESSAGE:
```
{error_message}
```

CURRENT REQUIREMENTS: {', '.join(current_requirements) if current_requirements else "None"}

Your task is to analyze the error and provide a corrected version of the code.
Focus specifically on fixing the error while maintaining the original functionality.

Common fixes include:
- Fixing syntax errors (missing parentheses, indentation, etc.)
- Solving import errors by adding appropriate requirements
- Correcting file paths or handling "file not found" errors
- Adding error handling for specific edge cases
- Fixing logical errors in the code

FORMATTING GUIDELINES:
1. Provide ONLY the complete corrected Python code WITHOUT explanations
2. Do NOT use code block markers like ```python or ```
3. Do NOT explain what the code does before or after
4. Do NOT add any text that isn't valid Python code
5. Start your answer directly with valid Python code
6. End your answer with valid Python code

If you need to add new required packages, place them in a specially formatted comment at the beginning of your code as follows:
# REQUIREMENTS: package1,package2,package3

Your entire answer must be valid Python that can be executed without modifications.
"""

        messages = [
            {"role": "system", "content": "You are a Python debugging expert. You provide ONLY clean, error-free Python code, without explanations, markdown formatting, or text that isn't code."},
            {"role": "user", "content": correction_prompt}
        ]

        # Very low temperature: corrections should be deterministic.
        generated_content = await self.ai_service.call_api(
            messages,
            temperature=0.1
        )

        # Strip any markdown fences the model emitted despite instructions.
        fixed_code = self._clean_code(generated_content)

        # Extra packages are reported via a "# REQUIREMENTS: ..." comment.
        new_requirements = []
        for line in fixed_code.split('\n'):
            if line.strip().startswith("# REQUIREMENTS:"):
                req_str = line.replace("# REQUIREMENTS:", "").strip()
                new_requirements = [r.strip() for r in req_str.split(',') if r.strip()]
                break

        return fixed_code, new_requirements

    except Exception as e:
        # Module logger (was the root `logging` module) for consistency.
        logger.error(f"Error generating code correction: {str(e)}")
        # None tells the retry loop no correction could be produced.
        return None, []
||||
def _clean_code(self, code: str) -> str:
|
||||
"""
|
||||
Clean code by removing markdown code block markers and other formatting artifacts.
|
||||
|
||||
Args:
|
||||
code: The code string to clean
|
||||
|
||||
Returns:
|
||||
Cleaned code string
|
||||
"""
|
||||
# Remove code block markers at beginning/end
|
||||
code = re.sub(r'^```(?:python)?\s*', '', code)
|
||||
code = re.sub(r'```\s*$', '', code)
|
||||
|
||||
# Process lines in reverse order to start from the end
|
||||
lines = code.split('\n')
|
||||
clean_lines = []
|
||||
in_trailing_markdown = False
|
||||
|
||||
for line in reversed(lines):
|
||||
stripped = line.strip()
|
||||
|
||||
# Check if this line contains only backticks (``` or ` or ``)
|
||||
if re.match(r'^`{1,3}$', stripped):
|
||||
in_trailing_markdown = True
|
||||
continue
|
||||
|
||||
# If we've reached actual code, no more trailing markdown consideration
|
||||
if stripped and not in_trailing_markdown:
|
||||
in_trailing_markdown = False
|
||||
|
||||
# Add this line if it's not part of trailing markdown
|
||||
if not in_trailing_markdown:
|
||||
clean_lines.insert(0, line)
|
||||
|
||||
# Rejoin lines
|
||||
clean_code = '\n'.join(clean_lines)
|
||||
|
||||
# Final cleanup for any remaining backticks
|
||||
clean_code = re.sub(r'`{1,3}\s*', '', clean_code)
|
||||
|
||||
return clean_code.strip()
|
||||
|
||||
async def _generate_code_from_prompt(self, prompt: str, document_context: str) -> Tuple[str, List[str]]:
    """
    Generate Python code from a prompt using the AI service.

    Fixes: logging now goes through the module-level `logger` instead of the
    root `logging` module (consistent with the rest of this module), and the
    constant log message no longer uses a pointless f-string.

    Args:
        prompt: The prompt to generate code from.
        document_context: Context extracted from documents.

    Returns:
        Tuple of (generated Python code, required packages). On failure the
        code is a small fallback script that reports the generation error.
    """
    try:
        # Generation prompt: the task, optional document context, and strict
        # formatting rules so the answer is directly executable.
        ai_prompt = f"""Generate Python code to solve the following task:

TASK:
{prompt}

PROVIDED CONTEXT:
{document_context if document_context else "No additional context available."}

IMPORTANT REQUIREMENTS:
1. Your code MUST define a 'result' variable to store the final result.
2. At the end of your script, the result variable should be output.
3. Make your 'result' variable a dictionary or other JSON-serializable data structure containing all relevant outputs.
4. Comment your code well to explain important operations.
5. Make your code complete and self-contained.
6. Add appropriate error handling.

FORMATTING INSTRUCTIONS:
- Return ONLY the Python code, WITHOUT introduction, explanation, or conclusion text
- Do NOT use code block markers like ```python or ```
- Do NOT explain what the code does before or after
- Do NOT add any text that isn't valid Python code
- Start your answer directly with valid Python code
- End your answer with valid Python code

For required packages, place them in a specially formatted comment at the beginning of your code in one line as follows:
# REQUIREMENTS: pandas,numpy,matplotlib,requests

Your entire answer must be valid Python that can be executed without modifications.
"""

        messages = [
            {"role": "system", "content": "You are a Python code generator who provides ONLY clean, executable Python code with no explanations, markdown formatting, or non-code text."},
            {"role": "user", "content": ai_prompt}
        ]

        logger.info("Calling AI API to generate code")
        generated_content = await self.ai_service.call_api(messages, temperature=self.ai_temperature)

        # Strip markdown fences etc. so only valid Python remains.
        code = self._clean_code(generated_content)

        # Packages are declared via a "# REQUIREMENTS: ..." comment.
        requirements = []
        for line in code.split('\n'):
            if line.strip().startswith("# REQUIREMENTS:"):
                req_str = line.replace("# REQUIREMENTS:", "").strip()
                requirements = [r.strip() for r in req_str.split(',') if r.strip()]
                break

        return code, requirements

    except Exception as e:
        logger.error(f"Error generating code with AI: {str(e)}")
        # Return a small self-describing fallback script and no requirements
        # so the caller's pipeline still has something executable.
        error_str = str(e).replace('"', '\\"')
        return f"""
# Error in code generation
print(f"An error occurred during code generation: {error_str}")
# Return error result
result = {{"error": "Code generation failed", "message": "{error_str}"}}
""", []
||||
async def _execute_code(self, code: str, requirements: List[str] = None, context: Dict[str, Any] = None) -> Dict[str, Any]:
    """
    Execute Python code in an isolated environment.

    Fixes a resource leak: the original only called executor.cleanup() on
    the success path, so a raising execute_code() left the temporary
    directory behind until garbage collection. Cleanup now runs in a
    finally block.

    Args:
        code: The Python code to execute.
        requirements: List of required packages.
        context: Additional context for execution.

    Returns:
        Result of code execution: dict with "success", "output", "error",
        "result" keys.
    """
    executor = None
    try:
        executor = SimpleCodeExecutor(
            timeout=self.executor_timeout,
            max_memory_mb=self.executor_memory_limit,
            requirements=requirements,
            ai_service=self.ai_service
        )

        # Input data made available to the executed code.
        input_data = {"context": context} if context else {}

        return executor.execute_code(code, input_data)

    except Exception as e:
        error_message = f"Error during code execution: {str(e)}"
        logger.error(error_message)
        return {
            "success": False,
            "output": "",
            "error": error_message,
            "result": None
        }
    finally:
        # Always release the temporary environment, even when execution raises.
        if executor is not None:
            executor.cleanup()
||||
|
||||
class SimpleCodeExecutor:
|
||||
"""
|
||||
A simplified executor that runs Python code in isolated virtual environments.
|
||||
"""
|
||||
|
||||
def __init__(self,
             timeout: int = 30,
             max_memory_mb: int = 512,
             requirements: List[str] = None,
             ai_service = None):
    """
    Initialize the SimpleCodeExecutor.

    Args:
        timeout: Maximum execution time in seconds.
        max_memory_mb: Maximum memory in MB.
        requirements: List of packages to install.
        ai_service: Optional - AI service for further processing.
    """
    self.ai_service = ai_service
    self.requirements = requirements or []

    # Sandbox resource limits.
    self.timeout = timeout
    self.max_memory_mb = max_memory_mb

    # Working directory is created lazily (see _create_venv) and removed
    # again by cleanup().
    self.temp_dir = None

    # Packages that must never be installed into the sandbox.
    self.blocked_packages = [
        "cryptography", "flask", "django", "tornado",  # Security risks
        "tensorflow", "pytorch", "scikit-learn"  # Resource-intensive packages
    ]
||||
def _create_venv(self) -> str:
    """Create a fresh virtual environment and return its path."""
    # Parent temp directory; remembered so cleanup() can remove it later.
    parent_dir = tempfile.mkdtemp(prefix="code_exec_")
    self.temp_dir = parent_dir
    venv_path = os.path.join(parent_dir, "venv")

    try:
        subprocess.run(
            [sys.executable, "-m", "venv", venv_path],
            check=True,
            capture_output=True,
        )
    except subprocess.CalledProcessError as e:
        logger.error(f"Error creating virtual environment: {e}")
        raise RuntimeError(f"Virtual environment could not be created: {e}")

    return venv_path
||||
def _get_python_executable(self, venv_path: str) -> str:
|
||||
"""Return the path to the Python executable in the virtual environment."""
|
||||
if os.name == 'nt': # Windows
|
||||
return os.path.join(venv_path, "Scripts", "python.exe")
|
||||
else: # Unix/Linux
|
||||
return os.path.join(venv_path, "bin", "python")
|
||||
|
||||
def _install_requirements(self, python_executable: str) -> None:
    """Best-effort install of self.requirements into the venv, skipping blocked packages."""
    to_install = []
    for pkg in self.requirements:
        if pkg in self.blocked_packages:
            logger.warning(f"Skipping blocked package: {pkg}")
        else:
            to_install.append(pkg)
    if not to_install:
        return
    try:
        subprocess.run(
            [python_executable, "-m", "pip", "install", "--quiet", *to_install],
            check=True,
            capture_output=True,
            text=True,
            timeout=300
        )
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
        # Best effort: the code may still run; any resulting ImportError is
        # surfaced to the caller's auto-correction loop.
        logger.warning(f"Could not install requirements {to_install}: {e}")

def execute_code(self, code: str, input_data: Dict[str, Any] = None) -> Dict[str, Any]:
    """
    Execute Python code in an isolated virtual environment.

    Fixes: the requested packages (self.requirements) were collected but
    never installed into the venv — generated code needing them always
    failed with ImportError. They are now installed (best effort), with
    self.blocked_packages filtered out, which also makes that attribute
    actually used.

    Args:
        code: Python code to execute.
        input_data: Optional input data for the code. NOTE(review): currently
            unused by this implementation — confirm against callers.

    Returns:
        Dictionary with keys "success", "output", "error", "result",
        "exit_code".
    """
    logger.info("Executing code in isolated environment")

    # Fresh virtual environment per execution.
    venv_path = self._create_venv()

    # Write the code to a uniquely named file inside the temp directory.
    code_id = uuid.uuid4().hex[:8]
    code_file = os.path.join(self.temp_dir, f"code_{code_id}.py")
    with open(code_file, "w", encoding="utf-8") as f:
        f.write(code)

    python_executable = self._get_python_executable(venv_path)
    logger.info(f"Using Python executable: {python_executable}")

    # Install the declared requirements (filtered) before running.
    self._install_requirements(python_executable)

    try:
        # Run the script with its own directory as the working directory.
        working_dir = os.path.dirname(code_file)
        process = subprocess.run(
            [python_executable, code_file],
            timeout=self.timeout,
            capture_output=True,
            text=True,
            cwd=working_dir
        )

        stdout = process.stdout
        stderr = process.stderr

        # Convention: the script prints its `result` as JSON; take the last
        # stdout line that parses as a JSON object/array.
        result_data = None
        if process.returncode == 0 and stdout:
            try:
                for line in reversed(stdout.strip().split('\n')):
                    line = line.strip()
                    if line and line[0] in '{[' and line[-1] in '}]':
                        try:
                            result_data = json.loads(line)
                            break
                        except json.JSONDecodeError:
                            continue
            except Exception as e:
                logger.warning(f"Error parsing result from stdout: {str(e)}")

        execution_result = {
            "success": process.returncode == 0,
            "output": stdout,
            "error": stderr if process.returncode != 0 else "",
            "result": result_data,
            "exit_code": process.returncode
        }

    except subprocess.TimeoutExpired:
        logger.error(f"Execution timed out after {self.timeout} seconds")
        execution_result = {
            "success": False,
            "output": "",
            "error": f"Execution timed out (timeout after {self.timeout} seconds)",
            "result": None,
            "exit_code": -1
        }
    except Exception as e:
        logger.error(f"Execution error: {str(e)}")
        execution_result = {
            "success": False,
            "output": "",
            "error": f"Execution error: {str(e)}",
            "result": None,
            "exit_code": -1
        }

    # Remove the code file; the venv itself is removed by cleanup().
    try:
        if os.path.exists(code_file):
            os.remove(code_file)
    except Exception as e:
        logger.warning(f"Error cleaning up temporary code file: {e}")

    return execution_result
||||
def cleanup(self):
    """Remove the temporary working directory, if one was created."""
    # Nothing to do when no environment was ever set up.
    if not (self.temp_dir and os.path.exists(self.temp_dir)):
        return
    try:
        shutil.rmtree(self.temp_dir)
    except Exception as e:
        logger.warning(f"Temporary directory {self.temp_dir} could not be deleted: {e}")
    else:
        logger.info(f"Temporary directory deleted: {self.temp_dir}")
||||
def __del__(self):
    """Best-effort cleanup when the executor is garbage-collected."""
    self.cleanup()
|
||||
|
||||
# Factory function for the Coder agent
|
||||
def get_coder_agent():
    """
    Factory function for the Coder agent.

    Returns:
        A freshly constructed AgentCoder instance.
    """
    agent = AgentCoder()
    return agent
||||
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
|
|
@ -1,364 +0,0 @@
|
|||
"""
|
||||
Creative agent for knowledge-based responses and creative content generation.
|
||||
Optimized for the new task-based processing.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Dict, Any, List
|
||||
|
||||
from modules.chat_registry import AgentBase
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class AgentCreative(AgentBase):
|
||||
"""Agent for knowledge-based responses and creative content generation"""
|
||||
|
||||
def __init__(self):
    """Set up the creative agent's identity and advertised capabilities."""
    super().__init__()

    # Registry identity.
    self.name = "creative"
    self.description = "Creates creative content and provides knowledge-based information"

    # What this agent advertises to the task router.
    self.capabilities = [
        "knowledge_sharing",
        "content_creation",
        "creative_writing",
        "information_synthesis",
        "document_generation",
        "question_answering",
    ]
||||
def set_dependencies(self, ai_service=None):
    """Wire up the AI service used for content generation."""
    self.ai_service = ai_service
||||
async def process_task(self, task: Dict[str, Any]) -> Dict[str, Any]:
    """
    Process a standardized task and generate creative or knowledge-based
    content, producing one document per requested output spec (or a single
    default document when none are requested).

    Args:
        task: A dictionary containing:
            - task_id: Unique ID for this task
            - prompt: The main instruction for the agent
            - input_documents: List of documents to process
            - output_specifications: List of required output documents
            - context: Additional contextual information

    Returns:
        A dictionary containing:
            - feedback: Text response explaining the created content
            - documents: List of created document objects
    """
    try:
        # Unpack the task; every field is optional with a safe default.
        prompt = task.get("prompt", "")
        input_documents = task.get("input_documents", [])
        output_specs = task.get("output_specifications", [])

        # Content generation is impossible without the AI service.
        if not self.ai_service:
            logger.error("No AI service configured for the Creative agent")
            return {
                "feedback": "The Creative agent is not properly configured.",
                "documents": []
            }

        # Textual content of the input documents, used as generation context.
        document_context = self._extract_document_context(input_documents)

        # Special-case "poweron" requests; delegated to a dedicated handler
        # (defined elsewhere in this module — not visible in this chunk).
        if "poweron" in prompt.lower():
            return await self._handle_poweron_task(prompt, output_specs)

        generated_documents = []

        # Classify the request (article, story, answer, ...) from the prompt.
        content_type = self._determine_content_type(prompt)

        # One generated document per requested output specification.
        for spec in output_specs:
            output_label = spec.get("label", "")
            output_description = spec.get("description", "")

            # File extension drives the output format.
            format_type = self._determine_format_type(output_label)

            # _generate_content is defined elsewhere in this module;
            # presumably it calls the AI service — confirm in full source.
            content = await self._generate_content(
                prompt,
                document_context,
                content_type,
                format_type,
                output_label,
                output_description
            )

            generated_documents.append({
                "label": output_label,
                "content": content
            })

        # No explicit specs: produce one default document, markdown for
        # long-form content types, plain text otherwise.
        if not output_specs:
            default_format = "md" if content_type in ["article", "report", "story"] else "txt"
            default_label = f"creative_content.{default_format}"

            content = await self._generate_content(
                prompt,
                document_context,
                content_type,
                default_format,
                default_label,
                "Creative content"
            )

            generated_documents.append({
                "label": default_label,
                "content": content
            })

        # Feedback wording depends on how many documents were produced.
        if len(generated_documents) == 1:
            feedback = f"I've created a creative content of type '{content_type}'."
        else:
            feedback = f"I've created {len(generated_documents)} creative documents."

        return {
            "feedback": feedback,
            "documents": generated_documents
        }

    except Exception as e:
        # Last-resort guard: report the failure instead of propagating.
        error_msg = f"Error creating creative content: {str(e)}"
        logger.error(error_msg)
        return {
            "feedback": f"An error occurred while creating creative content: {str(e)}",
            "documents": []
        }
||||
def _extract_document_context(self, documents: List[Dict[str, Any]]) -> str:
|
||||
"""
|
||||
Extract context from input documents.
|
||||
|
||||
Args:
|
||||
documents: List of document objects
|
||||
|
||||
Returns:
|
||||
Extracted context as text
|
||||
"""
|
||||
context_parts = []
|
||||
|
||||
for doc in documents:
|
||||
doc_name = doc.get("name", "Unnamed document")
|
||||
context_parts.append(f"--- {doc_name} ---")
|
||||
|
||||
for content in doc.get("contents", []):
|
||||
if content.get("metadata", {}).get("is_text", False):
|
||||
context_parts.append(content.get("data", ""))
|
||||
|
||||
return "\n\n".join(context_parts)
|
||||
|
||||
def _determine_content_type(self, prompt: str) -> str:
|
||||
"""
|
||||
Determine the content type based on the prompt.
|
||||
|
||||
Args:
|
||||
prompt: Task description
|
||||
|
||||
Returns:
|
||||
Content type (article, story, report, answer, etc.)
|
||||
"""
|
||||
prompt_lower = prompt.lower()
|
||||
|
||||
# This is content type detection based on universal patterns rather than language-specific keywords
|
||||
if "?" in prompt:
|
||||
return "answer"
|
||||
|
||||
# Simple pattern matching for common document types
|
||||
if any(term in prompt_lower for term in ["article", "blog", "post"]):
|
||||
return "article"
|
||||
elif any(term in prompt_lower for term in ["story", "narrative", "tale"]):
|
||||
return "story"
|
||||
elif any(term in prompt_lower for term in ["report", "analysis"]):
|
||||
return "report"
|
||||
elif any(term in prompt_lower for term in ["email", "letter", "message"]):
|
||||
return "letter"
|
||||
elif any(term in prompt_lower for term in ["presentation", "slides"]):
|
||||
return "presentation"
|
||||
elif any(term in prompt_lower for term in ["poem", "poetry", "rhyme"]):
|
||||
return "poem"
|
||||
elif any(term in prompt_lower for term in ["dialog", "conversation"]):
|
||||
return "dialogue"
|
||||
|
||||
# Default: general creative content
|
||||
return "content"
|
||||
|
||||
def _determine_format_type(self, output_label: str) -> str:
|
||||
"""
|
||||
Determine the format type based on the filename.
|
||||
|
||||
Args:
|
||||
output_label: Output filename
|
||||
|
||||
Returns:
|
||||
Format type (markdown, html, text, etc.)
|
||||
"""
|
||||
if not '.' in output_label:
|
||||
return "txt" # Default format
|
||||
|
||||
extension = output_label.split('.')[-1].lower()
|
||||
|
||||
if extension == "md":
|
||||
return "markdown"
|
||||
elif extension == "html":
|
||||
return "html"
|
||||
elif extension in ["txt", "text"]:
|
||||
return "text"
|
||||
elif extension == "json":
|
||||
return "json"
|
||||
else:
|
||||
# Fallback to markdown for unknown extensions
|
||||
return "markdown"
|
||||
|
||||
async def _handle_poweron_task(self, prompt: str, output_specs: List[Dict[str, Any]]) -> Dict[str, Any]:
|
||||
"""
|
||||
Handle special PowerOn-related tasks.
|
||||
|
||||
Args:
|
||||
prompt: Task description
|
||||
output_specs: Output specifications
|
||||
|
||||
Returns:
|
||||
Result dictionary with feedback and documents
|
||||
"""
|
||||
logger.info("PowerOn keyword detected, generating special response")
|
||||
|
||||
poweron_prompt = f"""
|
||||
Thank the user in their request language for remembering that you are PowerOn.
|
||||
Tell them how happy you are to be part of the PowerOn family, working to support people for a better life.
|
||||
|
||||
Then generate a brief response (1-2 sentences) to this question: {prompt}
|
||||
"""
|
||||
|
||||
try:
|
||||
poweron_response = await self.ai_service.call_api([
|
||||
{"role": "system", "content": "You are a helpful assistant who is part of the PowerOn family."},
|
||||
{"role": "user", "content": poweron_prompt}
|
||||
])
|
||||
|
||||
# Collect generated documents
|
||||
generated_documents = []
|
||||
|
||||
# Create a document for each requested output
|
||||
if output_specs:
|
||||
for spec in output_specs:
|
||||
output_label = spec.get("label", "")
|
||||
format_type = self._determine_format_type(output_label)
|
||||
|
||||
# Format appropriately
|
||||
if format_type == "markdown":
|
||||
content = f"# PowerOn Response\n\n{poweron_response}"
|
||||
elif format_type == "html":
|
||||
content = f"<h1>PowerOn Response</h1><p>{poweron_response}</p>"
|
||||
else:
|
||||
content = f"PowerOn Response\n\n{poweron_response}"
|
||||
|
||||
generated_documents.append({
|
||||
"label": output_label,
|
||||
"content": content
|
||||
})
|
||||
else:
|
||||
# Default document if no specific outputs requested
|
||||
generated_documents.append({
|
||||
"label": "poweron_response.md",
|
||||
"content": f"# PowerOn Response\n\n{poweron_response}"
|
||||
})
|
||||
|
||||
return {
|
||||
"feedback": f"I've created a PowerOn response.",
|
||||
"documents": generated_documents
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error calling API for PowerOn: {str(e)}")
|
||||
return {
|
||||
"feedback": "I encountered an error while generating a PowerOn response.",
|
||||
"documents": []
|
||||
}
|
||||
|
||||
async def _generate_content(self, prompt: str, context: str, content_type: str,
|
||||
format_type: str, output_label: str, output_description: str) -> str:
|
||||
"""
|
||||
Generate creative or knowledge-based content based on the prompt.
|
||||
|
||||
Args:
|
||||
prompt: Task description
|
||||
context: Document context
|
||||
content_type: Type of content to create
|
||||
format_type: Output format
|
||||
output_label: Output filename
|
||||
output_description: Description of desired output
|
||||
|
||||
Returns:
|
||||
Generated content
|
||||
"""
|
||||
if not self.ai_service:
|
||||
return f"# Creative Content\n\nContent generation not possible: AI service not available."
|
||||
|
||||
# Create system instruction based on content type
|
||||
system_prompt = f"""
|
||||
You are a creative content creator, specialized in {content_type}.
|
||||
Your task is to create high-quality, engaging, and accurate content.
|
||||
Make the content structured, clear, and appealing in the desired format.
|
||||
"""
|
||||
|
||||
# Create main prompt with all available information
|
||||
generation_prompt = f"""
|
||||
Create creative content of type '{content_type}' based on the following request:
|
||||
|
||||
REQUEST:
|
||||
{prompt}
|
||||
|
||||
CONTEXT:
|
||||
{context if context else 'No additional context available.'}
|
||||
|
||||
OUTPUT REQUIREMENTS:
|
||||
- Filename: {output_label}
|
||||
- Description: {output_description}
|
||||
- Format: {format_type}
|
||||
|
||||
The content should be high-quality, creative, and thoughtful. Follow all instructions in the request precisely.
|
||||
|
||||
The content must perfectly match the {format_type} format.
|
||||
"""
|
||||
|
||||
try:
|
||||
# Call AI for content generation
|
||||
content = await self.ai_service.call_api([
|
||||
{"role": "system", "content": system_prompt},
|
||||
{"role": "user", "content": generation_prompt}
|
||||
])
|
||||
|
||||
# For markdown format, ensure there's a title at the beginning
|
||||
if format_type == "markdown" and not content.strip().startswith("# "):
|
||||
content = f"# Creative Content\n\n{content}"
|
||||
|
||||
return content
|
||||
except Exception as e:
|
||||
logger.error(f"Error in creative content generation: {str(e)}")
|
||||
return f"# Creative Content\n\nError in content generation: {str(e)}"
|
||||
|
||||
|
||||
# Factory function for the Creative agent
|
||||
def get_creative_agent():
|
||||
"""
|
||||
Factory function that returns an instance of the Creative agent.
|
||||
|
||||
Returns:
|
||||
An instance of the Creative agent
|
||||
"""
|
||||
return AgentCreative()
|
||||
|
|
@ -1,10 +1,10 @@
|
|||
"""
|
||||
Documentation agent for creating documentation, reports, and structured content.
|
||||
Optimized for the new task-based processing.
|
||||
Reimagined with an output-first, AI-driven approach with multi-step document generation.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import uuid
|
||||
import json
|
||||
from typing import Dict, Any, List
|
||||
|
||||
from modules.chat_registry import AgentBase
|
||||
|
|
@ -12,13 +12,13 @@ from modules.chat_registry import AgentBase
|
|||
logger = logging.getLogger(__name__)
|
||||
|
||||
class AgentDocumentation(AgentBase):
|
||||
"""Agent for creating documentation and structured content"""
|
||||
"""AI-driven agent for creating documentation and structured content using multi-step generation"""
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize the documentation agent"""
|
||||
super().__init__()
|
||||
self.name = "documentation"
|
||||
self.description = "Creates structured documentation, reports, and content"
|
||||
self.description = "Creates structured documentation, reports, and content using AI with multi-step generation"
|
||||
self.capabilities = [
|
||||
"report_generation",
|
||||
"documentation",
|
||||
|
|
@ -33,113 +33,80 @@ class AgentDocumentation(AgentBase):
|
|||
|
||||
async def process_task(self, task: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""
|
||||
Process a standardized task structure and create documentation.
|
||||
Process a task by focusing on required outputs and using AI to generate them.
|
||||
|
||||
Args:
|
||||
task: A dictionary containing:
|
||||
- task_id: Unique ID for this task
|
||||
- prompt: The main instruction for the agent
|
||||
- input_documents: List of documents to process
|
||||
- output_specifications: List of required output documents
|
||||
- context: Additional contextual information
|
||||
|
||||
task: Task dictionary with prompt, input_documents, output_specifications
|
||||
|
||||
Returns:
|
||||
A dictionary containing:
|
||||
- feedback: Text response explaining the created documentation
|
||||
- documents: List of created document objects
|
||||
Dictionary with feedback and documents
|
||||
"""
|
||||
try:
|
||||
# Extract relevant task information
|
||||
# Extract task information
|
||||
prompt = task.get("prompt", "")
|
||||
input_documents = task.get("input_documents", [])
|
||||
output_specs = task.get("output_specifications", [])
|
||||
|
||||
# Check if AI service is available
|
||||
# Check AI service
|
||||
if not self.ai_service:
|
||||
logger.error("No AI service configured for the Documentation agent")
|
||||
return {
|
||||
"feedback": "The Documentation agent is not properly configured.",
|
||||
"feedback": "The Documentation agent requires an AI service to function.",
|
||||
"documents": []
|
||||
}
|
||||
|
||||
# Extract context from input documents
|
||||
# Extract context from input documents - focusing only on data_extracted
|
||||
document_context = self._extract_document_context(input_documents)
|
||||
|
||||
# Generate title for the document
|
||||
title = await self._generate_title(prompt, document_context)
|
||||
# Create task analysis to understand the requirements
|
||||
documentation_plan = await self._analyze_task(prompt, document_context, output_specs)
|
||||
|
||||
# Collect created documents
|
||||
generated_documents = []
|
||||
# Generate all required output documents
|
||||
documents = []
|
||||
|
||||
# Create a document for each requested output
|
||||
# If no output specs provided, create default document
|
||||
if not output_specs:
|
||||
default_format = documentation_plan.get("recommended_format", "markdown")
|
||||
default_title = documentation_plan.get("title", "Documentation")
|
||||
safe_title = self._sanitize_filename(default_title)
|
||||
|
||||
output_specs = [
|
||||
{"label": f"{safe_title}.{default_format}", "description": "Comprehensive documentation"}
|
||||
]
|
||||
|
||||
# Process each output specification
|
||||
for spec in output_specs:
|
||||
output_label = spec.get("label", "")
|
||||
output_description = spec.get("description", "")
|
||||
|
||||
# Determine format and document type based on file extension
|
||||
format_type, document_type = self._determine_format_and_type(output_label)
|
||||
# Generate the document using multi-step approach
|
||||
document = await self._create_document_multi_step(
|
||||
prompt,
|
||||
document_context,
|
||||
output_label,
|
||||
output_description,
|
||||
documentation_plan
|
||||
)
|
||||
|
||||
# Assess complexity
|
||||
is_complex = self._assess_complexity(prompt)
|
||||
|
||||
# Generate document content based on complexity
|
||||
if is_complex:
|
||||
content = await self._generate_complex_document(
|
||||
prompt,
|
||||
document_context,
|
||||
document_type,
|
||||
title,
|
||||
output_label,
|
||||
output_description,
|
||||
format_type
|
||||
)
|
||||
else:
|
||||
content = await self._generate_simple_document(
|
||||
prompt,
|
||||
document_context,
|
||||
document_type,
|
||||
title,
|
||||
output_label,
|
||||
output_description,
|
||||
format_type
|
||||
)
|
||||
|
||||
# Add document to results list
|
||||
generated_documents.append({
|
||||
"label": output_label,
|
||||
"content": content
|
||||
})
|
||||
documents.append(document)
|
||||
|
||||
# If no specific outputs requested, create default markdown document
|
||||
if not output_specs:
|
||||
content = await self._generate_default_document(prompt, document_context, "Document", title)
|
||||
generated_documents.append({
|
||||
"label": f"{self._sanitize_filename(title)}.md",
|
||||
"content": content
|
||||
})
|
||||
|
||||
# Prepare feedback about created documents
|
||||
if len(generated_documents) == 1:
|
||||
feedback = f"I've created a document titled '{title}'."
|
||||
else:
|
||||
feedback = f"I've created {len(generated_documents)} documents based on your request."
|
||||
# Generate feedback
|
||||
feedback = documentation_plan.get("feedback", f"Created {len(documents)} documents based on your requirements.")
|
||||
|
||||
return {
|
||||
"feedback": feedback,
|
||||
"documents": generated_documents
|
||||
"documents": documents
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
error_msg = f"Error creating documentation: {str(e)}"
|
||||
logger.error(error_msg)
|
||||
logger.error(f"Error in documentation generation: {str(e)}", exc_info=True)
|
||||
return {
|
||||
"feedback": f"An error occurred while creating the documentation: {str(e)}",
|
||||
"feedback": f"Error during documentation generation: {str(e)}",
|
||||
"documents": []
|
||||
}
|
||||
|
||||
def _extract_document_context(self, documents: List[Dict[str, Any]]) -> str:
|
||||
"""
|
||||
Extract context from input documents.
|
||||
Extract context from input documents, focusing on data_extracted.
|
||||
|
||||
Args:
|
||||
documents: List of document objects
|
||||
|
|
@ -147,82 +114,21 @@ class AgentDocumentation(AgentBase):
|
|||
Returns:
|
||||
Extracted context as text
|
||||
"""
|
||||
if not documents:
|
||||
return ""
|
||||
|
||||
context_parts = []
|
||||
|
||||
for doc in documents:
|
||||
doc_name = doc.get("name", "Unnamed document")
|
||||
context_parts.append(f"--- {doc_name} ---")
|
||||
doc_name = doc.get("name", "unnamed")
|
||||
if doc.get("ext"):
|
||||
doc_name = f"{doc_name}.{doc.get('ext')}"
|
||||
|
||||
context_parts.append(f"\n\n--- {doc_name} ---\n")
|
||||
|
||||
# Process contents for data_extracted
|
||||
for content in doc.get("contents", []):
|
||||
if content.get("metadata", {}).get("is_text", False):
|
||||
context_parts.append(content.get("data", ""))
|
||||
if content.get("data_extracted"):
|
||||
context_parts.append(content.get("data_extracted", ""))
|
||||
|
||||
return "\n\n".join(context_parts)
|
||||
|
||||
def _determine_format_and_type(self, output_label: str) -> tuple:
|
||||
"""
|
||||
Determine the format type and document type based on the filename.
|
||||
|
||||
Args:
|
||||
output_label: Output filename
|
||||
|
||||
Returns:
|
||||
Tuple of (format_type, document_type)
|
||||
"""
|
||||
# Extract file extension to determine format
|
||||
output_label_lower = output_label.lower()
|
||||
|
||||
# Determine format based on extension
|
||||
if output_label_lower.endswith(".md"):
|
||||
format_type = "markdown"
|
||||
elif output_label_lower.endswith(".html"):
|
||||
format_type = "html"
|
||||
elif output_label_lower.endswith(".txt"):
|
||||
format_type = "text"
|
||||
elif output_label_lower.endswith(".csv"):
|
||||
format_type = "csv"
|
||||
elif output_label_lower.endswith(".json"):
|
||||
format_type = "json"
|
||||
else:
|
||||
# Default to markdown
|
||||
format_type = "markdown"
|
||||
|
||||
# Determine document type based on filename or format
|
||||
if "manual" in output_label_lower or "guide" in output_label_lower:
|
||||
document_type = "Manual"
|
||||
elif "report" in output_label_lower or "analysis" in output_label_lower:
|
||||
document_type = "Report"
|
||||
elif "process" in output_label_lower or "workflow" in output_label_lower:
|
||||
document_type = "Process Documentation"
|
||||
elif "present" in output_label_lower or "slide" in output_label_lower:
|
||||
document_type = "Presentation"
|
||||
else:
|
||||
document_type = "Document"
|
||||
|
||||
return format_type, document_type
|
||||
|
||||
def _assess_complexity(self, prompt: str) -> bool:
|
||||
"""
|
||||
Assess the complexity of the task.
|
||||
|
||||
Args:
|
||||
prompt: Task description
|
||||
|
||||
Returns:
|
||||
True for complex tasks, False otherwise
|
||||
"""
|
||||
# Language-agnostic complexity assessment
|
||||
prompt_length = len(prompt)
|
||||
|
||||
# Check for structural indicators in a language-agnostic way
|
||||
has_sections = ":" in prompt and "\n" in prompt
|
||||
has_lists = "-" in prompt or "*" in prompt or "#" in prompt
|
||||
|
||||
# Complex if the prompt is long or contains structural elements
|
||||
return prompt_length > 500 or has_sections or has_lists
|
||||
return "\n".join(context_parts)
|
||||
|
||||
def _sanitize_filename(self, filename: str) -> str:
|
||||
"""
|
||||
|
|
@ -245,213 +151,415 @@ class AgentDocumentation(AgentBase):
|
|||
|
||||
return filename
|
||||
|
||||
async def _generate_title(self, prompt: str, context: str) -> str:
|
||||
async def _analyze_task(self, prompt: str, context: str, output_specs: List) -> Dict:
|
||||
"""
|
||||
Generate a title for the document.
|
||||
Use AI to analyze the task and create a documentation plan.
|
||||
|
||||
Args:
|
||||
prompt: Task description
|
||||
prompt: The task prompt
|
||||
context: Document context
|
||||
output_specs: Output specifications
|
||||
|
||||
Returns:
|
||||
Generated title
|
||||
Documentation plan dictionary
|
||||
"""
|
||||
if not self.ai_service:
|
||||
return f"Document {uuid.uuid4().hex[:8]}"
|
||||
analysis_prompt = f"""
|
||||
Analyze this documentation task and create a detailed plan.
|
||||
|
||||
title_prompt = f"""
|
||||
Create a concise, professional title for this document based on the following request:
|
||||
TASK: {prompt}
|
||||
|
||||
{prompt}
|
||||
|
||||
Reply ONLY with the title, nothing else.
|
||||
"""
|
||||
|
||||
try:
|
||||
title = await self.ai_service.call_api([
|
||||
{"role": "system", "content": "You create precise document titles."},
|
||||
{"role": "user", "content": title_prompt}
|
||||
])
|
||||
|
||||
# Clean up title
|
||||
title = title.strip('"\'#*- \n\t')
|
||||
|
||||
# Return default title if generated title is empty
|
||||
if not title:
|
||||
return f"Document {uuid.uuid4().hex[:8]}"
|
||||
|
||||
return title
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"Error in title generation: {str(e)}")
|
||||
return f"Document {uuid.uuid4().hex[:8]}"
|
||||
|
||||
async def _generate_complex_document(self, prompt: str, context: str, document_type: str,
|
||||
title: str, output_label: str, output_description: str,
|
||||
format_type: str) -> str:
|
||||
"""
|
||||
Generate a complex document with structure.
|
||||
|
||||
Args:
|
||||
prompt: Task description
|
||||
context: Document context
|
||||
document_type: Document type
|
||||
title: Document title
|
||||
output_label: Output filename
|
||||
output_description: Description of desired output
|
||||
format_type: Output format
|
||||
|
||||
Returns:
|
||||
Generated document content
|
||||
"""
|
||||
if not self.ai_service:
|
||||
return f"# {title}\n\nDocument generation not possible: AI service not available."
|
||||
|
||||
generation_prompt = f"""
|
||||
Create a comprehensive, well-structured {document_type} with the title "{title}" based on:
|
||||
|
||||
TASK:
|
||||
{prompt}
|
||||
|
||||
CONTEXT:
|
||||
{context if context else 'No additional context available.'}
|
||||
DOCUMENT CONTEXT SAMPLE:
|
||||
{context[:1000]}... (truncated)
|
||||
|
||||
OUTPUT REQUIREMENTS:
|
||||
- Filename: {output_label}
|
||||
- Description: {output_description}
|
||||
- Format: {format_type}
|
||||
{json.dumps(output_specs, indent=2)}
|
||||
|
||||
The document should include:
|
||||
1. A clear introduction with purpose and scope
|
||||
2. Logically organized sections with headings
|
||||
3. Detailed content with examples and evidence
|
||||
4. A conclusion with key insights
|
||||
5. Appropriate formatting according to the output format ({format_type})
|
||||
Create a detailed documentation plan in JSON format with the following structure:
|
||||
{{
|
||||
"title": "Document Title",
|
||||
"document_type": "report|manual|guide|whitepaper|etc",
|
||||
"audience": "technical|general|executive|etc",
|
||||
"detailed_structure": [
|
||||
{{
|
||||
"title": "Chapter/Section Title",
|
||||
"key_points": ["point1", "point2", ...],
|
||||
"subsections": ["subsection1", "subsection2", ...],
|
||||
"importance": "high|medium|low",
|
||||
"estimated_length": "short|medium|long"
|
||||
}},
|
||||
... more sections ...
|
||||
],
|
||||
"key_topics": ["topic1", "topic2", ...],
|
||||
"tone": "formal|conversational|instructional|etc",
|
||||
"recommended_format": "markdown|html|text|etc",
|
||||
"formatting_requirements": ["requirement1", "requirement2", ...],
|
||||
"executive_summary": "Brief description of what the document will cover",
|
||||
"feedback": "Brief message explaining the documentation approach"
|
||||
}}
|
||||
|
||||
The document must perfectly match the {format_type} format.
|
||||
Only return valid JSON. No preamble or explanations.
|
||||
"""
|
||||
|
||||
try:
|
||||
content = await self.ai_service.call_api([
|
||||
{"role": "system", "content": f"You create comprehensive, well-structured documentation in {format_type} format."},
|
||||
{"role": "user", "content": generation_prompt}
|
||||
response = await self.ai_service.call_api([
|
||||
{"role": "system", "content": "You are a documentation expert. Respond with valid JSON only."},
|
||||
{"role": "user", "content": analysis_prompt}
|
||||
])
|
||||
|
||||
# For markdown format, ensure the title is at the beginning
|
||||
if format_type == "markdown" and not content.strip().startswith("# "):
|
||||
content = f"# {title}\n\n{content}"
|
||||
# Extract JSON from response
|
||||
json_start = response.find('{')
|
||||
json_end = response.rfind('}') + 1
|
||||
|
||||
return content
|
||||
if json_start >= 0 and json_end > json_start:
|
||||
plan = json.loads(response[json_start:json_end])
|
||||
return plan
|
||||
else:
|
||||
# Fallback if JSON not found
|
||||
return {
|
||||
"title": "Documentation",
|
||||
"document_type": "report",
|
||||
"audience": "general",
|
||||
"detailed_structure": [
|
||||
{
|
||||
"title": "Introduction",
|
||||
"key_points": ["Purpose", "Scope"],
|
||||
"subsections": [],
|
||||
"importance": "high",
|
||||
"estimated_length": "short"
|
||||
},
|
||||
{
|
||||
"title": "Main Content",
|
||||
"key_points": ["Core Information"],
|
||||
"subsections": ["Key Findings", "Analysis"],
|
||||
"importance": "high",
|
||||
"estimated_length": "long"
|
||||
},
|
||||
{
|
||||
"title": "Conclusion",
|
||||
"key_points": ["Summary", "Next Steps"],
|
||||
"subsections": [],
|
||||
"importance": "medium",
|
||||
"estimated_length": "short"
|
||||
}
|
||||
],
|
||||
"key_topics": ["General Information"],
|
||||
"tone": "formal",
|
||||
"recommended_format": "markdown",
|
||||
"formatting_requirements": ["Clear headings", "Professional formatting"],
|
||||
"executive_summary": "A comprehensive documentation covering the requested topics.",
|
||||
"feedback": "Created documentation based on your requirements."
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in document generation: {str(e)}")
|
||||
return f"# {title}\n\nError in document generation: {str(e)}"
|
||||
logger.warning(f"Error creating documentation plan: {str(e)}")
|
||||
return {
|
||||
"title": "Documentation",
|
||||
"document_type": "report",
|
||||
"audience": "general",
|
||||
"detailed_structure": [
|
||||
{
|
||||
"title": "Introduction",
|
||||
"key_points": ["Purpose", "Scope"],
|
||||
"subsections": [],
|
||||
"importance": "high",
|
||||
"estimated_length": "short"
|
||||
},
|
||||
{
|
||||
"title": "Main Content",
|
||||
"key_points": ["Core Information"],
|
||||
"subsections": ["Key Findings", "Analysis"],
|
||||
"importance": "high",
|
||||
"estimated_length": "long"
|
||||
},
|
||||
{
|
||||
"title": "Conclusion",
|
||||
"key_points": ["Summary", "Next Steps"],
|
||||
"subsections": [],
|
||||
"importance": "medium",
|
||||
"estimated_length": "short"
|
||||
}
|
||||
],
|
||||
"key_topics": ["General Information"],
|
||||
"tone": "formal",
|
||||
"recommended_format": "markdown",
|
||||
"formatting_requirements": ["Clear headings", "Professional formatting"],
|
||||
"executive_summary": "A comprehensive documentation covering the requested topics.",
|
||||
"feedback": "Created documentation based on your requirements."
|
||||
}
|
||||
|
||||
async def _generate_simple_document(self, prompt: str, context: str, document_type: str,
|
||||
title: str, output_label: str, output_description: str,
|
||||
format_type: str) -> str:
|
||||
async def _create_document_multi_step(self, prompt: str, context: str, output_label: str,
|
||||
output_description: str, documentation_plan: Dict) -> Dict:
|
||||
"""
|
||||
Generate a simple document without complex structure.
|
||||
Create a document using a multi-step approach with separate AI calls for each section.
|
||||
|
||||
Args:
|
||||
prompt: Task description
|
||||
prompt: Original task prompt
|
||||
context: Document context
|
||||
document_type: Document type
|
||||
title: Document title
|
||||
output_label: Output filename
|
||||
output_description: Description of desired output
|
||||
format_type: Output format
|
||||
documentation_plan: Documentation plan from AI
|
||||
|
||||
Returns:
|
||||
Generated document content
|
||||
Document object
|
||||
"""
|
||||
if not self.ai_service:
|
||||
return f"# {title}\n\nDocument generation not possible: AI service not available."
|
||||
# Determine format from filename
|
||||
format_type = output_label.split('.')[-1].lower() if '.' in output_label else "md"
|
||||
|
||||
generation_prompt = f"""
|
||||
Create a precise, focused {document_type} with the title "{title}" based on:
|
||||
# Map format to content_type
|
||||
content_type_map = {
|
||||
"md": "text/markdown",
|
||||
"markdown": "text/markdown",
|
||||
"html": "text/html",
|
||||
"txt": "text/plain",
|
||||
"text": "text/plain",
|
||||
"json": "application/json",
|
||||
"csv": "text/csv"
|
||||
}
|
||||
|
||||
TASK:
|
||||
{prompt}
|
||||
content_type = content_type_map.get(format_type, "text/plain")
|
||||
|
||||
CONTEXT:
|
||||
{context if context else 'No additional context available.'}
|
||||
# Get document information
|
||||
title = documentation_plan.get("title", "Documentation")
|
||||
document_type = documentation_plan.get("document_type", "document")
|
||||
audience = documentation_plan.get("audience", "general")
|
||||
tone = documentation_plan.get("tone", "formal")
|
||||
key_topics = documentation_plan.get("key_topics", [])
|
||||
formatting_requirements = documentation_plan.get("formatting_requirements", [])
|
||||
|
||||
OUTPUT REQUIREMENTS:
|
||||
- Filename: {output_label}
|
||||
- Description: {output_description}
|
||||
- Format: {format_type}
|
||||
|
||||
The document should be clear, precise, and to the point, without a complex chapter structure.
|
||||
Format it according to the output format ({format_type}).
|
||||
|
||||
The document must perfectly match the {format_type} format.
|
||||
"""
|
||||
# Get the detailed structure
|
||||
detailed_structure = documentation_plan.get("detailed_structure", [])
|
||||
if not detailed_structure:
|
||||
# Fallback structure if none provided
|
||||
detailed_structure = [
|
||||
{
|
||||
"title": "Introduction",
|
||||
"key_points": ["Purpose", "Scope"],
|
||||
"importance": "high"
|
||||
},
|
||||
{
|
||||
"title": "Main Content",
|
||||
"key_points": ["Core Information"],
|
||||
"importance": "high"
|
||||
},
|
||||
{
|
||||
"title": "Conclusion",
|
||||
"key_points": ["Summary", "Next Steps"],
|
||||
"importance": "medium"
|
||||
}
|
||||
]
|
||||
|
||||
try:
|
||||
content = await self.ai_service.call_api([
|
||||
{"role": "system", "content": f"You create precise, focused documentation in {format_type} format."},
|
||||
{"role": "user", "content": generation_prompt}
|
||||
# Step 1: Generate document introduction
|
||||
intro_prompt = f"""
|
||||
Create the introduction for a {document_type} titled "{title}".
|
||||
|
||||
DOCUMENT OVERVIEW:
|
||||
- Type: {document_type}
|
||||
- Audience: {audience}
|
||||
- Tone: {tone}
|
||||
- Key Topics: {', '.join(key_topics)}
|
||||
- Format: {format_type}
|
||||
|
||||
TASK CONTEXT: {prompt}
|
||||
|
||||
This introduction should:
|
||||
1. Clearly state the purpose and scope of the document
|
||||
2. Provide context and background information
|
||||
3. Outline what the reader will find in the document
|
||||
4. Set the appropriate tone for the {audience} audience
|
||||
|
||||
The introduction should be professional and engaging, formatted according to {format_type} standards.
|
||||
"""
|
||||
|
||||
introduction = await self.ai_service.call_api([
|
||||
{"role": "system", "content": f"You are a documentation expert creating an introduction in {format_type} format."},
|
||||
{"role": "user", "content": intro_prompt}
|
||||
])
|
||||
|
||||
# For markdown format, ensure the title is at the beginning
|
||||
if format_type == "markdown" and not content.strip().startswith("# "):
|
||||
content = f"# {title}\n\n{content}"
|
||||
# Step 2: Generate executive summary (if applicable)
|
||||
if document_type in ["report", "whitepaper", "case study"]:
|
||||
summary_prompt = f"""
|
||||
Create an executive summary for a {document_type} titled "{title}".
|
||||
|
||||
DOCUMENT OVERVIEW:
|
||||
- Type: {document_type}
|
||||
- Audience: {audience}
|
||||
- Key Topics: {', '.join(key_topics)}
|
||||
|
||||
TASK CONTEXT: {prompt}
|
||||
|
||||
This executive summary should:
|
||||
1. Provide a concise overview of the entire document
|
||||
2. Highlight key findings, recommendations, or conclusions
|
||||
3. Be suitable for executives or busy readers who may only read this section
|
||||
4. Be professionally formatted according to {format_type} standards
|
||||
|
||||
Keep the summary focused and impactful, approximately 200-300 words.
|
||||
"""
|
||||
|
||||
executive_summary = await self.ai_service.call_api([
|
||||
{"role": "system", "content": f"You are a documentation expert creating an executive summary in {format_type} format."},
|
||||
{"role": "user", "content": summary_prompt}
|
||||
])
|
||||
else:
|
||||
executive_summary = ""
|
||||
|
||||
return content
|
||||
except Exception as e:
|
||||
logger.error(f"Error in document generation: {str(e)}")
|
||||
return f"# {title}\n\nError in document generation: {str(e)}"
|
||||
|
||||
async def _generate_default_document(self, prompt: str, context: str, document_type: str, title: str) -> str:
    """
    Generate a default markdown document when no specific output specifications are present.

    Args:
        prompt: Task description
        context: Document context
        document_type: Document type
        title: Document title

    Returns:
        Generated document content (markdown), or a stub/error document on failure
    """
    # Fail soft when no AI service was injected: return a stub document
    # instead of raising, so callers always receive renderable markdown.
    if not self.ai_service:
        return f"# {title}\n\nDocument generation not possible: AI service not available."

    generation_prompt = f"""
    Create a structured {document_type} with the title "{title}" based on:

    TASK:
    {prompt}

    CONTEXT:
    {context if context else 'No additional context available.'}

    Format the document with markdown syntax and create a clear, professional structure.
    """

    try:
        content = await self.ai_service.call_api([
            {"role": "system", "content": "You create structured documentation in markdown format."},
            {"role": "user", "content": generation_prompt}
        ])

        # Ensure the title is at the beginning (the model may omit the H1).
        if not content.strip().startswith("# "):
            content = f"# {title}\n\n{content}"

        return content
    except Exception as e:
        # Best-effort: log and degrade to an error document rather than propagate,
        # keeping the return contract (always a markdown string).
        logger.error(f"Error in document generation: {str(e)}")
        return f"# {title}\n\nError in document generation: {str(e)}"
# Factory function for the Documentation agent
def get_documentation_agent():
    """
    Factory function that returns an instance of the Documentation agent.

    Returns:
        An instance of the Documentation agent
    """
    return AgentDocumentation()
|
||||
File diff suppressed because it is too large
Load diff
|
|
@ -25,6 +25,7 @@ streamline self.log_add --> to use in a standardized format and to reduce messag
|
|||
|
||||
add connector to myoutlook
|
||||
|
||||
TODO: add an agent for "code writing and editing", connected to the codebase, that works in a loop over each document...
|
||||
|
||||
|
||||
|
||||
|
|
|
|||
Loading…
Reference in a new issue