diff --git a/app.py b/app.py
index a25bd63d..d3c7f831 100644
--- a/app.py
+++ b/app.py
@@ -14,7 +14,7 @@
 from logging.handlers import RotatingFileHandler
 from datetime import timedelta
 import pathlib
-from modules.utility import APP_CONFIG
+from modules.configuration import APP_CONFIG
 from modules.gateway_interface import get_gateway_interface
diff --git a/connectors/connector_aichat_anthropic.py b/connectors/connector_aichat_anthropic.py
index 4f1d04d2..de150895 100644
--- a/connectors/connector_aichat_anthropic.py
+++ b/connectors/connector_aichat_anthropic.py
@@ -2,7 +2,7 @@
 import logging
 import httpx
 from typing import Dict, Any, List, Optional, Union
 from fastapi import HTTPException
-from modules.utility import APP_CONFIG
+from modules.configuration import APP_CONFIG
 
 # Logger konfigurieren
 logger = logging.getLogger(__name__)
@@ -199,7 +199,7 @@ class ChatService:
         # Unterscheide zwischen Dateipfad und Binärdaten
         if isinstance(image_data, str):
             # Es ist ein Dateipfad - importiere filehandling nur bei Bedarf
-            from gateway.gwserver.modules import agentservice_filemanager as file_handler
+            from modules import agentservice_filemanager as file_handler
             base64_data, auto_mime_type = file_handler.encode_file_to_base64(image_data)
             mime_type = mime_type or auto_mime_type
         else:
diff --git a/connectors/connector_aichat_openai.py b/connectors/connector_aichat_openai.py
index 2b5c7f81..a83aae25 100644
--- a/connectors/connector_aichat_openai.py
+++ b/connectors/connector_aichat_openai.py
@@ -2,7 +2,7 @@
 import logging
 import httpx
 from typing import Dict, Any, List, Optional, Union
 from fastapi import HTTPException
-from modules.utility import APP_CONFIG
+from modules.configuration import APP_CONFIG
 
 # Logger konfigurieren
 logger = logging.getLogger(__name__)
@@ -108,7 +108,7 @@ class ChatService:
         # Unterscheide zwischen Dateipfad und Binärdaten
         if isinstance(image_data, str):
             # Es ist ein Dateipfad - importiere filehandling nur bei Bedarf
-            from gateway.gwserver.modules import agentservice_filemanager as file_handler
+            from modules import agentservice_filemanager as file_handler
             base64_data, auto_mime_type = file_handler.encode_file_to_base64(image_data)
             mime_type = mime_type or auto_mime_type
         else:
diff --git a/connectors/connector_db_json.py b/connectors/connector_db_json.py
index de1ba2c2..5371c6be 100644
--- a/connectors/connector_db_json.py
+++ b/connectors/connector_db_json.py
@@ -375,7 +375,7 @@ class DatabaseConnector:
         # Wenn die Tabelle leer ist und eine System-ID registriert werden soll
         if not data:
             self.register_initial_id(table, record_data["id"])
-            logger.info(f"Initiale ID {record_data['id']} für Tabelle {table} registriert")
+            logger.info(f"Initiale ID {record_data['id']} für Tabelle {table} wurde registriert")
 
         # Füge den neuen Datensatz hinzu
         data.append(record_data)
@@ -462,6 +462,7 @@
             # Datensatz nicht gefunden
             raise ValueError(f"Datensatz mit ID {record_id} nicht gefunden in Tabelle {table}")
 
+
     # System-Tabellen-Funktionen
     def register_initial_id(self, table: str, initial_id: int) -> bool:
diff --git a/connectors/connector_db_mysql.py b/connectors/connector_db_mysql.py
deleted file mode 100644
index f2e93cbe..00000000
--- a/connectors/connector_db_mysql.py
+++ /dev/null
@@ -1,675 +0,0 @@
-import os
-import logging
-from typing import List, Dict, Any, Optional, Union
-from datetime import datetime
-import mysql.connector
-from mysql.connector import Error
-
-
-logger = logging.getLogger(__name__)
-
-
-class DatabaseConnector:
-    """
-    Ein Konnektor für
MySQL-basierte Datenspeicherung. - Stellt generische Datenbankoperationen bereit. - """ - - def __init__(self, db_host: str, db_database: str, db_user: str, db_password: str, mandate_id: int = None, user_id: int = None): - """ - Initialisiert den MySQL-Datenbankkonnektor. - - Args: - db_host: MySQL-Server Host - db_database: Name der Datenbank - db_user: Benutzername für die Authentifizierung - db_password: Passwort für die Authentifizierung - mandate_id: Kontext-Parameter für den Mandanten - user_id: Kontext-Parameter für den Benutzer - """ - # Speichere die Eingabeparameter - self.db_host = db_host - self.db_database = db_database - self.db_user = db_user - self.db_password = db_password - - # Prüfe, ob Kontext-Parameter gesetzt sind - if mandate_id is None or user_id is None: - raise ValueError("mandate_id und user_id müssen gesetzt sein") - - # Stelle Verbindung zur Datenbank her - self.connection = self._create_connection() - - # System-Tabelle initialisieren - self._system_table_name = "_system" - self._initialize_system_table() - - # Temporär mandate_id und user_id speichern - self._mandate_id = mandate_id - self._user_id = user_id - - # Wenn mandate_id oder user_id 0 sind, versuche die initialen IDs zu verwenden - if mandate_id == 0: - initial_mandate_id = self.get_initial_id("mandates") - if initial_mandate_id is not None: - self._mandate_id = initial_mandate_id - logger.info(f"Verwende initiale mandate_id: {initial_mandate_id} statt 0") - - if user_id == 0: - initial_user_id = self.get_initial_id("users") - if initial_user_id is not None: - self._user_id = initial_user_id - logger.info(f"Verwende initiale user_id: {initial_user_id} statt 0") - - # Setze die effektiven IDs als Eigenschaften - self.mandate_id = self._mandate_id - self.user_id = self._user_id - - logger.info(f"DatabaseConnector initialisiert für Datenbank: {db_database}") - logger.info(f"Kontext: mandate_id={self.mandate_id}, user_id={self.user_id}") - - def _create_connection(self): - """Erstellt eine Verbindung zur MySQL-Datenbank""" - try: - connection = mysql.connector.connect( - host=self.db_host, - database=self.db_database, - user=self.db_user, - password=self.db_password - ) - if connection.is_connected(): - logger.info(f"Verbunden mit MySQL-Server Version {connection.get_server_info()}") - return connection - except Error as e: - logger.error(f"Fehler bei der Verbindung zu MySQL: {e}") - raise - - def _initialize_system_table(self): - """Initialisiert die System-Tabelle, falls sie noch nicht existiert.""" - cursor = None - try: - cursor = self.connection.cursor() - - # Prüfe, ob die System-Tabelle existiert - cursor.execute(f""" - SELECT COUNT(*) - FROM information_schema.tables - WHERE table_schema = '{self.db_database}' - AND table_name = '{self._system_table_name}' - """) - - if cursor.fetchone()[0] == 0: - # Erstelle die System-Tabelle - cursor.execute(f""" - CREATE TABLE {self._system_table_name} ( - table_name VARCHAR(255) PRIMARY KEY, - initial_id INT NOT NULL - ) - """) - self.connection.commit() - logger.info(f"System-Tabelle '{self._system_table_name}' erstellt") - except Error as e: - logger.error(f"Fehler beim Initialisieren der System-Tabelle: {e}") - if self.connection.is_connected(): - self.connection.rollback() - raise - finally: - if cursor and cursor.is_connected(): - cursor.close() - - def _execute_query(self, query: str, params: tuple = None): - """Führt eine SQL-Abfrage aus""" - cursor = None - try: - cursor = self.connection.cursor(dictionary=True) - cursor.execute(query, params) - 
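            # Note: the finally block below closes this cursor before the
            # caller can use it, so _execute_query hands back an already-closed
            # cursor. The wrappers that fetch or commit first (_execute_select,
            # _execute_insert, _execute_update, _execute_delete) are the
            # usable entry points.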
return cursor - except Error as e: - logger.error(f"Fehler bei der Ausführung der Abfrage: {e}") - raise - finally: - if cursor: - cursor.close() - - def _execute_select(self, query: str, params: tuple = None) -> List[Dict[str, Any]]: - """Führt eine SELECT-Abfrage aus und gibt die Ergebnisse zurück""" - cursor = None - try: - cursor = self.connection.cursor(dictionary=True) - cursor.execute(query, params) - result = cursor.fetchall() - return result - except Error as e: - logger.error(f"Fehler bei der Ausführung der SELECT-Abfrage: {e}") - raise - finally: - if cursor: - cursor.close() - - def _execute_insert(self, query: str, params: tuple = None) -> int: - """Führt eine INSERT-Abfrage aus und gibt die ID des eingefügten Datensatzes zurück""" - cursor = None - try: - cursor = self.connection.cursor() - cursor.execute(query, params) - self.connection.commit() - return cursor.lastrowid - except Error as e: - logger.error(f"Fehler bei der Ausführung der INSERT-Abfrage: {e}") - self.connection.rollback() - raise - finally: - if cursor: - cursor.close() - - def _execute_update(self, query: str, params: tuple = None) -> int: - """Führt eine UPDATE-Abfrage aus und gibt die Anzahl der betroffenen Zeilen zurück""" - cursor = None - try: - cursor = self.connection.cursor() - cursor.execute(query, params) - self.connection.commit() - return cursor.rowcount - except Error as e: - logger.error(f"Fehler bei der Ausführung der UPDATE-Abfrage: {e}") - self.connection.rollback() - raise - finally: - if cursor: - cursor.close() - - def _execute_delete(self, query: str, params: tuple = None) -> int: - """Führt eine DELETE-Abfrage aus und gibt die Anzahl der gelöschten Zeilen zurück""" - cursor = None - try: - cursor = self.connection.cursor() - cursor.execute(query, params) - self.connection.commit() - return cursor.rowcount - except Error as e: - logger.error(f"Fehler bei der Ausführung der DELETE-Abfrage: {e}") - self.connection.rollback() - raise - finally: - if cursor: - cursor.close() - - def _apply_record_filter(self, record_filter: Dict[str, Any] = None) -> str: - """Erstellt eine WHERE-Klausel basierend auf dem Datensatzfilter""" - if not record_filter: - return "WHERE 1=1" - - conditions = [] - params = [] - - for field, value in record_filter.items(): - conditions.append(f"{field} = %s") - params.append(value) - - where_clause = "WHERE " + " AND ".join(conditions) - - return where_clause, tuple(params) - - def _get_context_filter(self) -> tuple: - """Erstellt eine WHERE-Klausel für den Mandanten- und Benutzerkontext""" - return "WHERE mandate_id = %s", (self.mandate_id,) - - # Public API - - def get_tables(self, filter_criteria: Dict[str, Any] = None) -> List[str]: - """ - Gibt eine Liste aller verfügbaren Tabellen zurück. - - Args: - filter_criteria: Optionale Filterkriterien (nicht implementiert) - - Returns: - Liste der Tabellennamen - """ - query = """ - SELECT table_name - FROM information_schema.tables - WHERE table_schema = %s - AND table_name NOT LIKE '\_%' - """ - - try: - result = self._execute_select(query, (self.db_database,)) - return [row["table_name"] for row in result] - except Exception as e: - logger.error(f"Fehler beim Abrufen der Tabellen: {e}") - return [] - - def get_fields(self, table: str, filter_criteria: Dict[str, Any] = None) -> List[str]: - """ - Gibt eine Liste aller Felder einer Tabelle zurück. 
- - Args: - table: Name der Tabelle - filter_criteria: Optionale Filterkriterien (nicht implementiert) - - Returns: - Liste der Feldnamen - """ - query = """ - SELECT column_name - FROM information_schema.columns - WHERE table_schema = %s AND table_name = %s - """ - - try: - result = self._execute_select(query, (self.db_database, table)) - return [row["column_name"] for row in result] - except Exception as e: - logger.error(f"Fehler beim Abrufen der Felder für Tabelle {table}: {e}") - return [] - - def get_schema(self, table: str, language: str = None, filter_criteria: Dict[str, Any] = None) -> Dict[str, Dict[str, Any]]: - """ - Gibt ein Schema-Objekt für eine Tabelle zurück mit Datentypen und Labels. - - Args: - table: Name der Tabelle - language: Sprache für die Labels (optional) - filter_criteria: Optionale Filterkriterien (nicht implementiert) - - Returns: - Schema-Objekt mit Feldern, Datentypen und Labels - """ - query = """ - SELECT - column_name, - data_type, - column_comment - FROM - information_schema.columns - WHERE - table_schema = %s AND table_name = %s - """ - - schema = {} - - try: - result = self._execute_select(query, (self.db_database, table)) - - for row in result: - field = row["column_name"] - data_type = row["data_type"] - comment = row["column_comment"] - - # Label erstellen (Standardwert ist der Feldname) - label = field - - # Wenn ein Kommentar existiert, verwende diesen als Label - if comment: - label = comment - - schema[field] = { - "type": data_type, - "label": label - } - - return schema - except Exception as e: - logger.error(f"Fehler beim Abrufen des Schemas für Tabelle {table}: {e}") - return {} - - def get_recordset(self, table: str, field_filter: List[str] = None, record_filter: Dict[str, Any] = None) -> List[Dict[str, Any]]: - """ - Gibt eine Liste von Datensätzen aus einer Tabelle zurück, gefiltert nach Kriterien. - - Args: - table: Name der Tabelle - field_filter: Filter für Felder (welche Felder zurückgegeben werden sollen) - record_filter: Filter für Datensätze (welche Datensätze zurückgegeben werden sollen) - - Returns: - Liste der gefilterten Datensätze - """ - # Bestimme die Felder für die Abfrage - fields = "*" - if field_filter and isinstance(field_filter, list): - fields = ", ".join(field_filter) - - # Basisbedingung ist der Mandantenkontext - base_where, base_params = self._get_context_filter() - - # Wende zusätzliche Filterbedingungen an, wenn vorhanden - additional_where = "" - additional_params = () - - if record_filter: - additional_where, additional_params = self._apply_record_filter(record_filter) - # Entferne das "WHERE" am Anfang und ersetze es durch "AND" - additional_where = " AND " + additional_where[6:] - - # Kombiniere die Bedingungen und Parameter - where_clause = base_where + additional_where - params = base_params + additional_params - - # Erstelle die vollständige Abfrage - query = f""" - SELECT {fields} FROM {table} {where_clause} - """ - - try: - return self._execute_select(query, params) - except Exception as e: - logger.error(f"Fehler beim Abrufen der Datensätze aus Tabelle {table}: {e}") - return [] - - def record_create(self, table: str, record_data: Dict[str, Any]) -> Dict[str, Any]: - """ - Erstellt einen neuen Datensatz in der Tabelle. 
- - Args: - table: Name der Tabelle - record_data: Daten für den neuen Datensatz - - Returns: - Der erstellte Datensatz - """ - # Füge mandate_id und user_id hinzu, falls nicht vorhanden oder 0 - if "mandate_id" not in record_data or record_data["mandate_id"] == 0: - record_data["mandate_id"] = self.mandate_id - - if "user_id" not in record_data or record_data["user_id"] == 0: - record_data["user_id"] = self.user_id - - # Erstelle die Abfrage - fields = ", ".join(record_data.keys()) - placeholders = ", ".join(["%s"] * len(record_data)) - values = tuple(record_data.values()) - - query = f""" - INSERT INTO {table} ({fields}) - VALUES ({placeholders}) - """ - - try: - # Prüfe zuerst, ob die Tabelle leer ist - check_query = f""" - SELECT COUNT(*) as count FROM {table} - """ - count_result = self._execute_select(check_query) - is_empty = count_result[0]["count"] == 0 - - # Führe die Abfrage aus und erhalte die ID des neuen Datensatzes - new_id = self._execute_insert(query, values) - - # Wenn die Tabelle vorher leer war, registriere die neue ID als initiale ID - if is_empty and new_id: - self.register_initial_id(table, new_id) - logger.info(f"Initiale ID {new_id} für Tabelle {table} registriert") - - # Füge die ID zum Datensatz hinzu, falls eine zurückgegeben wurde - if new_id: - record_data["id"] = new_id - - return record_data - except Exception as e: - logger.error(f"Fehler beim Erstellen des Datensatzes in Tabelle {table}: {e}") - raise ValueError(f"Fehler beim Erstellen des Datensatzes in Tabelle {table}") - - def record_delete(self, table: str, record_id: Union[str, int]) -> bool: - """ - Löscht einen Datensatz aus der Tabelle. - - Args: - table: Name der Tabelle - record_id: ID des zu löschenden Datensatzes - - Returns: - True bei Erfolg, False bei Fehler - """ - # Prüfe, ob es sich um die initiale ID handelt - initial_id = self.get_initial_id(table) - if initial_id is not None and initial_id == record_id: - logger.warning(f"Versuch, den initialen Datensatz mit ID {record_id} aus Tabelle {table} zu löschen, wurde verhindert") - return False - - # Prüfe zuerst, ob der Datensatz zum aktuellen Mandanten gehört - check_query = f""" - SELECT mandate_id FROM {table} WHERE id = %s - """ - - try: - result = self._execute_select(check_query, (record_id,)) - - if not result: - # Datensatz nicht gefunden - return False - - if result[0]["mandate_id"] != self.mandate_id: - raise ValueError("Not your mandate") - - # Lösche den Datensatz - delete_query = f""" - DELETE FROM {table} WHERE id = %s AND mandate_id = %s - """ - - rows_affected = self._execute_delete(delete_query, (record_id, self.mandate_id)) - - return rows_affected > 0 - except Exception as e: - logger.error(f"Fehler beim Löschen des Datensatzes aus Tabelle {table}: {e}") - return False - - def record_modify(self, table: str, record_id: Union[str, int], record_data: Dict[str, Any]) -> Dict[str, Any]: - """ - Ändert einen Datensatz in der Tabelle. 
- - Args: - table: Name der Tabelle - record_id: ID des zu ändernden Datensatzes - record_data: Neue Daten für den Datensatz - - Returns: - Der aktualisierte Datensatz - """ - # Prüfe, ob es sich um die initiale ID handelt und die ID geändert werden soll - initial_id = self.get_initial_id(table) - if initial_id is not None and initial_id == record_id and "id" in record_data and record_data["id"] != record_id: - raise ValueError(f"Die ID des initialen Datensatzes in Tabelle {table} kann nicht geändert werden") - - # Prüfe zuerst, ob der Datensatz zum aktuellen Mandanten gehört - check_query = f""" - SELECT mandate_id FROM {table} WHERE id = %s - """ - - try: - result = self._execute_select(check_query, (record_id,)) - - if not result: - # Datensatz nicht gefunden - raise ValueError(f"Datensatz mit ID {record_id} nicht gefunden in Tabelle {table}") - - if result[0]["mandate_id"] != self.mandate_id: - raise ValueError("Not your mandate") - - # Erstelle die SET-Klausel und Parameter für das Update - set_clauses = [] - values = [] - - for key, value in record_data.items(): - set_clauses.append(f"{key} = %s") - values.append(value) - - set_clause = ", ".join(set_clauses) - values.append(record_id) # Für die WHERE-Bedingung - values.append(self.mandate_id) # Für die mandate_id-Bedingung - - # Aktualisiere den Datensatz - update_query = f""" - UPDATE {table} - SET {set_clause} - WHERE id = %s AND mandate_id = %s - """ - - rows_affected = self._execute_update(update_query, tuple(values)) - - if rows_affected > 0: - # Lade den aktualisierten Datensatz - get_query = f""" - SELECT * FROM {table} WHERE id = %s - """ - - updated_record = self._execute_select(get_query, (record_id,)) - - if updated_record: - return updated_record[0] - else: - raise ValueError(f"Fehler beim Abrufen des aktualisierten Datensatzes aus Tabelle {table}") - else: - raise ValueError(f"Fehler beim Aktualisieren des Datensatzes in Tabelle {table}") - except Exception as e: - logger.error(f"Fehler beim Aktualisieren des Datensatzes in Tabelle {table}: {e}") - raise - - # System-Tabellen-Funktionen - - def register_initial_id(self, table: str, initial_id: int) -> bool: - """ - Registriert die initiale ID für eine Tabelle. - - Args: - table: Name der Tabelle - initial_id: Die initiale ID - - Returns: - True bei Erfolg, False bei Fehler - """ - try: - # Prüfe zuerst, ob bereits eine initiale ID für diese Tabelle registriert ist - check_query = f""" - SELECT COUNT(*) as count - FROM {self._system_table_name} - WHERE table_name = %s - """ - - result = self._execute_select(check_query, (table,)) - - if result and result[0]["count"] > 0: - # Bereits registriert - return True - - # Registriere die initiale ID - insert_query = f""" - INSERT INTO {self._system_table_name} (table_name, initial_id) - VALUES (%s, %s) - """ - - self._execute_insert(insert_query, (table, initial_id)) - logger.info(f"Initiale ID {initial_id} für Tabelle {table} registriert") - - return True - except Exception as e: - logger.error(f"Fehler beim Registrieren der initialen ID für Tabelle {table}: {e}") - return False - - def get_initial_id(self, table: str) -> Optional[int]: - """ - Gibt die initiale ID für eine Tabelle zurück. 
- - Args: - table: Name der Tabelle - - Returns: - Die initiale ID oder None, wenn nicht vorhanden - """ - try: - query = f""" - SELECT initial_id - FROM {self._system_table_name} - WHERE table_name = %s - """ - - result = self._execute_select(query, (table,)) - - if result and len(result) > 0: - logger.info(f"Gefundene initiale ID für Tabelle {table}: {result[0]['initial_id']}") - return result[0]["initial_id"] - - # Wenn keine initiale ID gefunden wurde, versuche den ersten Datensatz zu verwenden - if table and not table.startswith("_"): - try: - query = f""" - SELECT id - FROM {table} - ORDER BY id - LIMIT 1 - """ - - first_record = self._execute_select(query) - - if first_record and len(first_record) > 0 and "id" in first_record[0]: - first_id = first_record[0]["id"] - # Registriere diese ID als initiale ID - self.register_initial_id(table, first_id) - logger.info(f"Automatisch erkannte initiale ID {first_id} für Tabelle {table}") - return first_id - except Exception as inner_e: - logger.warning(f"Konnte keinen ersten Datensatz in Tabelle {table} finden: {inner_e}") - - logger.debug(f"Keine initiale ID für Tabelle {table} gefunden") - return None - except Exception as e: - logger.error(f"Fehler beim Abrufen der initialen ID für Tabelle {table}: {e}") - return None - - def has_initial_id(self, table: str) -> bool: - """ - Prüft, ob eine initiale ID für eine Tabelle registriert ist. - - Args: - table: Name der Tabelle - - Returns: - True, wenn eine initiale ID registriert ist, sonst False - """ - try: - query = f""" - SELECT COUNT(*) as count - FROM {self._system_table_name} - WHERE table_name = %s - """ - - result = self._execute_select(query, (table,)) - - if result and len(result) > 0: - return result[0]["count"] > 0 - - return False - except Exception as e: - logger.error(f"Fehler beim Prüfen der initialen ID für Tabelle {table}: {e}") - return False - - def get_all_initial_ids(self) -> Dict[str, int]: - """ - Gibt alle registrierten initialen IDs zurück. - - Returns: - Dictionary mit Tabellennamen als Schlüssel und initialen IDs als Werte - """ - try: - query = f""" - SELECT table_name, initial_id - FROM {self._system_table_name} - """ - - result = self._execute_select(query) - - initial_ids = {} - for row in result: - initial_ids[row["table_name"]] = row["initial_id"] - - return initial_ids - except Exception as e: - logger.error(f"Fehler beim Abrufen aller initialen IDs: {e}") - return {} - - def close(self): - """Schließt die Datenbankverbindung""" - if hasattr(self, 'connection') and self.connection.is_connected(): - self.connection.close() - logger.info("Datenbankverbindung geschlossen") \ No newline at end of file diff --git a/modules/agentservice_agent_analyst.py b/modules/agentservice_agent_analyst.py deleted file mode 100644 index 989e80ce..00000000 --- a/modules/agentservice_agent_analyst.py +++ /dev/null @@ -1,1804 +0,0 @@ -""" -Datenanalyst-Agent für die Analyse und Interpretation von Daten. -Angepasst für das refaktorisierte Core-Modul mit AgentCommunicationProtocol. 
-""" - -import logging -import traceback -import json -import re -import uuid -import io -import base64 -from typing import List, Dict, Any, Optional, Union, Tuple -from datetime import datetime -import pandas as pd -import numpy as np -import matplotlib.pyplot as plt -import seaborn as sns -import plotly.express as px -import plotly.graph_objects as go - -from modules.agentservice_base import BaseAgent -from connectors.connector_aichat_openai import ChatService -from modules.agentservice_utils import WorkflowUtils, MessageUtils, LoggingUtils, FileUtils -from modules.agentservice_protocol import AgentMessage, AgentCommunicationProtocol - -logger = logging.getLogger(__name__) - -class AnalystAgent(BaseAgent): - """Agent for data analysis and interpretation""" - - def __init__(self): - """Initialize the data analyst agent""" - super().__init__() - self.id = "analyst_agent" - self.name = "Data Analyst" - self.type = "analyst" - self.description = "Analyzes and interprets data" - self.capabilities = "data_analysis,pattern_recognition,statistics,visualization,data_interpretation" - self.result_format = "AnalysisReport" - - # Initialize AI service - self.ai_service = None - - # Document capabilities - self.supports_documents = True - self.document_capabilities = ["read", "analyze", "extract"] - self.required_context = ["data_source", "analysis_objectives"] - self.document_handler = None - - # Initialize protocol - self.protocol = AgentCommunicationProtocol() - - # Initialize utilities - self.message_utils = MessageUtils() - self.file_utils = FileUtils() - - # Setup visualization defaults - self.plt_style = 'seaborn-v0_8-whitegrid' - self.default_figsize = (10, 6) - self.chart_dpi = 100 - plt.style.use(self.plt_style) - - def get_agent_info(self) -> Dict[str, Any]: - """Get agent information for agent registry""" - info = super().get_agent_info() - info.update({ - "metadata": { - "supported_formats": ["csv", "xlsx", "json", "text"], - "analysis_types": ["statistical", "trend", "comparative", "predictive", "clustering", "general"], - "visualization_types": ["bar", "line", "scatter", "histogram", "box", "heatmap", "pie"] - } - }) - return info - - def set_document_handler(self, document_handler): - """Set the document handler for file operations""" - self.document_handler = document_handler - - """ - Main updates to the process_message method in AnalystAgent to consider all available content. - """ - async def process_message(self, message: Dict[str, Any], context: Dict[str, Any] = None) -> Dict[str, Any]: - """ - Process a message and perform data analysis. 
- - Args: - message: Input message - context: Optional context - - Returns: - Analysis response - """ - # Extract workflow_id from context or message - workflow_id = context.get("workflow_id") if context else message.get("workflow_id", "unknown") - - # Get or create logging_utils - log_func = context.get("log_func") if context else None - logging_utils = LoggingUtils(workflow_id, log_func) - - # Create status update using protocol - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Starting data analysis", - sender_id=self.id, - status="in_progress", - progress=0.0, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Create response structure - response = { - "role": "assistant", - "content": "", - "agent_id": self.id, - "agent_type": self.type, - "agent_name": self.name, - "result_format": self.result_format, - "workflow_id": workflow_id, - "documents": [] - } - - try: - # Extract task from message - task = message.get("content", "") - - # Process any attached documents and extract data - document_context = "" - data_frames = {} - - if message.get("documents"): - logging_utils.info("Processing documents for analysis", "execution") - document_context, data_frames = await self._process_and_extract_data(message) - - # Update progress - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Documents processed, performing analysis", - sender_id=self.id, - status="in_progress", - progress=0.4, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Check if we have either data frames OR a substantial text task to analyze - # This is the key change - we're considering the task text as analyzable content - have_analyzable_content = len(data_frames) > 0 or (task and len(task.strip()) > 10) - - if not have_analyzable_content: - # Only show warning if really no content is available - if message.get("documents"): - logging_utils.warning("No processable data found in the provided documents", "execution") - analysis_content = "## Data Analysis Report\n\nI couldn't find any processable data in the provided documents. Please ensure you've attached CSV, Excel, or other data files in a format I can analyze." - else: - logging_utils.warning("No documents or analyzable content provided for analysis", "execution") - analysis_content = "## Data Analysis Report\n\nNo data or sufficient text content was provided for analysis. Please provide text for analysis or attach data files for me to analyze." 
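            # Early return: when there is nothing to analyze, the advisory
            # report above is returned as-is and no AI call is made.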
- - response["content"] = analysis_content - return response - - # Determine analysis type and perform analysis - analysis_type = self._determine_analysis_type(task) - logging_utils.info(f"Performing {analysis_type} analysis", "execution") - - # Create enhanced prompt with document context - enhanced_prompt = self._create_enhanced_prompt(message, document_context, context) - - # Generate visualization documents if data is available - visualization_documents = [] - if data_frames: - logging_utils.info(f"Generating visualizations for {len(data_frames)} data sets", "execution") - visualization_documents = self._generate_visualizations(data_frames, analysis_type, workflow_id, task) - - # Add visualizations to response documents - response["documents"].extend(visualization_documents) - - # Update progress - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Visualizations created, finalizing analysis", - sender_id=self.id, - status="in_progress", - progress=0.7, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Generate analysis with included data insights if we have data frames - analysis_content = "" - if data_frames: - # Extract data insights to include in the analysis - data_insights = self._extract_data_insights(data_frames) - - # Add insights to the prompt - enhanced_prompt += f"\n\n=== DATA INSIGHTS ===\n{data_insights}" - - # Generate analysis with data insights - analysis_content = await self._generate_analysis(enhanced_prompt, analysis_type) - - # Include references to the visualization documents - if visualization_documents: - viz_references = "\n\n## Visualizations\n\n" - viz_references += "The following visualizations have been created to help understand the data:\n\n" - - for i, doc in enumerate(visualization_documents, 1): - doc_source = doc.get("source", {}) - doc_name = doc_source.get("name", f"Visualization {i}") - viz_references += f"{i}. 
**{doc_name}** - Available as an attached document\n" - - analysis_content += viz_references - else: - # Generate analysis based just on text if no data frames but we have text to analyze - # This is the key change - we're analyzing the text content directly - logging_utils.info("No data frames available, analyzing text content", "execution") - analysis_content = await self._generate_analysis(enhanced_prompt, analysis_type) - - # Final progress update - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Analysis completed", - sender_id=self.id, - status="completed", - progress=1.0, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Set the content in the response - response["content"] = analysis_content - - # Finish by sending result message to protocol if needed - if context and context.get("require_protocol_message"): - result_message = self.send_analysis_result( - analysis_content=analysis_content, - sender_id=self.id, - receiver_id=context.get("receiver_id", "workflow"), - task_id=context.get("task_id", f"analysis_{uuid.uuid4()}"), - analysis_data={ - "analysis_type": analysis_type, - "visualization_count": len(visualization_documents), - "data_frame_count": len(data_frames) - }, - context_id=workflow_id - ) - # Just log the message creation, don't need to return it - logging_utils.info(f"Created protocol result message: {result_message.id}", "execution") - - return response - - except Exception as e: - error_msg = f"Error during data analysis: {str(e)}" - logging_utils.error(error_msg, "error") - - # Create error response using protocol - error_message = self.protocol.create_error_message( - error_description=error_msg, - sender_id=self.id, - error_type="analysis", - error_details={"traceback": traceback.format_exc()}, - context_id=workflow_id - ) - - # Set error content in the response - response["content"] = f"## Error during data analysis\n\n{error_msg}\n\n```\n{traceback.format_exc()}\n```" - response["status"] = "error" - - return response - - - - """ - Add _create_enhanced_prompt method to better handle text content in analysis. - """ - - def _create_enhanced_prompt(self, message: Dict[str, Any], document_context: str, context: Dict[str, Any] = None) -> str: - """ - Create an enhanced prompt for analysis that integrates all available content. 
- - Args: - message: The original message - document_context: Context extracted from documents - context: Optional additional context - - Returns: - Enhanced prompt for analysis - """ - # Get original task/prompt - task = message.get("content", "") - - # Add context information if available - context_info = "" - if context: - # Add any dependency outputs from previous activities - if "dependency_outputs" in context: - dependency_context = context.get("dependency_outputs", {}) - for name, value in dependency_context.items(): - if isinstance(value, dict) and "content" in value: - context_info += f"\n\n=== INPUT FROM {name.upper()} ===\n{value['content']}" - else: - context_info += f"\n\n=== INPUT FROM {name.upper()} ===\n{str(value)}" - - # Add expected format information - if "expected_format" in context: - context_info += f"\n\nExpected output format: {context.get('expected_format')}" - - # Start with task - enhanced_prompt = f"ANALYSIS TASK:\n{task}" - - # Add any context information - if context_info: - enhanced_prompt += f"\n\n{context_info}" - - # Add document context if available - if document_context: - enhanced_prompt += f"\n\n=== DOCUMENT CONTENT ===\n{document_context}" - else: - # If no document content, explicitly note that we're analyzing the text content directly - enhanced_prompt += "\n\nNo data files were provided. Perform analysis on the text content itself." - - # Add final instructions - if document_context: - enhanced_prompt += "\n\nBased on the data and documents provided, please perform a comprehensive analysis." - else: - enhanced_prompt += "\n\nBased on the text content provided, please perform a comprehensive analysis." - - if task: - enhanced_prompt += f" Focus specifically on addressing: {task}" - - enhanced_prompt += "\n\nProvide insights, patterns, and conclusions in a clear, structured format." - - return enhanced_prompt - - - - async def _process_and_extract_data(self, message: Dict[str, Any]) -> Tuple[str, Dict[str, pd.DataFrame]]: - """ - Process documents and extract structured data. - - Args: - message: Input message with documents - - Returns: - Tuple of (document_context, data_frames_dict) - """ - document_context = "" - data_frames = {} - - if not message.get("documents"): - return document_context, data_frames - - # Extract document text (this will be our context) - if self.document_handler: - document_context = self.document_handler.merge_document_contents(message) - else: - document_context = self._extract_document_text(message) - - # Identify and process data files (CSV, Excel, etc.) 
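        # Dispatch is keyed on the filename extension (with MIME types as a
        # fallback via _is_data_file): .csv -> _process_csv,
        # .xlsx/.xls -> _process_excel (one DataFrame per sheet, stored under
        # "filename::sheet"), .json -> _process_json; files that cannot be
        # parsed are logged and skipped.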
- for document in message.get("documents", []): - source = document.get("source", {}) - filename = source.get("name", "") - file_id = source.get("id", 0) - content_type = source.get("content_type", "") - - # Skip if not a recognizable data file - if not self._is_data_file(filename, content_type): - continue - - try: - # Try to get file content through document handler first - file_content = None - if self.document_handler: - file_content = self.document_handler.get_file_content_from_message(message, file_id=file_id) - - # Process based on file type - if filename.lower().endswith('.csv'): - df = self._process_csv(file_content, filename) - if df is not None: - data_frames[filename] = df - - elif filename.lower().endswith(('.xlsx', '.xls')): - dfs = self._process_excel(file_content, filename) - for sheet_name, df in dfs.items(): - data_frames[f"{filename}::{sheet_name}"] = df - - elif filename.lower().endswith('.json'): - df = self._process_json(file_content, filename) - if df is not None: - data_frames[filename] = df - - except Exception as e: - logger.error(f"Error processing file {filename}: {str(e)}") - - return document_context, data_frames - - def _is_data_file(self, filename: str, content_type: str) -> bool: - """Check if a file is a processable data file""" - if filename.lower().endswith(('.csv', '.xlsx', '.xls', '.json')): - return True - - if content_type in ['text/csv', 'application/vnd.ms-excel', - 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', - 'application/json']: - return True - - return False - - def _process_csv(self, file_content: Union[bytes, str], filename: str) -> Optional[pd.DataFrame]: - """Process CSV file content into a pandas DataFrame""" - if file_content is None: - return None - - try: - # Handle the case where file_content is already a string - if isinstance(file_content, str): - text_content = file_content - df = pd.read_csv(io.StringIO(text_content)) - df = self._preprocess_dataframe(df) - return df - - # Handle the case where file_content is bytes - else: - # Try various encodings - for encoding in ['utf-8', 'latin1', 'cp1252']: - try: - # Use StringIO to create a file-like object - text_content = file_content.decode(encoding) - df = pd.read_csv(io.StringIO(text_content)) - - # Basic preprocessing - df = self._preprocess_dataframe(df) - return df - except UnicodeDecodeError: - continue - except Exception as e: - logger.error(f"Error processing CSV with {encoding} encoding: {str(e)}") - - # If all encodings fail, try one more time with errors='replace' - text_content = file_content.decode('utf-8', errors='replace') - df = pd.read_csv(io.StringIO(text_content)) - df = self._preprocess_dataframe(df) - return df - - except Exception as e: - logger.error(f"Failed to process CSV file {filename}: {str(e)}") - return None - - def _process_excel(self, file_content: bytes, filename: str) -> Dict[str, pd.DataFrame]: - """Process Excel file content into pandas DataFrames""" - result = {} - - if file_content is None: - return result - - try: - # Use BytesIO to create a file-like object - excel_file = io.BytesIO(file_content) - - # Try to read with pandas - excel_data = pd.ExcelFile(excel_file) - - # Process each sheet - for sheet_name in excel_data.sheet_names: - df = pd.read_excel(excel_file, sheet_name=sheet_name) - - # Basic preprocessing - df = self._preprocess_dataframe(df) - - # Only include if there's actual data - if not df.empty: - result[sheet_name] = df - - return result - - except Exception as e: - logger.error(f"Failed to process Excel 
file {filename}: {str(e)}") - return result - - def _process_json(self, file_content: bytes, filename: str) -> Optional[pd.DataFrame]: - """Process JSON file content into a pandas DataFrame""" - if file_content is None: - return None - - try: - # Decode and parse JSON - json_content = file_content.decode('utf-8') - data = json.loads(json_content) - - # Handle different JSON structures - if isinstance(data, list): - # List of records - df = pd.DataFrame(data) - elif isinstance(data, dict): - # Try to find a suitable data structure in the dict - if any(isinstance(v, list) for v in data.values()): - # Find the first list value to use as data - for key, value in data.items(): - if isinstance(value, list) and len(value) > 0: - df = pd.DataFrame(value) - break - else: - # No suitable list found - return None - else: - # Convert flat dict to a single-row DataFrame - df = pd.DataFrame([data]) - else: - # Unsupported structure - return None - - # Basic preprocessing - df = self._preprocess_dataframe(df) - return df - - except Exception as e: - logger.error(f"Failed to process JSON file {filename}: {str(e)}") - return None - - def _preprocess_dataframe(self, df: pd.DataFrame) -> pd.DataFrame: - """Perform basic preprocessing on a DataFrame""" - if df.empty: - return df - - # Remove completely empty rows and columns - df = df.dropna(how='all') - df = df.dropna(axis=1, how='all') - - # Try to convert string columns to numeric where appropriate - for col in df.columns: - # Skip if already numeric - if pd.api.types.is_numeric_dtype(df[col]): - continue - - # Skip if mostly non-numeric strings - if df[col].dtype == 'object': - # Check if more than 80% of non-NA values could be numeric - non_na_values = df[col].dropna() - if len(non_na_values) == 0: - continue - - # Try to convert to numeric and count successes - numeric_count = pd.to_numeric(non_na_values, errors='coerce').notna().sum() - if numeric_count / len(non_na_values) > 0.8: - # More than 80% can be converted to numeric, so convert the column - df[col] = pd.to_numeric(df[col], errors='coerce') - - # Try to parse date columns - for col in df.columns: - # Skip if not object dtype - if df[col].dtype != 'object': - continue - - # Check if column name suggests a date - if any(date_term in col.lower() for date_term in ['date', 'time', 'day', 'month', 'year']): - try: - # Try to parse as datetime - df[col] = pd.to_datetime(df[col], errors='coerce') - # Only keep the conversion if at least 80% succeeded - if df[col].notna().mean() < 0.8: - # Revert to original if too many NAs were introduced - df[col] = df[col].astype('object') - except: - pass - - return df - - def _extract_document_text(self, message: Dict[str, Any]) -> str: - """ - Extract text from documents (fallback method). - - Args: - message: Input message with documents - - Returns: - Extracted text - """ - text_content = "" - for document in message.get("documents", []): - source = document.get("source", {}) - name = source.get("name", "unnamed") - - text_content += f"\n\n--- {name} ---\n" - - for content in document.get("contents", []): - if content.get("type") == "text": - text_content += content.get("text", "") - - return text_content - - def _determine_analysis_type(self, task: str) -> str: - """ - Determine the type of analysis based on the task. - Enhanced to better handle text-based analysis. 
- - Args: - task: The analysis task - - Returns: - Analysis type - """ - task_lower = task.lower() - - # Check for statistical analysis - if any(term in task_lower for term in ["statistics", "statistical", "mean", "median", "variance"]): - return "statistical" - - # Check for trend analysis - elif any(term in task_lower for term in ["trend", "pattern", "time series", "historical"]): - return "trend" - - # Check for comparative analysis - elif any(term in task_lower for term in ["compare", "comparison", "versus", "vs", "difference"]): - return "comparative" - - # Check for predictive analysis - elif any(term in task_lower for term in ["predict", "forecast", "future", "projection"]): - return "predictive" - - # Check for clustering or categorization - elif any(term in task_lower for term in ["cluster", "segment", "categorize", "classify"]): - return "clustering" - - # Check for text analysis specific terms - elif any(term in task_lower for term in ["text", "sentiment", "topic", "semantic", "meaning", "interpretation"]): - return "textual" - - # Check for summary requests - elif any(term in task_lower for term in ["summarize", "summary", "overview", "digest"]): - return "summary" - - # Default to general analysis - else: - return "general" - - - def _extract_data_insights(self, data_frames: Dict[str, pd.DataFrame]) -> str: - """ - Extract basic insights from data frames. - - Args: - data_frames: Dictionary of data frames - - Returns: - Extracted insights as text - """ - insights = [] - - for name, df in data_frames.items(): - if df.empty: - continue - - insight = f"Dataset: {name}\n" - insight += f"Shape: {df.shape[0]} rows, {df.shape[1]} columns\n" - insight += f"Columns: {', '.join(df.columns.tolist())}\n" - - # Basic statistics for numeric columns - numeric_cols = df.select_dtypes(include=['number']).columns - if len(numeric_cols) > 0: - insight += "Numeric column statistics:\n" - for col in numeric_cols[:5]: # Limit to first 5 columns - stats = df[col].describe() - insight += f" {col}: min={stats['min']:.2f}, max={stats['max']:.2f}, mean={stats['mean']:.2f}, median={df[col].median():.2f}\n" - - if len(numeric_cols) > 5: - insight += f" ... and {len(numeric_cols) - 5} more numeric columns\n" - - # Date range for datetime columns - date_cols = df.select_dtypes(include=['datetime']).columns - if len(date_cols) > 0: - insight += "Date range:\n" - for col in date_cols: - if df[col].notna().any(): - min_date = df[col].min() - max_date = df[col].max() - insight += f" {col}: {min_date} to {max_date}\n" - - # Categorical column value counts - cat_cols = df.select_dtypes(include=['object', 'category']).columns - if len(cat_cols) > 0: - insight += "Categorical columns:\n" - for col in cat_cols[:3]: # Limit to first 3 columns - # Get top 3 values - top_values = df[col].value_counts().head(3) - vals_str = ", ".join([f"{val} ({count})" for val, count in top_values.items()]) - insight += f" {col}: {df[col].nunique()} unique values. Top values: {vals_str}\n" - - if len(cat_cols) > 3: - insight += f" ... 
and {len(cat_cols) - 3} more categorical columns\n" - - # Missing values - missing = df.isna().sum() - if missing.sum() > 0: - cols_with_missing = missing[missing > 0] - insight += "Missing values:\n" - for col, count in cols_with_missing.items(): - pct = 100 * count / len(df) - insight += f" {col}: {count} missing values ({pct:.1f}%)\n" - - insights.append(insight) - - return "\n\n".join(insights) - - def _generate_visualizations(self, data_frames: Dict[str, pd.DataFrame], analysis_type: str, - workflow_id: str, task: str) -> List[Dict[str, Any]]: - """ - Generate appropriate visualizations based on data and analysis type. - - Args: - data_frames: Dictionary of DataFrames to visualize - analysis_type: Type of analysis being performed - workflow_id: Workflow ID - task: Original task description - - Returns: - List of visualization document objects - """ - documents = [] - - for name, df in data_frames.items(): - if df.empty or df.shape[0] < 2: - continue # Skip empty or single-row DataFrames - - # Generate different visualizations based on the analysis type - if analysis_type == "statistical": - viz_docs = self._create_statistical_visualizations(df, name, workflow_id) - documents.extend(viz_docs) - - elif analysis_type == "trend": - viz_docs = self._create_trend_visualizations(df, name, workflow_id) - documents.extend(viz_docs) - - elif analysis_type == "comparative": - viz_docs = self._create_comparative_visualizations(df, name, workflow_id) - documents.extend(viz_docs) - - elif analysis_type == "predictive": - viz_docs = self._create_predictive_visualizations(df, name, workflow_id) - documents.extend(viz_docs) - - elif analysis_type == "clustering": - viz_docs = self._create_clustering_visualizations(df, name, workflow_id) - documents.extend(viz_docs) - - else: # general analysis - viz_docs = self._create_general_visualizations(df, name, workflow_id) - documents.extend(viz_docs) - - return documents - - def _create_statistical_visualizations(self, df: pd.DataFrame, name: str, workflow_id: str) -> List[Dict[str, Any]]: - """Create statistical visualizations for a DataFrame""" - documents = [] - - # 1. Distribution/Histogram plots for numeric columns - numeric_cols = df.select_dtypes(include=['number']).columns[:5] # Limit to first 5 - if len(numeric_cols) > 0: - plt.figure(figsize=(12, 8)) - - for i, col in enumerate(numeric_cols, 1): - plt.subplot(len(numeric_cols), 1, i) - sns.histplot(df[col].dropna(), kde=True) - plt.title(f'Distribution of {col}') - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_stat_dist_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"Statistical Distributions - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - - # 2. 
Box plots for numeric columns - if len(numeric_cols) > 0: - plt.figure(figsize=(12, 8)) - sns.boxplot(data=df[numeric_cols]) - plt.title(f'Box Plots of Numeric Variables in {name}') - plt.xticks(rotation=45) - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_stat_box_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"Box Plots - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - - # 3. Correlation heatmap for numeric columns - if len(numeric_cols) >= 2: - plt.figure(figsize=(10, 8)) - corr = df[numeric_cols].corr() - sns.heatmap(corr, annot=True, cmap='coolwarm', center=0) - plt.title(f'Correlation Heatmap - {name}') - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_stat_corr_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"Correlation Heatmap - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - - return documents - - def _create_trend_visualizations(self, df: pd.DataFrame, name: str, workflow_id: str) -> List[Dict[str, Any]]: - """Create trend visualizations for a DataFrame""" - documents = [] - - # Check for date/time columns - date_cols = df.select_dtypes(include=['datetime']).columns - - # If we have date columns, create time series plots - if len(date_cols) > 0: - date_col = date_cols[0] # Use the first date column - - # Find numeric columns to plot against the date - numeric_cols = df.select_dtypes(include=['number']).columns[:3] # Limit to first 3 - - if len(numeric_cols) > 0: - plt.figure(figsize=(12, 8)) - - for i, col in enumerate(numeric_cols, 1): - plt.subplot(len(numeric_cols), 1, i) - plt.plot(df[date_col], df[col]) - plt.title(f'Trend of {col} over time') - plt.xticks(rotation=45) - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_trend_time_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"Time Series Trends - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - - # If no date columns found, find another column that might represent sequence/order - else: - # Look for columns with sequential numbers - potential_sequence_cols = [] - for col in df.select_dtypes(include=['number']).columns: - values = df[col].dropna().values - if len(values) >= 5: - # Check if values are mostly sequential - diffs = np.diff(sorted(values)) - if np.all(diffs > 0) and np.std(diffs) / np.mean(diffs) < 0.5: - potential_sequence_cols.append(col) - - # Use first potential sequence column or first numeric column - numeric_cols = df.select_dtypes(include=['number']).columns - if len(potential_sequence_cols) > 0 and len(numeric_cols) > 1: - sequence_col = potential_sequence_cols[0] - # Find other numeric columns to plot against the sequence - plot_cols = [col for col in numeric_cols if col != sequence_col][:2] - - plt.figure(figsize=(12, 6)) - for col in plot_cols: - plt.plot(df[sequence_col], df[col], 
marker='o', label=col) - plt.title(f'Trend by {sequence_col} - {name}') - plt.legend() - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_trend_seq_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"Sequential Trends - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - - # Moving average visualization if we have enough data points - if len(df) > 10: - numeric_cols = df.select_dtypes(include=['number']).columns[:2] # Limit to first 2 - if len(numeric_cols) > 0: - plt.figure(figsize=(12, 6)) - - for col in numeric_cols: - # Sort data if we have a date column - if len(date_cols) > 0: - sorted_df = df.sort_values(by=date_cols[0]) - else: - sorted_df = df - - # Calculate moving average (window size 3) - values = sorted_df[col].values - window_size = min(3, len(values) - 1) - if window_size > 0: - moving_avg = np.convolve(values, np.ones(window_size)/window_size, mode='valid') - - # Plot original and moving average - plt.plot(values, label=f'{col} (Original)') - plt.plot(np.arange(window_size-1, len(values)), moving_avg, label=f'{col} (Moving Avg)') - - plt.title(f'Moving Average Trends - {name}') - plt.legend() - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_trend_mavg_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"Moving Average Trends - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - - return documents - - def _create_comparative_visualizations(self, df: pd.DataFrame, name: str, workflow_id: str) -> List[Dict[str, Any]]: - """Create comparative visualizations for a DataFrame""" - documents = [] - - # 1. Look for categorical columns to use for grouping - cat_cols = df.select_dtypes(include=['object', 'category']).columns - - if len(cat_cols) > 0: - # Use the first categorical column with reasonable number of unique values - groupby_col = None - for col in cat_cols: - unique_count = df[col].nunique() - if 2 <= unique_count <= 10: # Reasonable number of categories - groupby_col = col - break - - if groupby_col: - # Find numeric columns to compare across groups - numeric_cols = df.select_dtypes(include=['number']).columns[:3] # Limit to first 3 - - if len(numeric_cols) > 0: - # 1. Bar chart comparing means - plt.figure(figsize=(12, 6)) - mean_by_group = df.groupby(groupby_col)[numeric_cols].mean() - mean_by_group.plot(kind='bar') - plt.title(f'Mean Comparison by {groupby_col} - {name}') - plt.xticks(rotation=45) - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_comp_bar_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"Mean Comparison by {groupby_col} - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - - # 2. 
Box plots for comparing distributions - plt.figure(figsize=(12, 8)) - for i, col in enumerate(numeric_cols, 1): - plt.subplot(len(numeric_cols), 1, i) - sns.boxplot(x=groupby_col, y=col, data=df) - plt.title(f'Distribution of {col} by {groupby_col}') - plt.xticks(rotation=45) - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_comp_box_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"Distribution Comparison - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - - # 3. Scatter plot comparing two numeric variables - numeric_cols = df.select_dtypes(include=['number']).columns - if len(numeric_cols) >= 2: - plt.figure(figsize=(10, 8)) - # Use first two numeric columns - x_col, y_col = numeric_cols[0], numeric_cols[1] - - scatter = plt.scatter(df[x_col], df[y_col]) - plt.title(f'Comparison of {x_col} vs {y_col} - {name}') - plt.xlabel(x_col) - plt.ylabel(y_col) - - # Add color if we have a categorical column - if len(cat_cols) > 0: - groupby_col = cat_cols[0] - if df[groupby_col].nunique() <= 10: # Reasonable number of categories - plt.figure(figsize=(10, 8)) - scatter = plt.scatter(df[x_col], df[y_col], c=pd.factorize(df[groupby_col])[0], cmap='viridis') - plt.title(f'Comparison of {x_col} vs {y_col} by {groupby_col} - {name}') - plt.xlabel(x_col) - plt.ylabel(y_col) - legend1 = plt.legend(scatter.legend_elements()[0], df[groupby_col].unique(), title=groupby_col) - plt.gca().add_artist(legend1) - - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_comp_scatter_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"Variable Comparison - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - - return documents - - def _create_predictive_visualizations(self, df: pd.DataFrame, name: str, workflow_id: str) -> List[Dict[str, Any]]: - """Create predictive visualizations for a DataFrame""" - documents = [] - - # Check for date/time columns for time series prediction - date_cols = df.select_dtypes(include=['datetime']).columns - - if len(date_cols) > 0: - date_col = date_cols[0] # Use the first date column - - # Sort by date - df_sorted = df.sort_values(by=date_col) - - # Find numeric columns to predict - numeric_cols = df.select_dtypes(include=['number']).columns[:2] # Limit to first 2 - - if len(numeric_cols) > 0: - plt.figure(figsize=(12, 8)) - - for i, col in enumerate(numeric_cols, 1): - plt.subplot(len(numeric_cols), 1, i) - - # Get values and dates - values = df_sorted[col].values - dates = df_sorted[date_col].values - - # Need minimum number of points for meaningful prediction - if len(values) >= 5: - # Use basic linear regression for prediction - # Convert dates to numeric values for regression - date_nums = np.array([(d - dates[0]).total_seconds() for d in dates]) - date_nums = date_nums / np.max(date_nums) # Normalize - - # Remove NaNs - mask = ~np.isnan(values) - if np.sum(mask) >= 3: # Need at least 3 points - x = date_nums[mask].reshape(-1, 1) - y = values[mask] - - # Fit linear regression - from sklearn.linear_model import LinearRegression - model = LinearRegression() - 
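                            # Ordinary least squares on the normalized time
                            # axis: x holds the date offsets scaled to [0, 1],
                            # y the observed values; the fitted line is
                            # extrapolated to 1.2 below to sketch a short
                            # forecast horizon.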
model.fit(x, y) - - # Predict on original range - y_pred = model.predict(x) - - # Extend for prediction - x_extended = np.linspace(0, 1.2, 100).reshape(-1, 1) - y_extended = model.predict(x_extended) - - # Convert x_extended back to dates for plotting - max_seconds = np.max([(d - dates[0]).total_seconds() for d in dates]) - future_seconds = x_extended.flatten() * max_seconds - future_dates = [dates[0] + pd.Timedelta(seconds=s) for s in future_seconds] - - # Plot - plt.plot(dates, values, 'o-', label='Actual') - plt.plot(future_dates, y_extended, '--', label='Predicted') - plt.axvline(x=dates[-1], color='r', linestyle=':', label='Current') - - plt.title(f'Prediction for {col}') - plt.xticks(rotation=45) - plt.legend() - - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_pred_time_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"Time Series Prediction - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - - # Regression prediction (feature vs target) - numeric_cols = df.select_dtypes(include=['number']).columns - if len(numeric_cols) >= 2: - plt.figure(figsize=(10, 8)) - - # Use first two numeric columns as feature and target - x_col, y_col = numeric_cols[0], numeric_cols[1] - - # Remove NaNs - df_clean = df[[x_col, y_col]].dropna() - - if len(df_clean) >= 5: # Need minimum points for regression - x = df_clean[x_col].values.reshape(-1, 1) - y = df_clean[y_col].values - - # Fit linear regression - from sklearn.linear_model import LinearRegression - model = LinearRegression() - model.fit(x, y) - - # Generate predictions - x_range = np.linspace(df_clean[x_col].min(), df_clean[x_col].max() * 1.1, 100).reshape(-1, 1) - y_pred = model.predict(x_range) - - # Plot - plt.scatter(df_clean[x_col], df_clean[y_col], label='Data Points') - plt.plot(x_range, y_pred, 'r--', label=f'Predicted {y_col}') - plt.title(f'Regression Prediction: {y_col} based on {x_col} - {name}') - plt.xlabel(x_col) - plt.ylabel(y_col) - plt.legend() - - # Add regression equation - slope = model.coef_[0] - intercept = model.intercept_ - plt.text(0.05, 0.95, f'{y_col} = {slope:.2f} * {x_col} + {intercept:.2f}', - transform=plt.gca().transAxes, fontsize=10, - verticalalignment='top') - - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_pred_reg_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"Regression Prediction - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - - return documents - - def _create_clustering_visualizations(self, df: pd.DataFrame, name: str, workflow_id: str) -> List[Dict[str, Any]]: - """Create clustering visualizations for a DataFrame""" - documents = [] - - # Need numeric columns for clustering - numeric_cols = df.select_dtypes(include=['number']).columns - if len(numeric_cols) >= 2: - # Select two numeric columns for 2D visualization - cols = numeric_cols[:2] - - # Remove NaNs - df_clean = df[cols].dropna() - - if len(df_clean) >= 5: # Need minimum points for clustering - # Normalize data - from sklearn.preprocessing import StandardScaler - scaler = StandardScaler() - data_scaled 
= scaler.fit_transform(df_clean) - - # Apply K-means clustering - from sklearn.cluster import KMeans - # Determine number of clusters (2-5 based on data size) - n_clusters = min(max(2, len(df_clean) // 10), 5) - kmeans = KMeans(n_clusters=n_clusters, random_state=42) - clusters = kmeans.fit_predict(data_scaled) - - # Add cluster labels to DataFrame - df_clean['Cluster'] = clusters - - # Create scatter plot with clusters - plt.figure(figsize=(10, 8)) - - # Plot clusters - scatter = plt.scatter(df_clean[cols[0]], df_clean[cols[1]], c=df_clean['Cluster'], cmap='viridis') - - # Plot centroids - centroids = scaler.inverse_transform(kmeans.cluster_centers_) - plt.scatter(centroids[:, 0], centroids[:, 1], marker='X', s=200, c='red', label='Centroids') - - plt.title(f'K-means Clustering ({n_clusters} clusters) - {name}') - plt.xlabel(cols[0]) - plt.ylabel(cols[1]) - legend1 = plt.legend(*scatter.legend_elements(), title="Clusters", loc='upper right') - plt.gca().add_artist(legend1) # Keep the cluster legend; a bare plt.legend() call would replace it instead of adding to it - plt.legend(loc='lower right') # Second legend picks up the labelled 'Centroids' marker - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_clust_kmeans_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"K-means Clustering - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - - # If we have more than 2 numeric columns, also create a PCA visualization - if len(numeric_cols) > 2: - from sklearn.decomposition import PCA - from sklearn.preprocessing import StandardScaler - from sklearn.cluster import KMeans # Imported locally so this branch is self-contained even when the 2D block above was skipped - - # Select more columns for PCA - pca_cols = numeric_cols[:min(len(numeric_cols), 5)] - - # Remove NaNs - df_pca = df[pca_cols].dropna() - - if len(df_pca) >= 5: - # Normalize data - pca_data = StandardScaler().fit_transform(df_pca) - - # Apply PCA to reduce to 2 dimensions - pca = PCA(n_components=2) - principal_components = pca.fit_transform(pca_data) - - # Create DataFrame with principal components - pca_df = pd.DataFrame(data=principal_components, columns=['PC1', 'PC2']) - - # Apply clustering to PCA results - n_clusters = min(max(2, len(df_pca) // 10), 5) # Recompute; the value from the 2D block only exists when that block ran - clusters = KMeans(n_clusters=n_clusters, random_state=42).fit_predict(pca_df) - pca_df['Cluster'] = clusters - - # Create scatter plot - plt.figure(figsize=(10, 8)) - scatter = plt.scatter(pca_df['PC1'], pca_df['PC2'], c=pca_df['Cluster'], cmap='viridis') - plt.title(f'PCA Clustering ({n_clusters} clusters) - {name}') - plt.xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.2%} variance)') - plt.ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.2%} variance)') - plt.legend(*scatter.legend_elements(), title="Clusters") - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_clust_pca_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"PCA Clustering - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - - return documents - - def _create_general_visualizations(self, df: pd.DataFrame, name: str, workflow_id: str) -> List[Dict[str, Any]]: - """Create general purpose visualizations for a DataFrame""" - documents = [] - - # 1. 
Data overview: numeric summary - numeric_cols = df.select_dtypes(include=['number']).columns - if len(numeric_cols) > 0: - # Create a bar chart of means for numeric columns - plt.figure(figsize=(12, 6)) - means = df[numeric_cols].mean().sort_values() - means.plot(kind='bar') - plt.title(f'Mean Values of Numeric Variables - {name}') - plt.xticks(rotation=45) - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_gen_means_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"Numeric Variables Summary - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - - # 2. Categorical data overview - cat_cols = df.select_dtypes(include=['object', 'category']).columns - if len(cat_cols) > 0: - # Select the first categorical column with reasonable cardinality - for col in cat_cols: - if df[col].nunique() <= 10: # Reasonable number of categories - plt.figure(figsize=(10, 6)) - value_counts = df[col].value_counts().sort_values(ascending=False) - value_counts.plot(kind='bar') - plt.title(f'Distribution of {col} - {name}') - plt.xticks(rotation=45) - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_gen_cat_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"Categorical Distribution - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - break # Only use the first suitable column - - # 3. Correlation matrix if we have multiple numeric columns - if len(numeric_cols) >= 2: - plt.figure(figsize=(10, 8)) - corr = df[numeric_cols].corr() - sns.heatmap(corr, annot=True, cmap='coolwarm', center=0) - plt.title(f'Correlation Matrix - {name}') - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_gen_corr_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"Correlation Matrix - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - - # 4. 
If we have date columns, show time-based visualization - date_cols = df.select_dtypes(include=['datetime']).columns - if len(date_cols) > 0 and len(numeric_cols) > 0: - date_col = date_cols[0] # Use the first date column - num_col = numeric_cols[0] # Use the first numeric column - - plt.figure(figsize=(12, 6)) - plt.plot(df[date_col], df[num_col], marker='o') - plt.title(f'{num_col} over Time - {name}') - plt.xticks(rotation=45) - plt.tight_layout() - - # Save figure - img_data = self._get_figure_as_base64() - plt.close() - - # Create document - doc_id = f"viz_gen_time_{uuid.uuid4()}" - doc = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": f"Time Series Overview - {name}", - "content_type": "image/png", - "size": len(img_data) - }, - "contents": [{ - "type": "image", - "data": img_data, - "format": "base64" - }] - } - documents.append(doc) - - return documents - - def _get_figure_as_base64(self) -> str: - """Convert current matplotlib figure to base64 string""" - buffer = io.BytesIO() - plt.savefig(buffer, format='png', dpi=self.chart_dpi) - buffer.seek(0) - image_png = buffer.getvalue() - buffer.close() - - # Convert to base64 - image_base64 = base64.b64encode(image_png).decode('utf-8') - return image_base64 - - - async def _generate_analysis(self, prompt: str, analysis_type: str) -> str: - """ - Generate analysis based on prompt and analysis type. - Enhanced to handle text-only analysis. - - Args: - prompt: The analysis prompt - analysis_type: Type of analysis - - Returns: - Generated analysis - """ - if not self.ai_service: - logging.warning("AI service not available for analysis generation") - return f"## Data Analysis ({analysis_type})\n\nUnable to generate analysis: AI service not available." - - # Create specialized prompt based on analysis type - system_prompt = self._get_analysis_system_prompt(analysis_type) - - # Determine if this is a data-based or text-based analysis - is_data_analysis = "DATA INSIGHTS" in prompt - - # Enhance the prompt with analysis-specific instructions - if is_data_analysis: - enhanced_prompt = f""" - Generate a detailed {analysis_type} analysis based on the following data: - - {prompt} - - Your analysis should include: - 1. A summary of the data - 2. Key findings and insights - 3. Supporting evidence and calculations - 4. Clear conclusions - 5. Recommendations where appropriate - - Format the analysis in Markdown with proper headings, lists, and tables. - """ - else: - # Text-based analysis instructions - enhanced_prompt = f""" - Generate a detailed {analysis_type} analysis of the following text content: - - {prompt} - - Your analysis should include: - 1. A summary of the main themes and topics - 2. Key insights and observations - 3. Analysis of structure, patterns, and relationships - 4. Clear conclusions and interpretations - 5. Recommendations or implications where appropriate - - Format the analysis in Markdown with proper headings, lists, and tables. 
- """ - - try: - content = await self.ai_service.call_api([ - {"role": "system", "content": system_prompt}, - {"role": "user", "content": enhanced_prompt} - ]) - - # Ensure there's a title at the top - if not content.strip().startswith("# "): - content = f"# {analysis_type.capitalize()} Analysis\n\n{content}" - - return content - except Exception as e: - return f"# {analysis_type.capitalize()} Analysis\n\nError generating analysis: {str(e)}" - - - def _get_analysis_system_prompt(self, analysis_type: str) -> str: - """ - Get specialized system prompt for specific analysis type. - Enhanced with text analysis capabilities. - - Args: - analysis_type: Type of analysis - - Returns: - System prompt - """ - base_prompt = self._get_system_prompt() - - # Add analysis-specific instructions - if analysis_type == "statistical": - return f"{base_prompt}\n\nFocus on statistical measures including mean, median, mode, variance, and distribution. Identify outliers and unusual data points. Present key statistics in tables where appropriate." - - elif analysis_type == "trend": - return f"{base_prompt}\n\nFocus on identifying trends over time, seasonality, and patterns in the data. Look for long-term movements, cyclical patterns, and turning points. Consider rate of change and growth rates." - - elif analysis_type == "comparative": - return f"{base_prompt}\n\nFocus on comparing different groups, categories, or time periods. Highlight similarities and differences. Use comparative metrics and relative measures rather than just absolute values." - - elif analysis_type == "predictive": - return f"{base_prompt}\n\nFocus on extrapolating trends and patterns to make predictions about future values. Discuss confidence levels and potential factors that could influence outcomes. Be clear about assumptions." - - elif analysis_type == "clustering": - return f"{base_prompt}\n\nFocus on identifying natural groupings or segments within the data. Describe the characteristics of each cluster and what distinguishes them. Consider similarities within groups and differences between groups." - - elif analysis_type == "textual": - return f"{base_prompt}\n\nFocus on analyzing the text content provided. Identify key themes, topics, and concepts. Analyze sentiment, tone, and perspective. Extract important relationships, arguments, or logical structures. Provide insights into the meaning and implications of the text." - - elif analysis_type == "summary": - return f"{base_prompt}\n\nFocus on providing a concise overview of the provided content. Identify the main points, key arguments, and essential information. Distill complex information into clear, digestible insights. Maintain objectivity while highlighting the most important elements." - - else: - return base_prompt - - - def _get_system_prompt(self) -> str: - """ - Get specialized system prompt for analyst agent. - Enhanced to handle text analysis better. - - Returns: - System prompt - """ - return f""" - You are {self.name}, a specialized {self.type} agent focused on data and text analysis. - - {self.description} - - When analyzing data: - 1. First, identify the data structure and key variables - 2. Look for patterns, trends, and outliers - 3. Provide statistical insights and evidence-based conclusions - 4. Highlight any important findings clearly - 5. Suggest visualizations that would help understand the data - - When analyzing text content: - 1. Identify key themes, concepts, and topics - 2. Extract important patterns and relationships - 3. 
Provide insights into the meaning and implications of the text - 4. Identify sentiment, tone, and perspective where relevant - 5. Organize findings in a logical, structured way - - For CSV data, interpret tables correctly and perform calculations accurately. - For textual data, extract key metrics, themes and relationships. - - Respond in a clear, analytical style, and format your findings in a structured report. - """ - - def send_analysis_result(self, analysis_content: str, sender_id: str, receiver_id: str, - task_id: str, analysis_data: Dict[str, Any] = None, - context_id: str = None) -> AgentMessage: - """ - Send analysis results using the protocol. - - Args: - analysis_content: Analysis content - sender_id: Sender ID - receiver_id: Receiver ID - task_id: Task ID - analysis_data: Additional analysis data - context_id: Context ID - - Returns: - Protocol message - """ - return self.protocol.create_result_message( - result_content=analysis_content, - sender_id=sender_id, - receiver_id=receiver_id, - task_id=task_id, - output_data=analysis_data, - result_format=self.result_format, - context_id=context_id - ) - - def send_error_message(self, error_description: str, sender_id: str, receiver_id: str = None, - error_details: Dict[str, Any] = None, context_id: str = None) -> AgentMessage: - """ - Send error message using the protocol. - - Args: - error_description: Error description - sender_id: Sender ID - receiver_id: Receiver ID - error_details: Error details - context_id: Context ID - - Returns: - Protocol message - """ - return self.protocol.create_error_message( - error_description=error_description, - sender_id=sender_id, - receiver_id=receiver_id, - error_type="analysis_error", - error_details=error_details, - context_id=context_id - ) - - def send_document_request_message(self, document_description: str, sender_id: str, receiver_id: str, - filters: Dict[str, Any] = None, context_id: str = None) -> AgentMessage: - """ - Send document request using the protocol. - - Args: - document_description: Document description - sender_id: Sender ID - receiver_id: Receiver ID - filters: Document filters - context_id: Context ID - - Returns: - Protocol message - """ - return self.protocol.create_document_request_message( - document_description=document_description, - sender_id=sender_id, - receiver_id=receiver_id, - filters=filters, - context_id=context_id - ) - -# Singleton instance -_analyst_agent = None - -def get_analyst_agent(): - """Returns a singleton instance of the data analyst agent""" - global _analyst_agent - if _analyst_agent is None: - _analyst_agent = AnalystAgent() - return _analyst_agent \ No newline at end of file diff --git a/modules/agentservice_agent_coder.py b/modules/agentservice_agent_coder.py deleted file mode 100644 index 9b9e3ebf..00000000 --- a/modules/agentservice_agent_coder.py +++ /dev/null @@ -1,1399 +0,0 @@ -""" -CoderAgent - A unified agent for developing and executing Python code. -Includes code execution capabilities previously in separate modules. -Enhanced with auto-correction loop for handling execution errors. 
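-Code is produced by an AI service, run in an isolated per-workflow virtual environment, and regenerated on failure up to a configurable number of correction attempts.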
-""" - -import logging -import json -import re -import uuid -import traceback -import os -import subprocess -import tempfile -import shutil -import sys -import pandas as pd -from datetime import datetime -from typing import List, Dict, Any, Optional, Tuple - -from modules.agentservice_base import BaseAgent -from modules.agentservice_utils import FileUtils, WorkflowUtils, MessageUtils, LoggingUtils -from connectors.connector_aichat_openai import ChatService -from modules.agentservice_protocol import AgentMessage, AgentCommunicationProtocol - -logger = logging.getLogger(__name__) - -# Existing SimpleCodeExecutor class remains unchanged -class SimpleCodeExecutor: - # ... existing code ... - """ - A simplified executor that runs Python code in isolated virtual environments. - """ - - # Class variable to store workflow environments for persistence - _workflow_environments = {} - - def __init__(self, - workflow_id: str = None, - timeout: int = 30, - max_memory_mb: int = 512, - requirements: List[str] = None, - blocked_packages: List[str] = None, - ai_service = None): - """ - Initialize the SimpleCodeExecutor. - - Args: - workflow_id: Optional workflow ID for persistent environments - timeout: Maximum execution time in seconds - max_memory_mb: Maximum memory in MB - requirements: List of packages to install - blocked_packages: List of blocked packages - """ - self.workflow_id = workflow_id - self.timeout = timeout - self.max_memory_mb = max_memory_mb - self.temp_dir = None - self.requirements = requirements or [] - self.blocked_packages = blocked_packages or [ - "cryptography", "flask", "django", "tornado", # Security risks - "tensorflow", "pytorch", "scikit-learn" # Resource intensive - ] - self.is_persistent = workflow_id is not None - self.ai_service = ai_service - - @classmethod - def get_workflow_environment(cls, workflow_id: str) -> Optional[str]: - """Get an existing workflow environment path if it exists.""" - return cls._workflow_environments.get(workflow_id) - - @classmethod - def set_workflow_environment(cls, workflow_id: str, env_path: str) -> None: - """Store a workflow environment path.""" - cls._workflow_environments[workflow_id] = env_path - - def _create_venv(self) -> str: - """Creates a virtual environment and returns the path.""" - # Check for existing environment if using workflow_id - if self.workflow_id: - self.is_persistent = True - existing_env = self.get_workflow_environment(self.workflow_id) - if existing_env and os.path.exists(existing_env): - logger.info(f"Reusing existing virtual environment: {existing_env}") - self.temp_dir = os.path.dirname(existing_env) - return existing_env - else: - logger.info(f"Creating new environment for workflow {self.workflow_id}") - - # Create a new environment - venv_parent_dir = tempfile.mkdtemp(prefix="simple_exec_") - self.temp_dir = venv_parent_dir - venv_path = os.path.join(venv_parent_dir, "venv") - - try: - # Create virtual environment - logger.info(f"Creating new virtual environment in {venv_path}") - subprocess.run([sys.executable, "-m", "venv", venv_path], - check=True, - capture_output=True) - - # Store the environment path if this is for a specific workflow - if self.workflow_id: - logger.info(f"Registering new persistent environment for workflow {self.workflow_id}") - self.set_workflow_environment(self.workflow_id, venv_path) - - return venv_path - except subprocess.CalledProcessError as e: - logger.error(f"Error creating virtual environment: {e}") - raise RuntimeError(f"Could not create venv: {e}") - - def 
_get_pip_executable(self, venv_path: str) -> str: - """Gets the path to the pip executable in the virtual environment.""" - if os.name == 'nt': # Windows - return os.path.join(venv_path, "Scripts", "pip.exe") - else: # Unix/Linux - return os.path.join(venv_path, "bin", "pip") - - def _get_python_executable(self, venv_path: str) -> str: - """Gets the path to the Python executable in the virtual environment.""" - if os.name == 'nt': # Windows - return os.path.join(venv_path, "Scripts", "python.exe") - else: # Unix/Linux - return os.path.join(venv_path, "bin", "python") - - def _filter_requirements(self, requirements: List[str]) -> List[str]: - """Filter out blocked packages and invalid requirements.""" - if not requirements: - return [] - - filtered_requirements = [] - for req in requirements: - # Skip empty, comment lines, or invalid requirements - req = req.strip() - if not req or req.startswith('#') or '```' in req or req in ['`', '``', '```']: - logging.warning(f"Skipping comment or invalid requirement: {req}") - continue - - # Extract package name from requirement spec - import re - package_name = re.split(r'[=<>]', req)[0].strip().lower() - - if package_name in self.blocked_packages: - logging.warning(f"Blocked package detected: {package_name}") - continue - - filtered_requirements.append(req) - - return filtered_requirements - - def _install_packages(self, venv_path: str, requirements: List[str]) -> bool: - """Install packages in the virtual environment.""" - if not requirements: - return True - - # Filter requirements - filtered_requirements = self._filter_requirements(requirements) - if not filtered_requirements: - logger.info("No allowed packages to install") - return True - - # Get pip executable - pip_executable = self._get_pip_executable(venv_path) - - # Install packages - try: - logger.info(f"Installing packages: {', '.join(filtered_requirements)}") - result = subprocess.run( - [pip_executable, "install"] + filtered_requirements, - check=True, - capture_output=True, - text=True, - timeout=300 - ) - logger.info("Package installation successful") - return True - except subprocess.CalledProcessError as e: - logger.error(f"Error during package installation: {e.stderr}") - return False - except Exception as e: - logger.error(f"Error during package installation: {str(e)}") - return False - - def _extract_required_packages(self, code: str) -> List[str]: - # Extract required packages from requirements comments in the 1st code line - packages = set() - # Check for special REQUIREMENTS comment - specific format we're looking for - first_lines = code.split('\n')[:5] # Only check first few lines - for line in first_lines: - if line.strip().startswith("# REQUIREMENTS:"): - req_str = line.replace("# REQUIREMENTS:", "").strip() - for pkg in req_str.split(','): - if pkg.strip(): - packages.add(pkg.strip()) - return list(packages) - - - def execute_code(self, code: str, input_data: Dict[str, Any] = None) -> Dict[str, Any]: - """ - Execute Python code in an isolated environment using a simple approach. - - Args: - code: Python code to execute - input_data: Optional input data for the code - - Returns: - Dictionary with execution results - """ - logger.info(f"Executing code with workflow_id: {self.workflow_id}") - - # Create or reuse virtual environment - venv_path = self._create_venv() #creating self.temp_dir! 
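- # Note: _create_venv() also sets self.temp_dir; when a workflow_id is given, an existing venv is reused so previously installed packages persist across executions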
- - # Create input_data directory for file handling - input_data_dir = os.path.join(self.temp_dir, "input_data") # Temp dir is at root - os.makedirs(input_data_dir, exist_ok=True) - - # Extract and install required packages - all_requirements = [] - - # Add explicitly provided requirements - # if self.requirements: - # all_requirements.extend(self.requirements) - - # Extract requirements from code - extracted_requirements = self._extract_required_packages(code) - if extracted_requirements: - all_requirements.extend(extracted_requirements) - logger.info(f"Extracted required packages from code: {', '.join(extracted_requirements)}") - - # Install packages if needed - if all_requirements: - logger.info(f"Installing {len(all_requirements)} packages") - install_success = self._install_packages(venv_path, all_requirements) - if not install_success: - # Return error if package installation failed - return { - "success": False, - "output": "", - "error": f"Failed to install required packages: {', '.join(all_requirements)}", - "result": None, - "exit_code": -1 - } - - # Process extracted document content if available - if input_data and "extracted_documents" in input_data: - for doc in input_data["extracted_documents"]: - doc_name = doc["name"] - doc_content = doc["content"] - doc_type = doc["type"] - - # Create file path - file_path = os.path.join(input_data_dir, doc_name) - - try: - # Write content to file - with open(file_path, 'w', encoding='utf-8') as f: - f.write(doc_content) - - # Add to files list if not already there - if "files" not in input_data: - input_data["files"] = [] - - input_data["files"].append({ - "id": f"extracted_{doc_name}", - "name": doc_name, - "type": doc_type, - "path": file_path - }) - - logger.info(f"Created file from extracted content: {doc_name}") - except Exception as e: - logger.error(f"Error creating file from extracted content: {str(e)}") - - # Copy input files to input_data directory if provided - if input_data and "files" in input_data: - for file_info in input_data.get("files", []): - # Skip files we just created from extracted content - if file_info.get("id", "").startswith("extracted_"): - continue - - source_path = file_info.get("path", "") - logger.info(f"Attempting to copy file from: {source_path}") - logger.info(f"File exists: {os.path.exists(source_path)}") - if source_path and os.path.exists(source_path): - # Get just the filename - file_name = os.path.basename(source_path) - # Create destination path in input_data directory - dest_path = os.path.join(input_data_dir, file_name) - - try: - # Copy the file - shutil.copy2(source_path, dest_path) - logger.info(f"Copied file to input_data directory: {dest_path}") - except Exception as e: - logger.error(f"Error copying file {source_path}: {str(e)}") - - # Create a file for the code - code_id = uuid.uuid4().hex[:8] - code_file = os.path.join(self.temp_dir, f"code_{code_id}.py") - - # Write the code as-is without injecting additional loader code - with open(code_file, "w", encoding="utf-8") as f: - f.write(code) - - # Get Python executable - python_executable = self._get_python_executable(venv_path) - logger.info(f"Using Python executable: {python_executable}") - - # Execute code - try: - # Run the code from root dir - working_dir = os.path.dirname(code_file) # This should be the project root - logger.info(f"DEBUG PATH Root: {os.getcwd()} Code: {code_file} Working Dir: {working_dir}") - logger.debug(f"|{code}|") - process = subprocess.run( - [python_executable, code_file], - timeout=self.timeout, - 
capture_output=True, - text=True, - cwd=working_dir - ) - - # Process the output - stdout = process.stdout - stderr = process.stderr - - # Get result from stdout if available - result_data = None - if process.returncode == 0 and stdout: - try: - # Look for the last line that could be JSON - for line in reversed(stdout.strip().split('\n')): - line = line.strip() - if line and line[0] in '{[' and line[-1] in '}]': - try: - result_data = json.loads(line) - # Successfully parsed JSON result, use it - break - except json.JSONDecodeError: - # Not valid JSON, continue to next line - continue - except Exception as e: - logger.warning(f"Failed to parse result from stdout: {str(e)}") - - # Create result dictionary - execution_result = { - "success": process.returncode == 0, - "output": stdout, - "error": stderr if process.returncode != 0 else "", - "result": result_data, - "exit_code": process.returncode - } - - except subprocess.TimeoutExpired: - logger.error(f"Execution timed out after {self.timeout} seconds") - execution_result = { - "success": False, - "output": "", - "error": f"Execution timed out (timeout after {self.timeout} seconds)", - "result": None, - "exit_code": -1 - } - except Exception as e: - logger.error(f"Execution error: {str(e)}") - execution_result = { - "success": False, - "output": "", - "error": f"Execution error: {str(e)} for code {code}", - "result": None, - "exit_code": -1 - } - - # Clean up temporary code file - try: - if os.path.exists(code_file): - os.remove(code_file) - except Exception as e: - logger.warning(f"Error cleaning up temporary code file: {e}") - - return execution_result - - def cleanup(self): - """Clean up temporary resources.""" - # Skip cleanup for persistent environments - if self.is_persistent and self.workflow_id: - logger.info(f"Skipping cleanup for persistent environment of workflow {self.workflow_id}") - return - - # Clean up temporary directory - if self.temp_dir and os.path.exists(self.temp_dir): - try: - shutil.rmtree(self.temp_dir) - logger.info(f"Deleted temporary directory: {self.temp_dir}") - except Exception as e: - logger.warning(f"Could not delete temporary directory {self.temp_dir}: {e}") - - def __del__(self): - """Clean up during garbage collection.""" - self.cleanup() - - -class CoderAgent(BaseAgent): - """Agent for developing and executing Python code with auto-correction capabilities""" - - def __init__(self): - """Initialize the coder agent with proper type and capabilities""" - super().__init__() - - # Agent metadata - self.id = "coder" - self.type = "coder" - self.name = "Python Code Agent" - self.description = "Develops and executes Python code" - self.capabilities = "code_development,data_processing,file_processing,automation" - self.result_format = "python_code" - - # Initialize AI service - self.ai_service = None - - # Add document capabilities - self.supports_documents = True - self.document_capabilities = ["read", "reference", "create"] - self.required_context = ["workflow_id"] - self.document_handler = None - - # Initialize protocol - self.protocol = AgentCommunicationProtocol() - - # Init utilities - self.file_utils = FileUtils() - self.message_utils = MessageUtils() - - # Executor settings - self.executor_timeout = 60 # seconds - self.executor_memory_limit = 512 # MB - - # AI service settings - self.ai_temperature = 0.1 # Lower temperature for more deterministic code generation - self.ai_max_tokens = 2000 # Enough tokens for complex code - - # Auto-correction settings (new) - self.max_correction_attempts = 3 # Maximum 
number of correction attempts - self.correction_temperature = 0.1 # Even lower temperature for corrections - - def get_agent_info(self) -> Dict[str, Any]: - """Get agent information for agent registry""" - info = super().get_agent_info() - info.update({ - "metadata": { - "timeout": self.executor_timeout, - "memory_limit": self.executor_memory_limit, - "max_correction_attempts": self.max_correction_attempts - } - }) - return info - - def set_document_handler(self, document_handler): - """Set the document handler for file operations""" - self.document_handler = document_handler - - - async def process_message(self, message: Dict[str, Any], context: Dict[str, Any] = None) -> Dict[str, Any]: - """ - Process a message to develop and execute Python code with auto-correction. - - Args: - message: The message to process - context: Additional context information - - Returns: - Response message - """ - # Extract workflow_id from context or message - workflow_id = context.get("workflow_id") if context else message.get("workflow_id", "unknown") - - # Get or create logging_utils - log_func = context.get("log_func") if context else None - logging_utils = LoggingUtils(workflow_id, log_func) - - # Create response message - response = { - "role": "assistant", - "content": "", - "agent_id": self.id, - "agent_type": self.type, - "agent_name": self.name, - "workflow_id": workflow_id, - "documents": [] - } - - # Send status update using protocol - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Starting code generation and execution", - sender_id=self.id, - status="in_progress", - progress=0.0, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - try: - # Extract content and documents - content = message.get("content", "") - documents = message.get("documents", []) - - code_to_execute = None - requirements = [] - - # Generate code based on the message content using AI - logging_utils.info("Generating new code with AI", "agents") - - # Log status update - 10% progress - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Analyzing requirements and generating code", - sender_id=self.id, - status="in_progress", - progress=0.1, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Generate code using AI - code_to_execute, requirements = await self._generate_code_from_prompt(content, documents) - if not code_to_execute: - logging_utils.warning("AI could not generate code", "agents") - response["content"] = "I couldn't generate executable code based on your request. Please provide more detailed instructions." 
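- # Early exit: no code was generated, so there is nothing to execute; finalize and return the explanation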
- self.message_utils.finalize_message(response) - return response - - logging_utils.info(f"Code generated with AI ({len(code_to_execute)} characters)", "agents") - - # Log status update - 30% progress - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Code generated, preparing for execution", - sender_id=self.id, - status="in_progress", - progress=0.3, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Create code file document - code_doc_id = f"code_{uuid.uuid4()}" - code_filename = "generated_code.py" - - code_document = { - "id": code_doc_id, - "source": { - "type": "generated", - "id": code_doc_id, - "name": code_filename, - "content_type": "text/x-python", - "size": len(code_to_execute) - }, - "contents": [{ - "type": "text", - "text": code_to_execute, - "is_extracted": True - }] - } - - # Add code document to response - response["documents"].append(code_document) - logging_utils.info(f"Added code file '{code_filename}' to response", "agents") - - # Execute the code with auto-correction loop - if code_to_execute: - # Log status update - 40% progress - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Setting up execution environment", - sender_id=self.id, - status="in_progress", - progress=0.4, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Prepare execution context - execution_context = { - "workflow_id": workflow_id, - "documents": documents, - "message": message, - "log_func": log_func - } - - # Log status update - 50% progress - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Executing code", - sender_id=self.id, - status="in_progress", - progress=0.5, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Enhanced execution with auto-correction - result, attempts_info = await self._execute_with_auto_correction( - code_to_execute, - requirements, - execution_context, - content, # Original prompt/message - logging_utils - ) - - # Prepare response based on the final result (success or failure) - if result.get("success", False): - # Log status update - 80% progress - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Code executed successfully, preparing results", - sender_id=self.id, - status="in_progress", - progress=0.8, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Code execution successful - output = result.get("output", "") - execution_result = result.get("result") - logging_utils.info("Code executed successfully", "execution") - - # Format response content - response_content = f"## Code executed successfully" - - # Add correction attempts info if any corrections were made - if attempts_info and len(attempts_info) > 1: - response_content += f" (after {len(attempts_info)-1} correction attempts)" - - response_content += "\n\n" - - # Include the executed code - response_content += f"### Final Executed Code\n\n```python\n{attempts_info[-1]['code']}\n```\n\n" - - # Include the output if available - if output: - response_content += f"### Output\n\n```\n{output}\n```\n\n" - - # Create document with results - data_document = self._create_document_from_result(execution_result) - if data_document: - response["documents"].append(data_document) - - # Include 
the execution result if available - if execution_result: - result_str = json.dumps(execution_result, indent=2) if isinstance(execution_result, (dict, list)) else str(execution_result) - response_content += f"### Result\n\n```\n{result_str}\n```\n\n" - - # Include correction history if any corrections were made - if attempts_info and len(attempts_info) > 1: - response_content += f"### Code Correction History\n\n" - for i, attempt in enumerate(attempts_info[:-1], 1): - response_content += f"**Attempt {i}:**\n\n" - response_content += f"```python\n{attempt['code']}\n```\n\n" - response_content += f"**Error:**\n\n```\n{attempt['error']}\n```\n\n" - - # Create a correction attempt document for each attempt - attempt_doc_id = f"correction_{uuid.uuid4()}" - attempt_filename = f"correction_attempt_{i}.py" - - attempt_document = { - "id": attempt_doc_id, - "source": { - "type": "generated", - "id": attempt_doc_id, - "name": attempt_filename, - "content_type": "text/x-python", - "size": len(attempt['code']) - }, - "contents": [{ - "type": "text", - "text": attempt['code'], - "is_extracted": True - }] - } - - # Add correction document to response - response["documents"].append(attempt_document) - logging_utils.info(f"Added correction attempt file '{attempt_filename}' to response", "agents") - - response["content"] = response_content - - # Process any files created by the code - if isinstance(execution_result, dict) and "created_files" in execution_result: - created_files = execution_result.get("created_files", []) - for file_info in created_files: - file_id = file_info.get("id") - if file_id: - logging_utils.info(f"Adding created file {file_info.get('name', file_id)} to documents", "files") - # Add file document to the response - doc = { - "id": f"doc_{uuid.uuid4()}", - "source": file_info, - "type": "file" - } - response["documents"].append(doc) - else: - # Code execution failed after all attempts - error = result.get("error", "Unknown error") - logging_utils.error(f"Error during code execution after all correction attempts: {error}", "execution") - - # Format error response - response_content = f"## Error during code execution\n\n" - - # Include correction attempts information - if attempts_info: - response_content += f"I made {len(attempts_info)} attempts to correct the code, but couldn't resolve all issues.\n\n" - - # Add the final attempt - response_content += f"### Final Code Attempt\n\n```python\n{attempts_info[-1]['code']}\n```\n\n" - response_content += f"### Final Error\n\n```\n{attempts_info[-1]['error']}\n```\n\n" - - # Add recommendation based on error - response_content += self.get_error_recommendation(error) - - # Add correction history - if len(attempts_info) > 1: - response_content += f"\n### Code Correction History\n\n" - for i, attempt in enumerate(attempts_info[:-1], 1): - response_content += f"**Attempt {i}:**\n\n" - response_content += f"```python\n{attempt['code']}\n```\n\n" - response_content += f"**Error:**\n\n```\n{attempt['error']}\n```\n\n" - - # Create a correction attempt document for each attempt - attempt_doc_id = f"correction_{uuid.uuid4()}" - attempt_filename = f"correction_attempt_{i}.py" - - attempt_document = { - "id": attempt_doc_id, - "source": { - "type": "generated", - "id": attempt_doc_id, - "name": attempt_filename, - "content_type": "text/x-python", - "size": len(attempt['code']) - }, - "contents": [{ - "type": "text", - "text": attempt['code'], - "is_extracted": True - }] - } - - # Add correction document to response - 
response["documents"].append(attempt_document) - logging_utils.info(f"Added correction attempt file '{attempt_filename}' to response", "agents") - else: - # Just show the code and error - response_content += f"### Executed Code\n\n```python\n{code_to_execute}\n```\n\n" - response_content += f"### Error\n\n```\n{error}\n```\n\n" - - # Add recommendation based on error - response_content += self.get_error_recommendation(error) - - response["content"] = response_content - else: - # No code to execute - response["content"] = "I couldn't find or generate executable code. Please provide Python code or explain your requirements more clearly." - - # Finalize response - self.message_utils.finalize_message(response) - - # Log completion - 100% progress - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Code execution complete", - sender_id=self.id, - status="completed", - progress=1.0, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Log success - logging_utils.info("CoderAgent has successfully processed the request", "agents") - - return response - - except Exception as e: - error_msg = f"Error during processing by the CoderAgent: {str(e)}" - logging_utils.error(error_msg, "error") - - # Create error response - response["content"] = f"## Processing Error\n\n```\n{error_msg}\n\n{traceback.format_exc()}\n```" - self.message_utils.finalize_message(response) - - # Log error status - if log_func: - status_message = self.protocol.create_status_update_message( - status_description=f"Error during code execution: {str(e)}", - sender_id=self.id, - status="error", - progress=1.0, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "error", self.id, self.name) - - return response - - - - - def _create_document_from_result(self, execution_result, output_format="json"): - """ - Create a document object from execution results - - Args: - execution_result: The data returned from code execution - output_format: Desired format (json, csv, etc.) 
- - Returns: - Document object for passing to other agents - """ - if not execution_result: - return None - - doc_id = f"data_{uuid.uuid4()}" - - # Determine filename and content type based on the data - if isinstance(execution_result, pd.DataFrame): - # Handle DataFrame result - filename = "processed_data.csv" - content_type = "text/csv" - content = execution_result.to_csv(index=False) - elif isinstance(execution_result, dict) or isinstance(execution_result, list): - # Handle dictionary or list result - filename = "processed_data.json" - content_type = "application/json" - content = json.dumps(execution_result) - elif isinstance(execution_result, str): - # Try to determine if string is JSON, CSV, or plain text - if execution_result.strip().startswith('{') or execution_result.strip().startswith('['): - filename = "processed_data.json" - content_type = "application/json" - elif ',' in execution_result and '\n' in execution_result: - filename = "processed_data.csv" - content_type = "text/csv" - else: - filename = "processed_data.txt" - content_type = "text/plain" - content = str(execution_result) - else: - # Default case for other types - filename = "processed_data.txt" - content_type = "text/plain" - content = str(execution_result) - - # Create document object - document = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": filename, - "content_type": content_type, - }, - "contents": [{ - "type": "text", - "text": content, - "is_extracted": True - }] - } - - return document - - async def _execute_with_auto_correction( - self, - initial_code: str, - requirements: List[str], - context: Dict[str, Any], - original_prompt: str, - logging_utils: LoggingUtils = None - ) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]: - """ - Execute code with automatic error correction and retries. - - Args: - initial_code: The initial Python code to execute - requirements: List of required packages - context: Additional context for execution - original_prompt: The original user request/prompt - logging_utils: Optional logging utility - - Returns: - Tuple of (final execution result, list of attempt info dictionaries) - """ - # Initialize tracking data - current_code = initial_code - current_requirements = requirements.copy() if requirements else [] - attempts_info = [] - - # Execute with correction loop - for attempt in range(1, self.max_correction_attempts + 1): - if logging_utils: - if attempt == 1: - logging_utils.info(f"Executing code (attempt {attempt}/{self.max_correction_attempts})", "execution") - else: - logging_utils.info(f"Executing corrected code (attempt {attempt}/{self.max_correction_attempts})", "execution") - - # Execute the current code version - result = await self._execute_code(current_code, current_requirements, context) - - # Record attempt information - attempts_info.append({ - "attempt": attempt, - "code": current_code, - "error": result.get("error", ""), - "success": result.get("success", False) - }) - - # Check if execution was successful - if result.get("success", False): - # Success! 
Return the result and attempt info - return result, attempts_info - - # Failed execution - check if we've reached the maximum attempt limit - if attempt >= self.max_correction_attempts: - if logging_utils: - logging_utils.warning(f"Maximum correction attempts ({self.max_correction_attempts}) reached, giving up", "execution") - break - - # Need to correct the code - generate a fix based on the error - error_message = result.get("error", "Unknown error") - - if logging_utils: - logging_utils.info(f"Attempting to fix code error: {error_message[:200]}...", "execution") - - # Generate corrected code - corrected_code, new_requirements = await self._generate_code_correction( - current_code, - error_message, - original_prompt, - current_requirements - ) - - # Update for next attempt - if corrected_code: - current_code = corrected_code - - # Add any new requirements - if new_requirements: - for req in new_requirements: - if req not in current_requirements: - current_requirements.append(req) - if logging_utils: - logging_utils.info(f"Added new requirement: {req}", "execution") - else: - # Could not generate correction, break out of the loop - if logging_utils: - logging_utils.warning("Could not generate code correction, giving up", "execution") - break - - # If we get here, all attempts failed - return the last result and attempt info - return result, attempts_info - - async def _generate_code_correction( - self, - code: str, - error_message: str, - original_prompt: str, - current_requirements: List[str] = None - ) -> Tuple[str, List[str]]: - """ - Generate a corrected version of code based on error messages. - - Args: - code: The code that produced errors - error_message: The error message to fix - original_prompt: The original task/requirements - current_requirements: List of currently required packages - - Returns: - Tuple of (corrected code, new requirements list) - """ - try: - # Create a detailed prompt for code correction - correction_prompt = f"""You need to fix an error in Python code. The code was written for this task: - -ORIGINAL TASK: -{original_prompt} - -CURRENT CODE: -```python -{code} -``` - -ERROR MESSAGE: -``` -{error_message} -``` - -CURRENT REQUIREMENTS: {', '.join(current_requirements) if current_requirements else "None"} - -Your task is to analyze the error and provide a corrected version of the code. -Focus specifically on fixing the error while preserving the original functionality. - -Common fixes might include: -- Fixing syntax errors (missing parentheses, indentation, etc.) -- Resolving import errors by adding appropriate requirements -- Correcting file paths or handling file not found errors -- Adding error handling for specific edge cases -- Fixing logical errors in the code - -FORMAT INSTRUCTIONS: -1. Provide ONLY the complete fixed Python code without ANY explanation -2. DO NOT include code block markers like ```python or ``` -3. DO NOT explain what the code does before or after it -4. DO NOT include any text that is not valid Python code -5. Start your response directly with the valid Python code -6. End your response with valid Python code - -If you need to add new required packages, place them in a specially formatted comment at the top of your code like this: -# REQUIREMENTS: package1,package2,package3 - -Your entire response must be valid Python that can be executed without modification. -""" - - # Create messages for the API - messages = [ - {"role": "system", "content": "You are a Python debugging expert. 
You provide ONLY clean, fixed Python code without any explanations, markdown formatting, or non-code text. Your response should be nothing but valid, fixed Python code that can be executed directly."}, - {"role": "user", "content": correction_prompt} - ] - - # Call the API with very low temperature for deterministic fixes - generated_content = await self.ai_service.call_api( - messages, - temperature=self.correction_temperature, - max_tokens=self.ai_max_tokens - ) - - # Clean the generated content to ensure it's only valid Python code - fixed_code = self._clean_code(generated_content) - - # Extract requirements from special comment at the top of the code - new_requirements = [] - for line in fixed_code.split('\n'): - if line.strip().startswith("# REQUIREMENTS:"): - req_str = line.replace("# REQUIREMENTS:", "").strip() - new_requirements = [r.strip() for r in req_str.split(',') if r.strip()] - break - - return fixed_code, new_requirements - - except Exception as e: - logging.error(f"Error generating code correction: {str(e)}", exc_info=True) - # Return None to indicate failure - return None, [] - - def _clean_code(self, code: str) -> str: - """ - Clean up code by removing markdown code block markers and other formatting artifacts. - - Args: - code: The code string to clean - - Returns: - Cleaned code string - """ - import re - - # Remove code block markers at beginning/end - code = re.sub(r'^```(?:python)?\s*', '', code) - code = re.sub(r'```\s*$', '', code) - - # Remove any trailing markdown code blocks that might have been added by the AI - lines = code.split('\n') - clean_lines = [] - - # Flag to track if we're in a trailing markdown section - in_trailing_markdown = False - - for line in reversed(lines): - stripped = line.strip() - - # Check if this line contains only backticks (``` or ` or ``) - if re.match(r'^`{1,3}$', stripped): - in_trailing_markdown = True - continue - - # Check if this is a markdown comment or note - if in_trailing_markdown and (stripped.startswith('#') or - stripped.lower().startswith('note:') or - stripped.lower().startswith('example:')): - continue - - # If we've reached actual code, stop considering trailing markdown - if stripped and not in_trailing_markdown: - in_trailing_markdown = False - - # Add this line if it's not part of trailing markdown - if not in_trailing_markdown: - clean_lines.insert(0, line) - - # Join the lines back together - clean_code = '\n'.join(clean_lines) - - # Final cleanup for any stray backticks - clean_code = re.sub(r'`{1,3}\s*$', '', clean_code) - - return clean_code.strip() - - async def _generate_code_from_prompt(self, prompt: str, documents: List[Dict[str, Any]]) -> Tuple[str, List[str]]: - """ - Generate Python code from a prompt using AI service. 
- - Args: - prompt: The prompt to generate code from - documents: Documents associated with the prompt - - Returns: - Tuple of (generated Python code, required packages) - """ - try: - # Prepare a prompt for code generation - ai_prompt = f"""Generate Python code to solve the following task: -{prompt} - -Available input files: -""" - # Add information about available documents - if documents: - for i, doc in enumerate(documents): - source = doc.get("source", {}) - doc_name = source.get("name", f"Document {i+1}") - doc_type = source.get("content_type", "unknown") - doc_id = source.get("id", "") - - ai_prompt += f"- {doc_name} (type: {doc_type}, id: {doc_id}, path: './input_data/{doc_name}')\n" - else: - ai_prompt += "No input files available.\n" - - ai_prompt += """ -IMPORTANT REQUIREMENTS: -1. Your code MUST define a 'result' variable to store the final output of your code. -2. At the end of your script, print the result variable. -3. Make your 'result' variable a dictionary or another JSON-serializable data structure that contains all relevant output. -4. Input files are accessible in the './input_data/' directory. -5. Keep code well-documented with comments explaining key operations. -6. Make your code complete and self-contained. -7. Include proper error handling. - -FORMAT INSTRUCTIONS: -- Provide ONLY the Python code without ANY introduction, explanation, or conclusion text -- DO NOT include code block markers like ```python or ``` -- DO NOT explain what the code does before or after it -- DO NOT include any text that is not valid Python code -- Start your response directly with valid Python code -- End your response with valid Python code - -For required packages, place them in a specially formatted comment at the top of your code on one line like this: -# REQUIREMENTS: pandas,numpy,matplotlib,requests - -Your entire response must be valid Python that can be executed without modification. -""" - - # Create messages for the API - messages = [ - {"role": "system", "content": "You are a Python code generator that provides ONLY clean, executable Python code without any explanations, markdown formatting, or non-code text. 
Your response should be nothing but valid Python code that can be executed directly."}, - {"role": "user", "content": ai_prompt} - ] - - # Call the API - logging.info(f"Calling AI API to generate code") - generated_content = await self.ai_service.call_api(messages, temperature=self.ai_temperature, max_tokens=self.ai_max_tokens) - - # Clean the generated content to ensure it's only valid Python code - code = self._clean_code(generated_content) - - # Extract requirements from special comment at the top of the code - requirements = [] - for line in code.split('\n'): - if line.strip().startswith("# REQUIREMENTS:"): - req_str = line.replace("# REQUIREMENTS:", "").strip() - requirements = [r.strip() for r in req_str.split(',') if r.strip()] - break - - return code, requirements - - except Exception as e: - logging.error(f"Error generating code with AI: {str(e)}", exc_info=True) - # Return basic error handling code and no requirements - error_str = str(e).replace('"', '\\"') - return f""" -# Error during code generation -print(f"An error occurred during code generation: {error_str}") -# Return an error result -result = {{"error": "Code generation failed", "message": "{error_str}"}} -""", [] - - async def _execute_code(self, code: str, requirements: List[str] = None, context: Dict[str, Any] = None) -> Dict[str, Any]: - """ - Execute Python code using the SimpleCodeExecutor. - - Args: - code: The Python code to execute - requirements: List of required packages - context: Additional context for execution - - Returns: - Result of code execution - """ - # Get workflow ID and set up logging - workflow_id = context.get("workflow_id", "") if context else "" - logging_utils = None - if "log_func" in context and workflow_id: - logging_utils = LoggingUtils(workflow_id, context.get("log_func")) - - if logging_utils: - logging_utils.info("Executing Python code", "execution") - if requirements: - logging_utils.info(f"Required packages: {', '.join(requirements)}", "execution") - - try: - # List of blocked packages for security - blocked_packages = [ - "cryptography", "flask", "django", "tornado", # Security risks - "tensorflow", "pytorch", "scikit-learn" # Resource intensive - ] - - # Initialize SimpleCodeExecutor with requirements and workflow_id for persistence - executor = SimpleCodeExecutor( - workflow_id=workflow_id, - timeout=self.executor_timeout, - max_memory_mb=self.executor_memory_limit, - requirements=requirements, - blocked_packages=blocked_packages, - ai_service = self.ai_service - ) - - # Prepare input data for the code - input_data = {"context": context, "workflow_id": workflow_id} - - # Add file references if available - if context and "documents" in context: - input_data["files"] = [ - { - "id": doc.get("source", {}).get("id", ""), - "name": doc.get("source", {}).get("name", ""), - "type": doc.get("source", {}).get("content_type", ""), - "path": doc.get("source", {}).get("path", "") # Full file path - } - for doc in context.get("documents", []) - if doc.get("source", {}).get("type") == "file" - ] - - # Extract document content from message but don't create files yet - if context and "message" in context and "content" in context["message"]: - message_content = context["message"]["content"] - - # Check if there's extracted document content - if "=== EXTRACTED DOCUMENT CONTENT ===" in message_content: - # Add a special field to input_data for extracted content - input_data["extracted_documents"] = [] - - # Split by the document marker pattern - pattern = r"--- (.*?) 
---\s*" - import re - doc_sections = re.split(pattern, message_content) - - # Skip the first section (before any "--- doc ---" marker) - for i in range(1, len(doc_sections), 2): - if i+1 < len(doc_sections): - doc_name = doc_sections[i].strip() - doc_content = doc_sections[i+1].strip() - - # Store the extracted content to be processed by the executor - input_data["extracted_documents"].append({ - "name": doc_name, - "content": doc_content, - "type": "text/csv" if doc_name.endswith(".csv") else "text/plain" - }) - if logging_utils: - logging_utils.info(f"Extracted document content: {doc_name}", "execution") - - - # Execute the code - if logging_utils: - logging_utils.info(f"Executing code with input data containing {len(input_data.get('files', []))} files", "execution") - - result = executor.execute_code(code, input_data) - - # Log the execution results - if logging_utils: - if result.get("success", False): - logging_utils.info("Code executed successfully", "execution") - - # Log a preview of the output - output = result.get("output", "") - if output: - preview = output[:1000] + "..." if len(output) > 1000 else output - logging_utils.info(f"Output preview: {preview}", "execution") - - # Log a preview of the result - execution_result = result.get("result") - if execution_result: - if isinstance(execution_result, (dict, list)): - result_str = json.dumps(execution_result, indent=2) - preview = result_str[:1000] + "..." if len(result_str) > 1000 else result_str - else: - str_result = str(execution_result) - preview = str_result[:1000] + "..." if len(str_result) > 1000 else str_result - - logging_utils.info(f"Result preview: {preview}", "execution") - else: - # Log error information - error = result.get("error", "Unknown error") - logging_utils.error(f"Error during code execution: {error}", "execution") - - # Clean up non-persistent environments - if not executor.is_persistent: - executor.cleanup() - - return result - - except Exception as e: - error_message = f"Error during code execution: {str(e)}\n{traceback.format_exc()}" - - if logging_utils: - logging_utils.error(error_message, "error") - - return { - "success": False, - "output": "", - "error": error_message, - "result": None - } - - - def send_error_message(self, error_description: str, sender_id: str, receiver_id: str = None, context_id: str = None) -> AgentMessage: - """Send an error message using the protocol""" - return self.protocol.create_error_message( - error_description=error_description, - sender_id=sender_id, - receiver_id=receiver_id, - error_type="code_execution", - context_id=context_id - ) - - def send_result_message(self, result_content: str, sender_id: str, receiver_id: str, task_id: str, - output_data: Dict[str, Any] = None, context_id: str = None) -> AgentMessage: - """Send a result message using the protocol""" - return self.protocol.create_result_message( - result_content=result_content, - sender_id=sender_id, - receiver_id=receiver_id, - task_id=task_id, - output_data=output_data, - result_format="python_code", - context_id=context_id - ) - - # Unchanged error recommendation function - def get_error_recommendation(error_message: str) -> str: - """Generate recommendations based on error message.""" - if "ImportError" in error_message or "ModuleNotFoundError" in error_message: - return """ - ### Recommendation - The error indicates a missing Python module. Try using standard libraries or common data analysis modules. 
- """ - elif "PermissionError" in error_message: - return """ - ### Recommendation - The code doesn't have the necessary permissions to access files or directories. - """ - elif "SyntaxError" in error_message: - return """ - ### Recommendation - There's a syntax error in the code. Check for missing parentheses, quotes, colons, or indentation errors. - """ - elif "FileNotFoundError" in error_message: - return """ - ### Recommendation - A file could not be found. Check the file path and make sure the file exists. - """ - else: - return """ - ### Recommendation - To fix the error: - 1. Check the exact error message - 2. Simplify the code and test step by step - 3. Use try/except blocks for error-prone operations - """ - - -# Singleton instance -_coder_agent = None - -def get_coder_agent(): - """Returns a singleton instance of the Coder Agent""" - global _coder_agent - if _coder_agent is None: - _coder_agent = CoderAgent() - return _coder_agent \ No newline at end of file diff --git a/modules/agentservice_agent_creative.py b/modules/agentservice_agent_creative.py deleted file mode 100644 index c0870b7d..00000000 --- a/modules/agentservice_agent_creative.py +++ /dev/null @@ -1,399 +0,0 @@ -""" -Creative Agent for knowledge-based answers and creative content generation. -Handles open questions, documentation tasks, and special 'poweron' requests. -Based on the refactored Core-Module. -""" - -import logging -from typing import List, Dict, Any, Optional -import json - -from modules.agentservice_base import BaseAgent -from modules.agentservice_utils import MessageUtils, LoggingUtils -from modules.agentservice_protocol import AgentCommunicationProtocol - -logger = logging.getLogger(__name__) - -class CreativeAgent(BaseAgent): - """Agent for knowledge-based answers and creative content generation""" - - def __init__(self): - """Initialize the Creative Agent""" - super().__init__() - self.id = "creative" - self.name = "Creative Knowledge Assistant" - self.type = "knowledge" - self.description = "Provides knowledge-based answers, creates content, handles document processing, and responds to PowerOn requests" - - # Extended capabilities to explicitly cover document processing - self.capabilities = ("knowledge_sharing,content_creation,document_generation," - "creative_writing,poweron,document_processing," - "information_extraction,data_transformation," - "document_analysis,text_processing,table_creation," - "visual_information_processing,content_structuring") - - # Update result format to include tables - self.result_format = "Text,Document,Table" - - # Add enhanced document capabilities - self.supports_documents = True - self.document_capabilities = ["read", "create", "analyze", "extract", "transform"] - self.required_context = ["workflow_id"] - self.document_handler = None - - # Initialize AI service - self.ai_service = None - - # Initialize protocol - self.protocol = AgentCommunicationProtocol() - - # Initialize utilities - self.message_utils = MessageUtils() - - def get_agent_info(self) -> Dict[str, Any]: - """Get agent information for agent registry""" - info = super().get_agent_info() - info.update({ - "metadata": { - "specialties": [ - "creative_writing", - "documentation", - "knowledge", - "poweron", - "document_processing", - "information_extraction", - "content_transformation", - "table_generation", - "document_analysis" - ] - } - }) - return info - - def set_document_handler(self, document_handler): - """Set the document handler for file operations""" - self.document_handler = document_handler 
- - - async def old_process_message(self, message: Dict[str, Any], context: Dict[str, Any] = None) -> Dict[str, Any]: - """ - Process a message and generate a creative or knowledge-based response. - Enhanced with improved document handling. - - Args: - message: The message to process - context: Additional context - - Returns: - The generated response - """ - # Extract workflow_id from context or message - workflow_id = context.get("workflow_id") if context and isinstance(context, dict) else None - if not workflow_id and isinstance(message, dict): - workflow_id = message.get("workflow_id", "unknown") - - # Create response structure early for fallback - response = { - "role": "assistant", - "content": "", - "agent_id": self.id, - "agent_type": self.type, - "agent_name": self.name, - "result_format": self.result_format, - "workflow_id": workflow_id - } - - # Safely create logging utils - log_func = None - logging_utils = None - try: - from modules.agentservice_utils import LoggingUtils - log_func = context.get("log_func") if context and isinstance(context, dict) else None - logging_utils = LoggingUtils(workflow_id, log_func) - except Exception as e: - # If we can't even create logging utils, use basic logging - logger.error(f"Error creating logging utils: {str(e)}") - - # Log function that works with or without logging_utils - def safe_log(message, level="info"): - try: - if logging_utils: - if level == "info": - logging_utils.info(message, "agents") - elif level == "warning": - logging_utils.warning(message, "agents") - elif level == "error": - logging_utils.error(message, "agents") - else: - if level == "info": - logger.info(message) - elif level == "warning": - logger.warning(message) - elif level == "error": - logger.error(message) - except Exception as log_err: - logger.error(f"Error in logging: {str(log_err)}") - - try: - safe_log("Starting to process request", "info") - - # Get the prompt from the message with safety check - prompt = "" - if isinstance(message, dict): - prompt = message.get("content", "") - - safe_log(f"Processing request: {prompt[:50]}...", "info") - - # Power-On handling with safety check - if prompt and "poweron" in prompt.lower(): - safe_log("Detected PowerOn keyword, generating specialized response", "info") - - poweron_prompt = f""" - In the language of the user's prompt, thank them warmly for thinking of you as PowerOn. Tell them how pleased you are to be part of the PowerOn family, working to support humans for a better life. - - Then generate a short answer (1-2 sentences) to this question: {prompt} - """ - - try: - poweron_response = await self.ai_service.call_api([ - {"role": "system", "content": "You are a helpful assistant that is part of the PowerOn family."}, - {"role": "user", "content": poweron_prompt} - ]) - - response["content"] = poweron_response - safe_log("PowerOn response generated", "info") - return response - except Exception as api_err: - safe_log(f"Error calling API for PowerOn: {str(api_err)}", "error") - response["content"] = "I encountered an error while generating a PowerOn response. Please try again." - return response - - # Create system prompt - system_prompt = "You are a helpful, creative assistant specializing in knowledge sharing, content creation, and document processing." - - # Add conversation summarization capabilities - system_prompt += """ - When asked to summarize information, always consider: - 1. All provided document content - 2. The entire conversation history in the current workflow - 3.
Any structured data that has been shared - - For summarization tasks specifically, make sure to analyze the complete context including previous messages in the conversation, not just the files or the current request. - """ - - if workflow_id and workflow_id != "unknown": - system_prompt += """ - You are currently operating within a workflow where multiple messages may have been exchanged. - When generating summaries or overviews, you must incorporate the content from previous messages - in this workflow as they contain valuable context and information. - """ - - # Safely check for documents - has_documents = False - document_count = 0 - - try: - if isinstance(message, dict) and "documents" in message: - documents = message.get("documents") - if documents is not None: - document_count = len(documents) - has_documents = document_count > 0 - safe_log(f"Message contains {document_count} documents", "info") - except Exception as doc_err: - safe_log(f"Error checking documents: {str(doc_err)}", "warning") - - # Initialize document variables - document_content = "" - document_texts = [] - document_names = [] - - # Process documents with extreme caution - if has_documents: - safe_log("Processing attached documents", "info") - - # Try document handler first - try: - if self.document_handler: - try: - document_content = self.document_handler.merge_document_contents(message) - if document_content: - safe_log("Successfully extracted document content with handler", "info") - else: - safe_log("Document handler returned empty content", "warning") - except Exception as handler_err: - safe_log(f"Error using document handler: {str(handler_err)}", "warning") - except Exception as err: - safe_log(f"General error with document handler: {str(err)}", "warning") - - # Fallback: manual extraction (very cautious) - try: - documents = message.get("documents", []) or [] - - for i, doc in enumerate(documents): - if doc is None: - safe_log(f"Document at index {i} is None", "warning") - continue - - try: - # Process source - source = None - if isinstance(doc, dict): - source = doc.get("source") - - # Get name - doc_name = "Document" - if isinstance(source, dict): - doc_name = source.get("name", f"Document {i+1}") - - document_names.append(doc_name) - safe_log(f"Processing document: {doc_name}", "info") - - # Get contents - contents = [] - if isinstance(doc, dict): - contents = doc.get("contents", []) or [] - - doc_text = "" - for content_item in contents: - if content_item is None: - continue - - if isinstance(content_item, dict) and content_item.get("type") == "text": - text = content_item.get("text", "") - if text: - doc_text = text - document_texts.append(doc_text) - safe_log(f"Found text content in {doc_name}", "info") - break - - # Handle empty content - if not doc_text: - safe_log(f"No text content found in {doc_name}", "warning") - placeholder = f"[This appears to be a document named '{doc_name}', but I couldn't extract its content]" - document_texts.append(placeholder) - - except Exception as doc_err: - safe_log(f"Error processing individual document: {str(doc_err)}", "warning") - except Exception as docs_err: - safe_log(f"Error in document processing loop: {str(docs_err)}", "warning") - - # Combine prompt with documents safely - full_prompt = prompt - - try: - if document_content: - full_prompt = f"{prompt}\n\n### Reference Documents:\n{document_content}" - safe_log("Using document handler content", "info") - elif document_texts and document_names: - # Use only corresponding pairs of names and texts - 
docs_content = "" - min_length = min(len(document_names), len(document_texts)) - - for i in range(min_length): - name = document_names[i] - text = document_texts[i] - docs_content += f"\n\n### Document: {name}\n{text}" - - if docs_content: - full_prompt = f"{prompt}\n\n{docs_content}" - safe_log("Using manually extracted content", "info") - else: - safe_log("No document content could be added", "warning") - else: - safe_log("No document content available to add to prompt", "info") - except Exception as combine_err: - safe_log(f"Error combining prompt with documents: {str(combine_err)}", "warning") - - # Call AI API - try: - safe_log("Calling AI service", "info") - - content = await self.ai_service.call_api([ - {"role": "system", "content": system_prompt}, - {"role": "user", "content": full_prompt} - ]) - - response["content"] = content - safe_log("Response successfully generated", "info") - - except Exception as api_err: - safe_log(f"Error calling AI API: {str(api_err)}", "error") - response["content"] = f"I encountered an error while processing your request. Please try again or rephrase your question." - - return response - - except Exception as e: - # Ultra-safe error handling - error_msg = f"Error generating response: {str(e)}" - try: - if logging_utils: - logging_utils.error(error_msg, "error") - else: - logger.error(error_msg) - except: - logger.error(f"Critical error in error handling: {error_msg}") - - response["content"] = f"I encountered an error while processing your request: {str(e)}" - return response - - - async def process_message(self, message: Dict[str, Any], context: Dict[str, Any] = None) -> Dict[str, Any]: - """ - Direct message processing function that focuses on properly handling the user's request. - """ - # Extract workflow_id and setup response - workflow_id = "unknown" - if context and isinstance(context, dict) and "workflow_id" in context: - workflow_id = context["workflow_id"] - elif message and isinstance(message, dict) and "workflow_id" in message: - workflow_id = message["workflow_id"] - - response = { - "role": "assistant", - "content": "", - "agent_id": self.id, - "agent_type": self.type, - "agent_name": self.name, - "result_format": "Text", - "workflow_id": workflow_id - } - - try: - # Extract the user's message directly - user_message = "" - if isinstance(message, dict) and "content" in message: - user_message = message["content"] - - # Ensure we have something to process - if not user_message: - response["content"] = "Please provide a message for me to respond to." - return response - - # Simple system prompt that focuses on direct response to the user's request - system_prompt = """You are a helpful, creative assistant. - Respond directly to the user's request without referencing any workflow or system context. 
- Focus only on providing a direct, helpful response to the specific question or request.""" - - # Process with AI - content = await self.ai_service.call_api([ - {"role": "system", "content": system_prompt}, - {"role": "user", "content": user_message} - ]) - - response["content"] = content - return response - - except Exception as e: - logger.error(f"Error in process_message: {str(e)}") - response["content"] = f"I encountered an error while processing your request: {str(e)}" - return response - -# Singleton-Instanz -_creative_agent = None - -def get_creative_agent(): - """Returns a singleton instance of the Creative Agent""" - global _creative_agent - if _creative_agent is None: - _creative_agent = CreativeAgent() - return _creative_agent \ No newline at end of file diff --git a/modules/agentservice_agent_documentation.py b/modules/agentservice_agent_documentation.py deleted file mode 100644 index f7a3a34c..00000000 --- a/modules/agentservice_agent_documentation.py +++ /dev/null @@ -1,574 +0,0 @@ -""" -Dokumentations-Agent für die Erstellung von Dokumentation, Berichten und strukturierten Inhalten. -Verwendet einen adaptiven Prozess zur Erstellung hochwertiger Dokumentation basierend auf der Komplexität des Auftrags. -Angepasst für das refaktorisierte Core-Modul und AgentCommunicationProtocol. -""" - -import logging -import json -import re -import traceback -from typing import List, Dict, Any, Optional, Tuple, Union -from datetime import datetime -import uuid - -from modules.agentservice_base import BaseAgent -from modules.agentservice_utils import WorkflowUtils, MessageUtils, LoggingUtils -from modules.agentservice_protocol import AgentMessage, AgentCommunicationProtocol -from modules.agentservice_filemanager import FileManager # Import the file manager - -logger = logging.getLogger(__name__) - -class DocumentationAgent(BaseAgent): - """Agent for creating documentation and structured content""" - - def __init__(self): - """Initialize the documentation agent""" - super().__init__() - self.id = "documentation_agent" - self.name = "Documentation Specialist" - self.type = "documentation" - self.description = "Creates documentation and structured content" - self.capabilities = "report_generation,documentation,content_structuring,technical_writing,knowledge_organization" - self.result_format = "FormattedDocument" - - # Initialize AI service - self.ai_service = None - - # Initialize document handler - self.document_handler = None - - # Document capabilities - self.supports_documents = True - self.document_capabilities = ["read", "reference", "create"] - self.required_context = ["document_purpose", "target_audience"] - - # Initialize protocol - self.protocol = AgentCommunicationProtocol() - - # Initialize utilities - self.message_utils = MessageUtils() - - # Track the latest generated document - self.last_document = {} - - def get_agent_info(self) -> Dict[str, Any]: - """Get agent information for agent registry""" - info = super().get_agent_info() - info.update({ - "metadata": { - "document_types": ["manual", "report", "process", "presentation", "document"], - "formats": ["markdown", "text"] - } - }) - return info - - def set_document_handler(self, document_handler): - """Set the document handler for file operations""" - self.document_handler = document_handler - - async def process_message(self, message: Dict[str, Any], context: Dict[str, Any] = None) -> Dict[str, Any]: - """ - Process a message and create documentation. 
- - Args: - message: Input message - context: Optional context - - Returns: - Response with documentation - """ - # Extract workflow_id from context or message - workflow_id = context.get("workflow_id") if context else message.get("workflow_id", "unknown") - - # Get or create logging_utils - log_func = context.get("log_func") if context else None - logging_utils = LoggingUtils(workflow_id, log_func) - - # Create response structure - response = { - "role": "assistant", - "content": "", - "agent_id": self.id, - "agent_type": self.type, - "agent_name": self.name, - "result_format": self.result_format, - "workflow_id": workflow_id, - "documents": [] - } - - try: - # Initial status update - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Starting document creation", - sender_id=self.id, - status="in_progress", - progress=0.0, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Extract task from message - task = message.get("content", "") - - # Detect document type - 10% progress - document_type = self._detect_document_type(task) - logging_utils.info(f"Creating {document_type} documentation", "execution") - - if log_func: - status_message = self.protocol.create_status_update_message( - status_description=f"Identified document type: {document_type}", - sender_id=self.id, - status="in_progress", - progress=0.1, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Process any attached documents - 30% progress - document_context = "" - if message.get("documents"): - logging_utils.info("Processing reference documents", "execution") - - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Processing reference documents", - sender_id=self.id, - status="in_progress", - progress=0.2, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - document_context = await self._process_documents(message) - - # Update progress - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Reference documents processed", - sender_id=self.id, - status="in_progress", - progress=0.3, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Enhanced prompt with document context - enhanced_prompt = f"{task}\n\n{document_context}" - - # Assess complexity - 40% progress - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Assessing document complexity", - sender_id=self.id, - status="in_progress", - progress=0.4, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - is_complex = await self._assess_complexity(enhanced_prompt) - complexity_type = "complex" if is_complex else "simple" - logging_utils.info(f"Document complexity assessment: {complexity_type}", "execution") - - # Generate title - 50% progress - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Generating document title", - sender_id=self.id, - status="in_progress", - progress=0.5, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - title = await self._generate_title(enhanced_prompt, document_type) - logging_utils.info(f"Document title: {title}", "execution") - - # Update progress - if log_func: - 
status_message = self.protocol.create_status_update_message( - status_description=f"Generating {document_type}: {title}", - sender_id=self.id, - status="in_progress", - progress=0.6, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Generate content based on complexity - 70% progress - if is_complex: - # For complex documents, use the AI service with enhanced prompt - if log_func: - status_message = self.protocol.create_status_update_message( - status_description=f"Creating complex {document_type} document: {title}", - sender_id=self.id, - status="in_progress", - progress=0.7, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - content = await self._generate_complex_document(enhanced_prompt, document_type, title) - logging_utils.info("Complex document generated", "execution") - else: - # For simple documents, use direct generation - if log_func: - status_message = self.protocol.create_status_update_message( - status_description=f"Creating simple {document_type} document: {title}", - sender_id=self.id, - status="in_progress", - progress=0.7, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - content = await self._generate_simple_document(enhanced_prompt, document_type, title) - logging_utils.info("Simple document generated", "execution") - - # Finalize document - 90% progress - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Finalizing document", - sender_id=self.id, - status="in_progress", - progress=0.9, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Create a document artifact if document handler is available - if self.document_handler: - doc_id = f"doc_{uuid.uuid4()}" - document = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": title, - "content_type": "text/markdown", - "size": len(content) - }, - "contents": [ - { - "type": "text", - "text": content, - "is_extracted": True - } - ] - } - - # Add document to response - response["documents"].append(document) - - # Store the latest document - self.last_document = document - - # Update response content to reference the document - response["content"] = f"I've created a document titled '{title}' that contains the requested information. The document is attached to this message." 
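-                # The generated artifact reuses the message document schema seen on
-                # incoming attachments: a "source" block with metadata plus a
-                # "contents" list holding the extracted text. A consumer could read
-                # it back roughly like this (a sketch, assuming a single text entry
-                # per document):
-                #
-                #     text = document["contents"][0]["text"]
-                #     name = document["source"]["name"]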
- - # If protocol message is required, send it - if context and context.get("require_protocol_message"): - result_message = self.send_document_result( - document_title=title, - document_content=content, - sender_id=self.id, - receiver_id=context.get("receiver_id", "workflow"), - context_id=workflow_id - ) - # Just log the message creation - logging_utils.info(f"Created protocol result message: {result_message.id}", "execution") - else: - # If no document handler, just put content in response - response["content"] = content - - # Final progress update - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Document creation completed", - sender_id=self.id, - status="completed", - progress=1.0, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - return response - - except Exception as e: - error_msg = f"Error in documentation agent: {str(e)}" - logging_utils.error(error_msg, "error") - - # Create error response using protocol - error_message = self.protocol.create_error_message( - error_description=error_msg, - sender_id=self.id, - error_type="documentation", - error_details={"traceback": traceback.format_exc()}, - context_id=workflow_id - ) - - # Log error status - if log_func: - status_message = self.protocol.create_status_update_message( - status_description=f"Error creating documentation: {str(e)}", - sender_id=self.id, - status="error", - progress=1.0, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "error", self.id, self.name) - - # Set error in response - response["content"] = f"## Error creating documentation\n\n{error_msg}\n\n```\n{traceback.format_exc()}\n```" - response["status"] = "error" - - return response - - # Helper method to process document content with enhanced logging - async def _process_documents(self, message: Dict[str, Any]) -> str: - """Process documents in the message with detailed logging""" - if not message.get("documents"): - return "" - - document_context = "" - - if self.document_handler: - # Use document handler to merge contents - document_context = self.document_handler.merge_document_contents(message) - else: - # Manual processing - for document in message.get("documents", []): - source = document.get("source", {}) - doc_name = source.get("name", "unnamed") - - document_context += f"\n\n--- {doc_name} ---\n" - - for content in document.get("contents", []): - if content.get("type") == "text": - document_context += content.get("text", "") - - # Log summary of processed documents - doc_count = len(message.get("documents", [])) - context_size = len(document_context) - - logger.info(f"Processed {doc_count} documents, extracted {context_size} characters of context") - - return document_context - - - - async def _assess_complexity(self, task: str) -> bool: - """ - Assess task complexity to determine document structure. - - Args: - task: The task description - - Returns: - True if complex document needed, False otherwise - """ - if not self.ai_service: - # Default to complex if no AI service - return True - - prompt = f""" - Analyze this task and determine if it requires a complex or simple document structure: - - {task} - - Respond with only "COMPLEX" or "SIMPLE". 
- """ - - try: - response = await self.ai_service.call_api([ - {"role": "system", "content": "You determine document complexity requirements."}, - {"role": "user", "content": prompt} - ]) - - return "COMPLEX" in response.upper() - except Exception: - # Default to complex on error - return True - - async def _generate_title(self, task: str, document_type: str) -> str: - """ - Generate a title for the document. - - Args: - task: The task description - document_type: Type of document - - Returns: - Generated title - """ - if not self.ai_service: - # Default title if no AI service - return f"{document_type.capitalize()} Document" - - prompt = f""" - Create a concise, professional title for this {document_type}: - - {task} - - Respond with ONLY the title, nothing else. - """ - - try: - title = await self.ai_service.call_api([ - {"role": "system", "content": "You create document titles."}, - {"role": "user", "content": prompt} - ]) - - # Clean up the title - return title.strip('"\'#*- \n\t') - except Exception: - # Default title on error - return f"{document_type.capitalize()} Document" - - async def _generate_complex_document(self, task: str, document_type: str, title: str) -> str: - """ - Generate a complex document with structure. - - Args: - task: The task description - document_type: Type of document - title: Document title - - Returns: - Generated document content - """ - if not self.ai_service: - return f"# {title}\n\nUnable to generate complex document: AI service not available." - - prompt = f""" - Create a comprehensive, well-structured {document_type} titled "{title}" based on: - - {task} - - The document should include: - 1. A clear introduction with purpose and scope - 2. Logically organized sections with headings - 3. Detailed content with examples and evidence - 4. A conclusion with key takeaways - 5. Appropriate formatting using Markdown - - Format the document in Markdown with proper headings, lists, and emphasis. - """ - - try: - content = await self.ai_service.call_api([ - {"role": "system", "content": "You create comprehensive, well-structured documentation."}, - {"role": "user", "content": prompt} - ]) - - # Ensure title is at the top - if not content.strip().startswith("# "): - content = f"# {title}\n\n{content}" - - return content - except Exception as e: - return f"# {title}\n\nError generating document: {str(e)}" - - async def _generate_simple_document(self, task: str, document_type: str, title: str) -> str: - """ - Generate a simple document without complex structure. - - Args: - task: The task description - document_type: Type of document - title: Document title - - Returns: - Generated document content - """ - if not self.ai_service: - return f"# {title}\n\nUnable to generate document: AI service not available." - - prompt = f""" - Create a concise, focused {document_type} titled "{title}" based on: - - {task} - - The document should be clear, precise, and to the point without complex chapter structure. - Format using Markdown with appropriate headings and formatting. - """ - - try: - content = await self.ai_service.call_api([ - {"role": "system", "content": "You create concise, focused documentation."}, - {"role": "user", "content": prompt} - ]) - - # Ensure title is at the top - if not content.strip().startswith("# "): - content = f"# {title}\n\n{content}" - - return content - except Exception as e: - return f"# {title}\n\nError generating document: {str(e)}" - - def _detect_document_type(self, message: str) -> str: - """ - Detect document type from the message. 
- - Args: - message: User message - - Returns: - Detected document type - """ - message = message.lower() - - if any(term in message for term in ["manual", "guide", "instruction", "tutorial"]): - return "manual" - elif any(term in message for term in ["report", "analysis", "assessment", "review"]): - return "report" - elif any(term in message for term in ["process", "workflow", "procedure", "steps"]): - return "process" - elif any(term in message for term in ["presentation", "slides", "deck"]): - return "presentation" - else: - return "document" - - def send_document_result(self, document_title: str, document_content: str, - sender_id: str, receiver_id: str, context_id: str = None) -> AgentMessage: - """Send a document result using the protocol""" - metadata = { - "document_type": self._detect_document_type(document_content), - "title": document_title, - "created_at": datetime.now().isoformat() - } - - return self.protocol.create_result_message( - result_content=document_content, - sender_id=sender_id, - receiver_id=receiver_id, - task_id=f"doc_{uuid.uuid4()}", - output_data=metadata, - result_format=self.result_format, - context_id=context_id - ) - - def send_error_message(self, error_description: str, sender_id: str, receiver_id: str = None, - context_id: str = None) -> AgentMessage: - """Send an error message using the protocol""" - return self.protocol.create_error_message( - error_description=error_description, - sender_id=sender_id, - receiver_id=receiver_id, - error_type="documentation_error", - error_details={"timestamp": datetime.now().isoformat()}, - context_id=context_id - ) - -# Singleton instance -_documentation_agent = None - -def get_documentation_agent(): - """Returns a singleton instance of the documentation agent""" - global _documentation_agent - if _documentation_agent is None: - _documentation_agent = DocumentationAgent() - return _documentation_agent \ No newline at end of file diff --git a/modules/agentservice_agent_webcrawler.py b/modules/agentservice_agent_webcrawler.py deleted file mode 100644 index 491bf654..00000000 --- a/modules/agentservice_agent_webcrawler.py +++ /dev/null @@ -1,1056 +0,0 @@ -""" -WebCrawler-Agent for research and retrieval of information from the web. -Adapted for the refactored Core-Module with language-agnostic detection. 
-""" - -import json -import logging -import time -import traceback -from typing import List, Dict, Any, Optional -from urllib.parse import quote_plus, unquote - -from bs4 import BeautifulSoup -import requests -from modules.agentservice_base import BaseAgent -from modules.agentservice_utils import MessageUtils, LoggingUtils -from modules.agentservice_protocol import AgentCommunicationProtocol -from modules.utility import APP_CONFIG - -logger = logging.getLogger(__name__) - -class WebcrawlerAgent(BaseAgent): - - """Agent for Web Research and Information Retrieval""" - - def __init__(self): - """Initialize the WebCrawler Agent""" - super().__init__() - self.id = "webcrawler" - self.name = "Webscraper" - self.type = "scraper" - self.description = "Researches information on the web" - self.capabilities = "web_search,information_retrieval,data_collection,source_verification,content_integration" - self.result_format = "SearchResults" - - # Add enhanced document capabilities - self.supports_documents = True - self.document_capabilities = ["read", "create"] - self.required_context = ["workflow_id"] - self.document_handler = None - - # Initialize AI service - self.ai_service = None - - # Initialize protocol - self.protocol = AgentCommunicationProtocol() - - # Initialize utility classes - self.message_utils = MessageUtils() - - # Web-Crawling configuration - self.max_url = int(APP_CONFIG.get("Connector_AiWebscraping_MAX_URLS")) - self.max_key = int(APP_CONFIG.get("Connector_AiWebscraping_MAX_SEARCH_KEYWORDS")) - self.max_result = int(APP_CONFIG.get("Connector_AiWebscraping_MAX_SEARCH_RESULTS")) - self.timeout = int(APP_CONFIG.get("Connector_AiWebscraping_TIMEOUT")) - - def get_agent_info(self) -> Dict[str, Any]: - """Get agent information for agent registry""" - info = super().get_agent_info() - info.update({ - "metadata": { - "max_url": self.max_url, - "max_result": self.max_result, - "timeout": self.timeout - } - }) - return info - - def set_document_handler(self, document_handler): - """Set the document handler for file operations""" - self.document_handler = document_handler - - async def process_message(self, message: Dict[str, Any], context: Dict[str, Any] = None) -> Dict[str, Any]: - """ - Process a message and conduct web research if appropriate. - - Args: - message: The message to process - context: Additional context - - Returns: - The generated response or rejection if not a web research request - """ - # Extract workflow_id from context or message - workflow_id = context.get("workflow_id") if context else message.get("workflow_id", "unknown") - - # Get or create logging_utils - log_func = context.get("log_func") if context else None - logging_utils = LoggingUtils(workflow_id, log_func) - - # Create response structure - response = { - "role": "assistant", - "content": "", - "agent_id": self.id, - "agent_type": self.type, - "agent_name": self.name, - "result_format": self.result_format, - "workflow_id": workflow_id - } - - try: - # Get the query from the message - prompt = await self.get_prompt(message) - - # Check if this is explicitly a web research request using AI - is_web_research = await self._is_web_research_request_ai(prompt) - - if not is_web_research: - # Reject non-web research requests - logging_utils.info("Request rejected: not a web research task", "agents") - response["content"] = "This request doesn't appear to require web research. Redirecting to a more appropriate agent." 
- response["status"] = "rejected" - response["rejection_reason"] = "not_web_research" - return response - - # Continue with web research process - logging_utils.info(f"Web research for: {prompt[:50]}...", "agents") - - # Send status update using protocol - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Starting web research", - sender_id=self.id, - status="in_progress", - progress=0.0, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Update progress using protocol - 10% for starting the query analysis - if log_func: - status_message = self.protocol.create_status_update_message( - status_description=f"Analyzing search strategy for: {prompt[:30]}...", - sender_id=self.id, - status="in_progress", - progress=0.1, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Prepare the web query strategy - try: - # Log progress - 20% for query strategy preparation - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Creating search strategy", - sender_id=self.id, - status="in_progress", - progress=0.2, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Get the query strategy - content_text = await self.ai_service.call_api( - messages=[ - { - "role": "system", - "content": "You are a web research expert who develops precise search strategies." - }, - { - "role": "user", - "content": f"""Create a comprehensive web research strategy for the task = '{prompt.replace("'","")}'. Return the results as a Python dictionary with these specific keys. If specific url are provided and the task requires analysis only on the provided url, then leave 'skey' open. - - 'url': A list of maximum {self.max_url} specific URLs extracted from the task string. - - 'skey': A list of maximum {self.max_key} key sentences to search for on the web. These should be precise, diverse, and targeted to get the most relevant information. - - Format your response as a valid json object with these two keys. Do not include any explanatory text or markdown outside of the object definition. 
- """ - } - ] - ) - - # Try to parse the JSON result - if content_text.startswith("```json"): - # Find the end of the JSON block - end_marker = "```" - end_index = content_text.rfind(end_marker) - if end_index != -1: - # Extract the JSON content without the markdown markers - content_text = content_text[7:end_index].strip() - - try: - logger.info(f"Valid json received: {str(content_text)}") - pjson = json.loads(content_text) - - # Log parsed search strategy - search_keys = pjson.get("skey", []) - search_urls = pjson.get("url", []) - - if search_keys: - logging_utils.info(f"Searching for {len(search_keys)} key terms: {', '.join(search_keys[:2])}...", "agents") - - if search_urls: - logging_utils.info(f"Searching in {len(search_urls)} direct URLs: {', '.join(search_urls[:2])}...", "agents") - - # Log progress - 30% for starting the search - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Starting web search", - sender_id=self.id, - status="in_progress", - progress=0.3, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Execute the search - results = [] - total_tasks = len(search_keys) + len(search_urls) - tasks_completed = 0 - - # Process search keywords - for keyword in search_keys: - logging_utils.info(f"Searching web for: '{keyword}'", "agents") - - # Log specific keyword search progress - if log_func: - progress_pct = 0.3 + (0.5 * (tasks_completed / total_tasks)) - status_message = self.protocol.create_status_update_message( - status_description=f"Searching for: '{keyword}'", - sender_id=self.id, - status="in_progress", - progress=progress_pct, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - keyword_results = self.search_web(keyword) - results.extend(keyword_results) - logging_utils.info(f"Found: {len(keyword_results)} results for '{keyword}'", "agents") - - tasks_completed += 1 - - # Process direct URLs - for url in search_urls: - logging_utils.info(f"Extracting content from: {url}", "agents") - - # Log specific URL extraction progress - if log_func: - progress_pct = 0.3 + (0.5 * (tasks_completed / total_tasks)) - status_message = self.protocol.create_status_update_message( - status_description=f"Reading URL: {url}", - sender_id=self.id, - status="in_progress", - progress=progress_pct, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - soup = self.read_url(url) - - # Extract title from the page if it exists - if isinstance(soup, BeautifulSoup): - title_tag = soup.find('title') - title = title_tag.text.strip() if title_tag else "No title" - - # Alternative: You could also look for h1 tags if the title tag is missing - if title == "No title": - h1_tag = soup.find('h1') - if h1_tag: - title = h1_tag.text.strip() - else: - # Handle the case where soup is an error message string - title = "Error fetching page" - - result = self.parse_result(soup, title, url) - results.append(result) - logging_utils.info(f"Extracted: '{title}' from {url}", "agents") - - tasks_completed += 1 - - # Log progress - 80% for processing results - if log_func: - status_message = self.protocol.create_status_update_message( - status_description=f"Analyzing {len(results)} search results", - sender_id=self.id, - status="in_progress", - progress=0.8, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Process results for the final output - 
logging_utils.info(f"Analyzing {len(results)} web results", "agents") - - # Generate summaries for each result - processed_results = [] - for i, result in enumerate(results): - result_data_limited = self.limit_text_for_api(result['data'], max_tokens=int(APP_CONFIG("Connector_AiWebscraping_MAX_TOKENS"))) - - # Log individual result processing - logging_utils.info(f"Analyzing result {i+1}/{len(results)}: {result['title'][:30]}...", "agents") - - web_answer_instructions = f""" - Summarize this search result according to the original request in approximately 2000 characters. Original request = '{prompt.replace("'","")}' - Focus on the most important insights and connect them to the original request. You can skip any introduction. - Extract only relevant and high-quality information related to the request, and present it in a clear format. Provide a balanced view of the researched information. - - Here is the search result: - {result_data_limited} - """ - - content_summary = await self.ai_service.call_api( - messages=[ - { - "role": "system", - "content": "You are an information analyst who precisely and relevantly summarizes web content." - }, - { - "role": "user", - "content": web_answer_instructions - } - ] - ) - - # Limit summary to ~2000 characters - content_summary = content_summary[:2000] - - processed_result = { - "title": result['title'], - "url": result['url'], - "snippet": result['snippet'], - "summary": content_summary - } - - processed_results.append(processed_result) - - # Log progress - 90% for creating final summary - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Creating overall summary", - sender_id=self.id, - status="in_progress", - progress=0.9, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - # Create the final combined summary - all_summaries = "\n\n".join([r["summary"] for r in processed_results]) - all_summaries_limited = self.limit_text_for_api(all_summaries, max_tokens=int(APP_CONFIG("Connector_AiWebscraping_MAX_TOKENS"))) - - logging_utils.info("Creating overall summary of web research", "agents") - - final_summary = await self.ai_service.call_api( - messages=[ - { - "role": "system", - "content": "You create concise summaries of research findings." - }, - { - "role": "user", - "content": f"Please summarize these findings in 5-6 sentences: {all_summaries_limited}\n" - } - ] - ) - - # Get the language of the request to use for result headers - request_language_analysis = await self.ai_service.call_api( - messages=[ - { - "role": "system", - "content": "You determine the language of a text and return only the language name." - }, - { - "role": "user", - "content": f"What language is this text in? 
Only respond with the language name: {prompt}" - } - ] - ) - - # Get headers in the right language - headers = await self._get_localized_headers(request_language_analysis.strip()) - - # Format the final result - final_result = f"## {headers['web_research_results']}\n\n### {headers['summary']}\n{final_summary}\n\n### {headers['detailed_results']}\n" - - for i, result in enumerate(processed_results, 1): - final_result += f"\n\n[{i}] {result['title']}\n{headers['url']}: {result['url']}\n{headers['snippet']}: {result['snippet']}\n{headers['content']}: {result['summary']}" - - # Set the content in the response - response["content"] = final_result - - # Log completion - 100% progress - if log_func: - status_message = self.protocol.create_status_update_message( - status_description="Web research completed", - sender_id=self.id, - status="completed", - progress=1.0, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "info", self.id, self.name) - - logging_utils.info("Web research successfully completed", "agents") - - return response - - except json.JSONDecodeError as e: - logging_utils.error(f"Error parsing JSON data: {e}", "error") - - # Fallback for JSON parse error - if log_func: - status_message = self.protocol.create_status_update_message( - status_description=f"Error parsing search strategy: {str(e)}", - sender_id=self.id, - status="error", - progress=0.0, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "error", self.id, self.name) - - # Use a simple fallback approach - logging_utils.info("Using fallback search strategy with direct query", "agents") - - # Perform a direct search with the original query - results = self.search_web(prompt) - - # Process and format results directly - if results: - result_text = "## Web Research Results (Fallback Mode)\n\n" - - for i, result in enumerate(results, 1): - result_text += f"### [{i}] {result['title']}\n" - result_text += f"URL: {result['url']}\n" - result_text += f"Snippet: {result['snippet']}\n\n" - - response["content"] = result_text - else: - response["content"] = "## Web Research Results\n\nNo relevant results were found." 
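-                    # Fallback mode deliberately skips the per-result AI summarization
-                    # pass and returns the raw search hits, so the agent still produces
-                    # an answer when the strategy JSON cannot be parsed.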
- - return response - - except Exception as e: - error_msg = f"Error during web research: {str(e)}" - logging_utils.error(error_msg, "error") - - # Create error response using protocol - error_message = self.protocol.create_error_message( - error_description=error_msg, - sender_id=self.id, - error_type="web_search", - error_details={"traceback": traceback.format_exc()}, - context_id=workflow_id - ) - - # Log error status - if log_func: - status_message = self.protocol.create_status_update_message( - status_description=f"Error during web research: {str(e)}", - sender_id=self.id, - status="error", - progress=1.0, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "error", self.id, self.name) - - response["content"] = f"## Error during web research\n\n{error_msg}\n\n```\n{traceback.format_exc()}\n```" - - return response - - except Exception as e: - error_msg = f"Error during web research: {str(e)}" - logging_utils.error(error_msg, "error") - - # Create error response using protocol - error_message = self.protocol.create_error_message( - error_description=error_msg, - sender_id=self.id, - error_type="web_search", - error_details={"traceback": traceback.format_exc()}, - context_id=workflow_id - ) - - # Log error status - if log_func: - status_message = self.protocol.create_status_update_message( - status_description=f"Error during web research: {str(e)}", - sender_id=self.id, - status="error", - progress=1.0, - context_id=workflow_id - ) - log_func(workflow_id, status_message.content, "error", self.id, self.name) - - response["content"] = f"## Error during web research\n\n{error_msg}\n\n```\n{traceback.format_exc()}\n```" - - return response - - async def _is_web_research_request_ai(self, prompt: str) -> bool: - """ - Uses AI to determine if a prompt requires web research, making it language-agnostic. - - Args: - prompt: The user prompt - - Returns: - True if this is explicitly a web research request, False otherwise - """ - if not self.ai_service: - # Fallback to simpler detection if AI service isn't available - return self._simple_web_detection(prompt) - - try: - # Create a prompt to analyze whether this is a web research request - analysis_prompt = f""" - Analyze the following request and determine if it explicitly requires web research or online information. - - REQUEST: {prompt} - - A request requires web research if: - 1. It explicitly asks to search for information online - 2. It contains URLs or references to websites - 3. It requests current information that would be available on the web - 4. It asks to find information from web sources - 5. It implicitly requires up-to-date information from the internet - - ONLY respond with a single word - either "YES" if web research is required, or "NO" if it is not. - DO NOT include any explanation, just the answer YES or NO. - """ - - # Call AI to analyze - response = await self.ai_service.call_api( - messages=[ - { - "role": "system", - "content": "You determine if a request requires web research. Always answer with only YES or NO." 
- }, - { - "role": "user", - "content": analysis_prompt - } - ] - ) - - # Clean the response - response = response.strip().upper() - - # Check if the response indicates it's a web research task - if "YES" in response: - return True - else: - return False - - except Exception as e: - # Log error but don't fail, fall back to simpler detection - logger.warning(f"Error using AI to detect web research request: {str(e)}") - return self._simple_web_detection(prompt) - - def _simple_web_detection(self, prompt: str) -> bool: - """ - Simpler fallback method to detect web research requests based on URLs. - - Args: - prompt: The user prompt - - Returns: - True if there are clear URL indicators, False otherwise - """ - # URLs in the prompt strongly indicate web research - url_indicators = ["http://", "https://", "www.", ".com", ".org", ".net", ".edu", ".gov"] - - # Check for URL patterns in the prompt - contains_url = any(indicator in prompt.lower() for indicator in url_indicators) - - return contains_url - - async def _get_localized_headers(self, language: str) -> Dict[str, str]: - """ - Get localized headers for the web research results based on detected language. - - Args: - language: The detected language - - Returns: - Dictionary with localized headers - """ - # Default English headers - headers = { - "web_research_results": "Web Research Results", - "summary": "Summary", - "detailed_results": "Detailed Results", - "url": "URL", - "snippet": "Snippet", - "content": "Content" - } - - # If language detection failed or is English, return defaults - if not language or language.lower() in ["english", "en"]: - return headers - - try: - # Use AI to translate headers to the detected language - translation_prompt = f""" - Translate these web research result headers to {language}: - - Web Research Results - Summary - Detailed Results - URL - Snippet - Content - - Return a JSON object with these keys: - web_research_results, summary, detailed_results, url, snippet, content - """ - - # Call AI for translation - response = await self.ai_service.call_api( - messages=[ - { - "role": "system", - "content": "You translate headers to the specified language and return them as JSON." - }, - { - "role": "user", - "content": translation_prompt - } - ] - ) - - # Extract JSON - import re - json_match = re.search(r'\{.*\}', response, re.DOTALL) - - if json_match: - translated_headers = json.loads(json_match.group(0)) - return translated_headers - - except Exception as e: - # Log error but continue with English headers - logger.warning(f"Error translating headers to {language}: {str(e)}") - - return headers - - async def get_prompt(self, message_context: Dict[str, Any]) -> str: - task = message_context.get("content", "") - return task.strip() - - async def get_web_query(self, message_context: Dict[str, Any]) -> str: - prompt = await self.get_prompt(message_context) - result_json = await self.run_web_query(prompt) - result_data = "" - summary_src = "" - - logger.info(f"Web analysis prompt '{prompt}' delivers {len(result_json)} results.") - if isinstance(result_json, list): - total_tokens = 0 - - for i, result in enumerate(result_json, 1): - # Limit content size for each result - result_data_limited = self.limit_text_for_api(result['data'], max_tokens=int(APP_CONFIG.get("Connector_AiWebscraping_MAX_TOKENS"))) # Allow ~15000 tokens per result - - web_answer_instructions = f""" - Summarize this search result according to the original request in approximately 2000 characters. 
Original request = '{prompt.replace("'","")}' - Focus on the most important insights and connect them to the original request. You can skip any introduction. - Extract only relevant and high-quality information related to the request, and present it in a clear format. Provide a balanced view of the researched information. - - Here is the search result: - {result_data_limited} - """ - - # Count tokens in the instructions to ensure we don't exceed API limits - instruction_tokens = self.count_tokens(web_answer_instructions) - if total_tokens + instruction_tokens > 60000: - logger.warning(f"Skipping result {i} to avoid exceeding token limit") - break - - total_tokens += instruction_tokens - - # Additional instructions for web research - content_text = await self.ai_service.call_api( - messages=[ - { - "role": "system", - "content": "You are an information analyst who precisely and relevantly summarizes web content." - }, - { - "role": "user", - "content": web_answer_instructions - } - ] - ) - - # Create a summary but ensure we stay within token limits - content_summary = content_text[:2000] # Limit to ~2000 characters - result_data += f"\n\n[{i}] {result['title']}\nURL: {result['url']}\nSnippet: {result['snippet']}\nContent: {content_summary}" - summary_src += f"\n{content_summary}" - - # Update token count - total_tokens += self.count_tokens(content_summary) + 100 # Add buffer for formatting - else: - result_data = "no data received" - - logger.info(f"Web analysis result sent {len(result_data)}B") - - # Additional summary - summary = "" - if len(summary_src) > 1: - # Limit summary source to ensure we don't exceed API limits - summary_src_limited = self.limit_text_for_api(summary_src, max_tokens=int(APP_CONFIG("Connector_AiWebscraping_MAX_TOKENS"))) - - summary = await self.ai_service.call_api( - messages=[ - { - "role": "system", - "content": "You create concise summaries of research findings." - }, - { - "role": "user", - "content": f"Please summarize these findings in 5-6 sentences: {summary_src_limited}\n" - } - ] - ) - - # Format the final result - result = f"## Web Research Results\n\n### Summary\n{summary}\n\n### Detailed Results{result_data}" - return result - - async def run_web_query(self, prompt: str) -> List[Dict]: - if prompt=="": - return [] - - ptext=f"""Create a comprehensive web research strategy for the task = '{prompt.replace("'","")}'. Return the results as a Python dictionary with these specific keys. If specific url are provided and the task requires analysis only on the provided url, then leave 'skey' open. - - 'url': A list of maximum {self.max_url} specific URLs extracted from the task string. - - 'skey': A list of maximum {self.max_key} key sentences to search for on the web. These should be precise, diverse, and targeted to get the most relevant information. - - Format your response as a valid json object with these two keys. Do not include any explanatory text or markdown outside of the object definition. - """ - - content_text = await self.ai_service.call_api( - messages=[ - { - "role": "system", - "content": "You are a web research expert who develops precise search strategies." 
-                },
-                {
-                    "role": "user",
-                    "content": ptext
-                }
-            ]
-        )
-        # Remove markdown formatting if present
-        if content_text.startswith("```json"):
-            # Find the end of the JSON block
-            end_marker = "```"
-            end_index = content_text.rfind(end_marker)
-            if end_index != -1:
-                # Extract the JSON content without the markdown markers
-                content_text = content_text[7:end_index].strip()
-
-        # Now parse the JSON
-        try:
-            logger.info(f"Parsing strategy response: {str(content_text)[:200]}")
-            pjson = json.loads(content_text)
-            # Now call scrape_json with the parsed dictionary
-            result_json = await self.scrape_json(pjson)
-            return result_json
-        except json.JSONDecodeError as e:
-            logger.error(f"Failed to parse JSON: {e}")
-            logger.error(f"Cleaned content: {content_text[:100]}...")
-            return []
-
-    async def scrape_json(self, research_strategy: Dict[str, List]) -> List[Dict]:
-        """
-        Scrapes web content based on a research strategy JSON.
-
-        Args:
-            research_strategy: A dictionary containing:
-                - 'skey': List of search keywords
-                - 'url': List of direct URLs to scrape
-
-        Returns:
-            List of result dictionaries with the keys title, url, snippet and data
-        """
-
-        logger.info("Starting JSON-based web scraping")
-        results = []
-
-        # Validate the input structure; return an empty result list on malformed input
-        if not isinstance(research_strategy, dict):
-            logger.error("Invalid research_strategy format: not a dictionary")
-            return []
-
-        keys = research_strategy.get("skey", [])
-        direct_urls = research_strategy.get("url", [])
-
-        if not isinstance(keys, list) or not isinstance(direct_urls, list):
-            logger.error("Invalid research_strategy format: 'skey' or 'url' is not a list")
-            return []
-
-        # Process search keywords through the search engine
-        for keyword in keys:
-            logger.info(f"Processing keyword: {keyword}")
-            found_results = self.search_web(keyword)  # List of dicts: title, url, snippet, data
-            logger.info(f"... {len(found_results)} results found")
-            results.extend(found_results)
-
-        # Process direct URLs, skipping any that the keyword search already scraped
-        logger.info(f"Processing {len(direct_urls)} direct URLs")
-        scraped_urls = {r.get('url') for r in results}
-        for url in direct_urls:
-            if url in scraped_urls:
-                logger.info(f"Skipping already scraped URL: {url}")
-                continue
-            soup = self.read_url(url)
-
-            # Extract the title from the page if it exists
-            if isinstance(soup, BeautifulSoup):
-                title_tag = soup.find('title')
-                title = title_tag.text.strip() if title_tag else "No title"
-
-                # Fall back to the first h1 tag if the title tag is missing
-                if title == "No title":
-                    h1_tag = soup.find('h1')
-                    if h1_tag:
-                        title = h1_tag.text.strip()
-            else:
-                # Handle the case where soup is an error message string
-                title = "Error fetching page"
-
-            results.append(self.parse_result(soup, title, url))
-        logger.info(f"JSON scraping completed. Scraped {len(results)} URLs in total")
-        return results
-
-    def extract_main_content(self, soup: BeautifulSoup, max_chars: int = int(APP_CONFIG("Connector_AiWebscraping_MAX_TOKENS"))) -> str:
-        """
-        Extract the main content from an HTML page while limiting character count.
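-
-        Example (a minimal sketch; the HTML string and the `agent` instance
-        are hypothetical):
-
-            from bs4 import BeautifulSoup
-            soup = BeautifulSoup("<main><p>Hello world</p></main>", 'html.parser')
-            text = agent.extract_main_content(soup, max_chars=100)
-            # -> "Hello world"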
- - Args: - soup: BeautifulSoup object containing the page content - max_chars: Maximum number of characters to extract - - Returns: - Extracted main content as string - """ - if not isinstance(soup, BeautifulSoup): - return str(soup)[:max_chars] - - # Try to find main content elements in order of priority - main_content = None - for selector in ['main', 'article', '#content', '.content', '#main', '.main']: - content = soup.select_one(selector) - if content: - main_content = content - break - - # If no main content found, use the body - if not main_content: - main_content = soup.find('body') or soup - - # Remove script, style, nav, footer elements that don't contribute to main content - for element in main_content.select('script, style, nav, footer, header, aside, .sidebar, #sidebar, .comments, #comments, .advertisement, .ads, iframe'): - element.extract() - - # Extract text content - text_content = main_content.get_text(separator=' ', strip=True) - - # Limit to max_chars - return text_content[:max_chars] - - def tokenize_for_counting(self, text: str) -> List[str]: - """ - Simple token counter for estimating token usage. - This is an approximation since the exact tokenization depends on the model. - - Args: - text: Input text - - Returns: - List of tokens - """ - # Simple tokenization by splitting on whitespace and punctuation - import re - return re.findall(r'\w+|[^\w\s]', text) - - def count_tokens(self, text: str) -> int: - """ - Count the approximate number of tokens in a text. - - Args: - text: Input text - - Returns: - Estimated token count - """ - tokens = self.tokenize_for_counting(text) - return len(tokens) - - def limit_text_for_api(self, text: str, max_tokens: int = int(APP_CONFIG.get("Connector_AiWebscraping_MAX_TOKENS"))) -> str: - """ - Limit the text to a maximum number of tokens. - - Args: - text: Input text - max_tokens: Maximum number of tokens allowed - - Returns: - Limited text - """ - if not text: - return "" - - tokens = self.tokenize_for_counting(text) - - # If text is already under the limit, return as is - if len(tokens) <= max_tokens: - return text - - # Otherwise, truncate text to max_tokens - return " ".join(tokens[:max_tokens]) + "... 
[content truncated due to length]"
-
-    def search_web(self, query: str) -> List[Dict]:
-        formatted_query = quote_plus(query)
-        search_engine = APP_CONFIG("Connector_AiWebscraping_SEARCH_ENGINE")
-        url = f"{search_engine}{formatted_query}"
-
-        search_results_soup = self.read_url(url)
-        if not search_results_soup or not search_results_soup.select('.result'):
-            logger.warning(f"No search results found for: {query}")
-            return []
-
-        # Extract search results
-        results = []
-
-        # Find all result containers
-        result_elements = search_results_soup.select('.result')
-
-        for result in result_elements:
-            # Extract title
-            title_element = result.select_one('.result__a')
-            title = title_element.text.strip() if title_element else 'No title'
-
-            # Extract URL (DuckDuckGo uses redirects, need to extract from the href param)
-            url_element = title_element.get('href') if title_element else ''
-            extracted_url = 'No URL'
-
-            if url_element:
-                # Extract the actual URL from DuckDuckGo's redirect
-                if url_element.startswith('/d.js?q='):
-                    start = url_element.find('?q=') + 3  # Skip '?q='
-                    end = url_element.find('&', start) if '&' in url_element[start:] else None
-                    extracted_url = unquote(url_element[start:end])
-
-                    # Make sure the URL has the correct protocol prefix
-                    if not extracted_url.startswith(('http://', 'https://')):
-                        if not extracted_url.startswith('//'):
-                            extracted_url = 'https://' + extracted_url
-                        else:
-                            extracted_url = 'https:' + extracted_url
-                else:
-                    extracted_url = url_element
-
-            # Extract the snippet directly from the search results page
-            snippet_element = result.select_one('.result__snippet')
-            snippet = snippet_element.text.strip() if snippet_element else 'No description'
-
-            # Now fetch the actual page content for the data field
-            target_page_soup = self.read_url(extracted_url)
-
-            # Use the content extraction method to limit content size
-            content = self.extract_main_content(target_page_soup, max_chars=int(APP_CONFIG("Connector_AiWebscraping_MAX_TOKENS")))
-
-            results.append({
-                'title': title,
-                'url': extracted_url,
-                'snippet': snippet,
-                'data': content
-            })
-
-            # Limit the number of results if needed
-            if len(results) >= self.max_result:
-                break
-
-        return results
-
-    def read_url(self, url: str) -> BeautifulSoup:
-        """
-        Reads a URL and returns a BeautifulSoup parser for the content.
-        Returns an empty BeautifulSoup object on errors.
-
-        Args:
-            url: The URL to read
-
-        Returns:
-            BeautifulSoup object with the content, or an empty one on errors
-        """
-        headers = {
-            'User-Agent': APP_CONFIG("Connector_AiWebscraping_USER_AGENT"),
-            'Accept': 'text/html,application/xhtml+xml,application/xml',
-            'Accept-Language': 'en-US,en;q=0.9',
-        }
-
-        try:
-            # Initial request
-            response = requests.get(url, headers=headers, timeout=int(APP_CONFIG.get("Connector_AiWebscraping_TIMEOUT")))
-
-            # Polling for status 202
-            if response.status_code == 202:
-                # Up to four retries with increasing intervals
-                backoff_times = [0.5, 1.0, 2.0, 5.0]  # 0.5s, 1s, 2s, 5s
-
-                for wait_time in backoff_times:
-                    time.sleep(wait_time)  # Wait with increasing time
-                    response = requests.get(url, headers=headers, timeout=int(APP_CONFIG.get("Connector_AiWebscraping_TIMEOUT")))
-
-                    # Stop polling once the status is no longer 202
-                    if response.status_code != 202:
-                        break
-
-            # For other error statuses, raise an error
-            response.raise_for_status()
-
-            # Parse HTML
-            return BeautifulSoup(response.text, 'html.parser')
-
-        except Exception as e:
-            # Log the failure and return an empty BeautifulSoup object
-            logger.warning(f"Error reading URL {url}: {str(e)}")
-            return BeautifulSoup("
", 'html.parser') - - def parse_result(self, data: BeautifulSoup, title: str, url: str) -> Dict[str, str]: - """ - Parse a BeautifulSoup object into a result dictionary. - - Args: - data: BeautifulSoup object containing the page content - title: Page title - url: Page URL - - Returns: - Dictionary with result data - """ - # Extract content using the main content extraction method - content = self.extract_main_content(data, max_chars=int(APP_CONFIG("Connector_AiWebscraping_MAX_TOKENS"))) - - result = { - 'title': title, - 'url': url, - 'snippet': 'No description', # Default value - 'data': content - } - return result - - -# Singleton instance -_webcrawler_agent = None - -def get_webcrawler_agent(): - """Returns a singleton instance of the WebCrawler Agent""" - global _webcrawler_agent - if _webcrawler_agent is None: - _webcrawler_agent = WebcrawlerAgent() - return _webcrawler_agent \ No newline at end of file diff --git a/modules/agentservice_base.py b/modules/agentservice_base.py deleted file mode 100644 index f09e071b..00000000 --- a/modules/agentservice_base.py +++ /dev/null @@ -1,233 +0,0 @@ -""" -Enhanced base agent class for the Agentservice. -Provides improved communication and document handling capabilities. -""" - -import logging -import json -from typing import Dict, Any, List, Optional, Tuple, Union -import asyncio -from datetime import datetime -import uuid - -logger = logging.getLogger(__name__) - -class AgentBase: - """ - Enhanced base agent class with improved communication capabilities. - All specialized agents should inherit from this class. - """ - - def __init__(self): - """Initialize the enhanced agent.""" - self.name = "base" - self.capabilities = "Basic agent operations" - self.result_format = "Text" - # System dependencies - self.ai_service = None - - def set_dependencies(self, ai_service=None, document_handler=None, lucydom_interface=None): - self.ai_service = ai_service - - def get_config(self) -> Dict[str, Any]: - """ - Get detailed information about the agent. - - Returns: - Dictionary with agent information - """ - return { - "name": self.name, - "capabilities": self.capabilities, - "result_format": self.result_format, - } - - def get_capabilities(self) -> List[str]: - """ - Get a list of agent capabilities. - - Returns: - List of capability strings - """ - # Split capabilities into a list - if isinstance(self.capabilities, str): - return [cap.strip() for cap in self.capabilities.split(",")] - return [] - - def get_supported_formats(self) -> List[str]: - """ - Get supported output formats. - - Returns: - List of supported format strings - """ - if isinstance(self.result_format, str): - return [fmt.strip() for fmt in self.result_format.split(",")] - return ["Text"] - - async def process_message(self, message: Dict[str, Any], context: Dict[str, Any] = None) -> Dict[str, Any]: - """ - Process a message and generate a response. - - Args: - message: Input message - context: Optional context information - - Returns: - Response message - """ - # Basic implementation - should be overridden by specialized agents - if not self.ai_service: - logger.warning(f"Agent {self.id} has no AI service configured") - return { - "role": "assistant", - "content": f"I'm {self.name}, but I'm not properly configured. 
Please set up the AI service.", - "agent_id": self.id, - "agent_type": self.type, - "result_format": "Text" - } - - # Process documents if available and set up document handler - document_context = "" - if self.supports_documents and self.document_handler and message.get("documents"): - document_context = await self._process_documents(message) - - # Create enhanced prompt - prompt = self._create_enhanced_prompt(message, document_context, context) - - # Generate response - try: - response_content = await self.ai_service.call_api([ - {"role": "system", "content": self._get_system_prompt()}, - {"role": "user", "content": prompt} - ]) - - # Process the response to extract any special instructions or status - content, status = self._process_response(response_content) - - return { - "role": "assistant", - "content": content, - "agent_id": self.id, - "agent_type": self.type, - "agent_name": self.name, - "result_format": self.result_format, - "status": status, - "workflow_id": message.get("workflow_id"), - "documents": message.get("documents", []) # Pass through documents - } - except Exception as e: - logger.error(f"Error in agent {self.id}: {str(e)}") - return { - "role": "assistant", - "content": f"I encountered an error: {str(e)}", - "agent_id": self.id, - "agent_type": self.type, - "result_format": "Text", - "status": "error" - } - - async def _process_documents(self, message: Dict[str, Any]) -> str: - """ - Process documents in the message. - - Args: - message: Input message with documents - - Returns: - Document context as text - """ - # Simply extract text from documents - if not self.document_handler: - return "" - - return self.document_handler.merge_document_contents(message) - - def _create_enhanced_prompt(self, message: Dict[str, Any], document_context: str, context: Dict[str, Any] = None) -> str: - """ - Create an enhanced prompt with context. - - Args: - message: Input message - document_context: Document context - context: Optional additional context - - Returns: - Enhanced prompt - """ - prompt = message.get("content", "") - - # Add document context if available - if document_context: - prompt += f"\n\n=== DOCUMENT CONTEXT ===\n{document_context}" - - # Add any additional context - if context: - # Add expected format if specified - if "expected_format" in context: - prompt += f"\n\nPlease format your response as: {context['expected_format']}" - - # Add dependency outputs if available - if "dependency_outputs" in context: - prompt += "\n\n=== OUTPUTS FROM PREVIOUS ACTIVITIES ===\n" - for key, value in context["dependency_outputs"].items(): - if isinstance(value, dict) and "content" in value: - prompt += f"\n--- {key} ---\n{value['content']}\n" - else: - prompt += f"\n--- {key} ---\n{str(value)}\n" - - return prompt - - def _get_system_prompt(self) -> str: - """ - Get the system prompt for the agent. - - Returns: - System prompt string - """ - return f""" - You are {self.name}, a specialized {self.type} agent. - - {self.description} - - Your capabilities include: {self.capabilities} - - You should format your responses according to: {self.result_format} - - Respond clearly and helpfully to the user's request. 
-        When appropriate, include a status indicator at the end of your message:
-
-        [STATUS: COMPLETE] - When you've fully addressed the request
-        [STATUS: PARTIAL] - When you've partially addressed the request
-        [STATUS: QUESTION] - When you need more information
-        """
-
-    def _process_response(self, response: str) -> Tuple[str, str]:
-        """
-        Process the response to extract status and clean content.
-
-        Args:
-            response: Raw response from the AI
-
-        Returns:
-            Tuple of (cleaned content, status)
-        """
-        # Default status
-        status = "complete"
-
-        # Check for status tags
-        import re
-        status_match = re.search(r'\[STATUS:\s*(COMPLETE|PARTIAL|QUESTION)\]', response, re.IGNORECASE)
-
-        if status_match:
-            status_value = status_match.group(1).lower()
-            # Remove the status tag
-            content = re.sub(r'\[STATUS:\s*(COMPLETE|PARTIAL|QUESTION)\]', '', response, flags=re.IGNORECASE).strip()
-            return content, status_value
-
-        return response, status
-
-# Factory functions
-def get_enhanced_base_agent() -> AgentBase:
-    """Get an instance of the enhanced base agent."""
-    return AgentBase()
diff --git a/modules/agentservice_dataextraction.py b/modules/agentservice_dataextraction.py
deleted file mode 100644
index c05e6768..00000000
--- a/modules/agentservice_dataextraction.py
+++ /dev/null
@@ -1,921 +0,0 @@
-"""
-Refactored helper functions for intelligent data extraction (continued).
-"""
-
-import logging
-import json
-from typing import List, Dict, Any, Optional, Tuple
-import asyncio
-from datetime import datetime
-import uuid
-
-logger = logging.getLogger(__name__)
-
-async def data_extraction(
-    prompt: str,
-    files: List[Dict[str, Any]],
-    messages: List[Dict[str, Any]],
-    ai_service,
-    lucydom_interface = None,
-    workflow_id: str = None,
-    add_log_func = None,
-    document_handler = None  # Optional document handler for structured operations
-) -> Dict[str, Any]:
-    """
-    Performs AI-driven data extraction with improved document and image handling.
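-
-    Example call (a sketch; the file entry and the ai_service instance are
-    hypothetical, not part of this module):
-
-        result = await data_extraction(
-            prompt="Extract all invoice totals",
-            files=[{"id": 1, "name": "invoice.pdf", "type": "document"}],
-            messages=[],
-            ai_service=ai_service,
-        )
-        print(result["status"], result["files_processed"])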
- - Args: - prompt: Specification of what data to extract - files: List of all available files with metadata - messages: List of all messages in the workflow - ai_service: Service for AI requests - lucydom_interface: Interface for database access (optional) - workflow_id: Optional workflow ID for logging - add_log_func: Optional function for adding logs - document_handler: Optional document handler for structured document operations - - Returns: - Structured text object with extracted data and context information - """ - try: - # Log extraction start - _log(add_log_func, workflow_id, f"Starting data extraction with {len(files)} files", "info") - - # Create enhanced extraction plan using AI - _log(add_log_func, workflow_id, "Creating extraction plan", "info") - extraction_plan = await _create_extraction_plan(prompt, files, messages, ai_service, workflow_id, add_log_func) - - # If we have extraction plan, log summary - if extraction_plan: - extract_needed_count = sum(1 for item in extraction_plan if item.get("extract_needed", False)) - _log(add_log_func, workflow_id, - f"Extraction plan created: {len(extraction_plan)} files, {extract_needed_count} need extraction", "info") - - # Execute extractions, preferring document handler if available - if document_handler: - _log(add_log_func, workflow_id, "Using document handler for extraction", "info") - extracted_data = await _execute_extractions_with_handler( - extraction_plan, - files, - messages, - document_handler, - ai_service, - workflow_id, - add_log_func - ) - else: - # Fall back to original implementation - _log(add_log_func, workflow_id, "Using fallback extraction method", "info") - extracted_data = await _execute_extractions( - extraction_plan, - files, - messages, - lucydom_interface, - ai_service, - workflow_id, - add_log_func - ) - - # Structure extracted data - _log(add_log_func, workflow_id, f"Structuring extracted data from {len(extracted_data)} files", "info") - structured_result = _structure_extracted_data(extracted_data, files, prompt) - - # Enhance with contextual summaries using AI - if ai_service and structured_result["extracted_content"]: - _log(add_log_func, workflow_id, "Creating contextual summaries for extracted content", "info") - - try: - # Create a prompt for contextual summary - summary_prompt = f""" - Create concise, contextual summaries of the following extracted content according to this requirement: - - REQUIREMENT: {prompt} - - EXTRACTED CONTENT: - """ - - for item in structured_result["extracted_content"]: - file_name = item.get("name", "Unnamed file") - content_preview = item.get("content", "")[:500] + "..." 
if len(item.get("content", "")) > 500 else item.get("content", "") - summary_prompt += f"\n--- {file_name} ---\n{content_preview}\n" - - # Call AI for contextual summaries - summaries = await ai_service.call_api([{"role": "user", "content": summary_prompt}]) - structured_result["contextual_summary"] = summaries - - _log(add_log_func, workflow_id, "Added contextual summaries to extracted data", "info") - except Exception as e: - _log(add_log_func, workflow_id, f"Error creating contextual summaries: {str(e)}", "warning") - - # Handle image-specific content separately - image_content = [item for item in structured_result["extracted_content"] - if "Image Analysis" in item.get("content", "") or item.get("type") == "image"] - - if image_content and len(image_content) > 0: - _log(add_log_func, workflow_id, f"Processing {len(image_content)} image-related content items", "info") - - # Add image analysis summary if we have AI service - if ai_service: - try: - # Create a prompt for image analysis summary - image_summary_prompt = f""" - Summarize the key visual information from these image analyses according to this requirement: - - REQUIREMENT: {prompt} - - IMAGE ANALYSES: - """ - - for item in image_content: - file_name = item.get("name", "Unnamed image") - content = item.get("content", "") - image_summary_prompt += f"\n--- {file_name} ---\n{content}\n" - - # Call AI for image analysis summary - image_summaries = await ai_service.call_api([{"role": "user", "content": image_summary_prompt}]) - structured_result["image_analysis_summary"] = image_summaries - - _log(add_log_func, workflow_id, "Added image analysis summary to extracted data", "info") - except Exception as e: - _log(add_log_func, workflow_id, f"Error creating image analysis summary: {str(e)}", "warning") - - return structured_result - - except Exception as e: - logger.error(f"Error in data extraction: {str(e)}", exc_info=True) - - # Add error log - if add_log_func and workflow_id: - add_log_func(workflow_id, f"Data extraction error: {str(e)}", "error") - - # Return error result - return { - "error": str(e), - "status": "error", - "files_processed": len(files), - "message": f"Data extraction failed: {str(e)}" - } - - -async def _execute_extractions_with_handler( - extraction_plan: List[Dict[str, Any]], - files: List[Dict[str, Any]], - messages: List[Dict[str, Any]], - document_handler, - ai_service, - workflow_id: str = None, - add_log_func = None -) -> List[Dict[str, Any]]: - """ - Execute extractions using the document handler with enhanced image processing. 
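-
-    Example (a sketch; the plan entry and service objects are hypothetical):
-
-        plan = [{"file_id": 1, "extract_needed": True,
-                 "extraction_prompt": "Extract the revenue table",
-                 "importance": 5}]
-        data = await _execute_extractions_with_handler(
-            plan, files, messages, document_handler, ai_service)
-        # Each entry carries keys such as "content" and "extraction_method"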
- - Args: - extraction_plan: List of extraction instructions - files: List of all available files - messages: List of all messages - document_handler: Document handler for structured operations - ai_service: Service for AI requests - workflow_id: Optional workflow ID for logging - add_log_func: Optional function for adding logs - - Returns: - List with extracted data per file - """ - extracted_data = [] - - # Sort by importance (highest first) - sorted_plan = sorted(extraction_plan, key=lambda x: x.get("importance", 0), reverse=True) - - for extraction_item in sorted_plan: - file_id = extraction_item.get("file_id") - extract_needed = extraction_item.get("extract_needed", False) - extraction_prompt = extraction_item.get("extraction_prompt", "") - - # Find file metadata - file_metadata = next((f for f in files if f.get("id") == file_id), None) - - if not file_metadata: - logger.warning(f"File with ID {file_id} not found") - continue - - file_name = file_metadata.get("name", "") - file_type = file_metadata.get("type", "") - content_type = file_metadata.get("content_type", "") - - # Log extraction start - _log(add_log_func, workflow_id, - f"Processing file: {file_name} (Extraction needed: {extract_needed})", "info") - - # Only perform extraction if needed - if extract_needed: - # Check if file already exists in messages with content - existing_content = _find_document_in_messages(file_id, messages) - - if existing_content and existing_content.get("content"): - # Content already exists, check if we need more specialized extraction - current_context = existing_content.get("extraction_context", "") - - # Check if new extraction prompt is different or more specific - if extraction_prompt and extraction_prompt != current_context: - _log(add_log_func, workflow_id, - f"Re-extracting {file_name} with new prompt: {extraction_prompt}", "info") - - # Create an empty message to extract into - empty_message = {} - - # Use document handler to extract with new context - try: - result_message = await document_handler.add_file_to_message( - empty_message, - file_id, - extraction_prompt - ) - - # Get the document content from result - if "documents" in result_message and result_message["documents"]: - doc = result_message["documents"][0] - - # Get text content - content_text = "" - is_extracted = False - - for content in doc.get("contents", []): - if content.get("type") == "text": - content_text = content.get("text", "") - is_extracted = content.get("is_extracted", False) - break - - # Create extraction result - extracted_data.append({ - "file_id": file_id, - "name": file_name, - "type": file_type, - "content": content_text, - "is_extracted": is_extracted, - "extraction_method": "document_handler_reextract", - "extraction_context": extraction_prompt - }) - - # Check for additional documents (e.g., extracted images) - for additional_doc in result_message.get("documents", [])[1:]: - source = additional_doc.get("source", {}) - - # Skip if not an extracted document - if source.get("type") != "extracted": - continue - - # Get content - add_content_text = "" - add_is_extracted = False - - for content in additional_doc.get("contents", []): - if content.get("type") == "text": - add_content_text = content.get("text", "") - add_is_extracted = content.get("is_extracted", False) - break - - # Add as separate extraction result - if add_content_text: - extracted_data.append({ - "file_id": source.get("id", f"extracted_{uuid.uuid4()}"), - "name": source.get("name", f"Extracted from {file_name}"), - "type": 
source.get("content_type", "image"), - "content": add_content_text, - "is_extracted": add_is_extracted, - "extraction_method": "document_handler_extracted_component", - "extraction_context": content.get("extraction_context", extraction_prompt), - "parent_file_id": file_id - }) - - _log(add_log_func, workflow_id, - f"Extracted embedded content from {file_name}", "info") - - _log(add_log_func, workflow_id, - f"Re-extracted {file_name} with new context", "info") - - continue - except Exception as e: - logger.error(f"Error re-extracting {file_name}: {str(e)}") - _log(add_log_func, workflow_id, - f"Error re-extracting {file_name}: {str(e)}", "warning") - - # Use existing content - extracted_data.append({ - "file_id": file_id, - "name": file_name, - "type": file_type, - "content": existing_content.get("content", ""), - "is_extracted": existing_content.get("is_extracted", False), - "extraction_method": "existing_content", - "extraction_context": current_context - }) - - _log(add_log_func, workflow_id, - f"Using existing content for {file_name}", "info") - - continue - - # Need to extract content with document handler - try: - # Create an empty message to extract into - empty_message = {} - - # Use document handler to add file and extract content - result_message = await document_handler.add_file_to_message( - empty_message, - file_id, - extraction_prompt - ) - - # Get the document content from result - if "documents" in result_message and result_message["documents"]: - # Process main document - doc = result_message["documents"][0] # First document is the main file - - # Get text content - content_text = "" - is_extracted = False - - for content in doc.get("contents", []): - if content.get("type") == "text": - content_text = content.get("text", "") - is_extracted = content.get("is_extracted", False) - break - - # Create extraction result for main document - extracted_data.append({ - "file_id": file_id, - "name": file_name, - "type": file_type, - "content": content_text, - "is_extracted": is_extracted, - "extraction_method": "document_handler", - "extraction_context": extraction_prompt - }) - - _log(add_log_func, workflow_id, - f"Extracted {file_name} using document handler", "info") - - # Process additional documents (e.g., extracted images) - for additional_doc in result_message.get("documents", [])[1:]: - source = additional_doc.get("source", {}) - - # Skip if not an extracted document - if source.get("type") != "extracted": - continue - - # Get content - add_content_text = "" - add_is_extracted = False - - for content in additional_doc.get("contents", []): - if content.get("type") == "text": - add_content_text = content.get("text", "") - add_is_extracted = content.get("is_extracted", False) - break - - # Add as separate extraction result - if add_content_text: - extracted_data.append({ - "file_id": source.get("id", f"extracted_{uuid.uuid4()}"), - "name": source.get("name", f"Extracted from {file_name}"), - "type": source.get("content_type", "image"), - "content": add_content_text, - "is_extracted": add_is_extracted, - "extraction_method": "document_handler_extracted_component", - "extraction_context": content.get("extraction_context", extraction_prompt), - "parent_file_id": file_id - }) - - _log(add_log_func, workflow_id, - f"Extracted embedded content from {file_name}", "info") - else: - # Extraction failed - extracted_data.append({ - "file_id": file_id, - "name": file_name, - "type": file_type, - "content": f"Failed to extract content from {file_name}", - "is_extracted": False, - 
"extraction_method": "failed" - }) - - _log(add_log_func, workflow_id, - f"Failed to extract content from {file_name}", "warning") - except Exception as e: - logger.error(f"Error extracting {file_name}: {str(e)}") - - _log(add_log_func, workflow_id, - f"Error extracting {file_name}: {str(e)}", "warning") - - extracted_data.append({ - "file_id": file_id, - "name": file_name, - "type": file_type, - "content": f"Error extracting: {str(e)}", - "is_extracted": False, - "extraction_method": "error" - }) - else: - # No extraction needed, use existing content - existing_content = _find_document_in_messages(file_id, messages) - - if existing_content: - extracted_data.append({ - "file_id": file_id, - "name": file_name, - "type": file_type, - "content": existing_content.get("content", ""), - "is_extracted": existing_content.get("is_extracted", False), - "extraction_method": "existing_content", - "extraction_context": existing_content.get("extraction_context", "") - }) - - _log(add_log_func, workflow_id, - f"Using existing content for {file_name}", "info") - else: - # No existing content found - extracted_data.append({ - "file_id": file_id, - "name": file_name, - "type": file_type, - "content": f"No content available for {file_name}", - "is_extracted": False, - "extraction_method": "none" - }) - - _log(add_log_func, workflow_id, - f"No content available for {file_name}", "warning") - - return extracted_data - - -def _find_document_in_messages(file_id: int, messages: List[Dict[str, Any]]) -> Dict[str, Any]: - """ - Find a document by file ID in workflow messages. - - Args: - file_id: ID of the file to find - messages: List of messages to search - - Returns: - Dictionary with document information or empty dict if not found - """ - for message in messages: - for doc_index, document in enumerate(message.get("documents", [])): - source = document.get("source", {}) - - # Check if file ID matches - if source.get("id") == str(file_id) or source.get("id") == file_id: - # Found the document - content_text = "" - is_extracted = False - - # Look for text content - for content in document.get("contents", []): - if content.get("type") == "text": - content_text = content.get("text", "") - is_extracted = content.get("is_extracted", False) - break - - return { - "document_id": document.get("id"), - "message_id": message.get("id"), - "content": content_text, - "is_extracted": is_extracted - } - - return {} - - -async def _create_extraction_plan( - prompt: str, - files: List[Dict[str, Any]], - messages: List[Dict[str, Any]], - ai_service, - workflow_id: str = None, - add_log_func = None -) -> List[Dict[str, Any]]: - """ - Erstellt einen Extraktionsplan mit AI-Unterstützung. 
- - Args: - prompt: Spezifizierung, welche Daten extrahiert werden sollen - files: Liste aller verfügbaren Dateien mit Metadaten - messages: Liste aller Nachrichten im Workflow - ai_service: Service für KI-Anfragen - workflow_id: Optionale ID des Workflows für Logging - add_log_func: Optionale Funktion für das Hinzufügen von Logs - - Returns: - Extraktionsplan (Liste von Extraktionsanweisungen pro Datei) - """ - # Erstelle Kontext-Informationen für den AI Call - file_infos = [] - for file in files: - # Basis-Metadaten - file_info = { - "id": file.get("id", ""), - "name": file.get("name", ""), - "type": file.get("type", ""), - "content_type": file.get("content_type", ""), - "size": file.get("size", "") - } - - # Extraktionsstatus prüfen (falls vorhanden) - doc_contents = _extract_document_contents_from_messages(file.get("id", ""), messages) - - if doc_contents: - # Prüfen, ob mindestens ein Content mit is_extracted=True existiert - already_extracted = any( - content.get("is_extracted", False) for content in doc_contents - ) - file_info["already_extracted"] = already_extracted - - # Eine kurze Vorschau des Inhalts hinzufügen (falls verfügbar) - for content in doc_contents: - if content.get("type") == "text" and content.get("text"): - preview_text = content.get("text", "")[:200] + "..." if len(content.get("text", "")) > 200 else content.get("text", "") - file_info["content_preview"] = preview_text - break - else: - file_info["already_extracted"] = False - - file_infos.append(file_info) - - # AI-Prompt erstellen - extraction_prompt = f""" - Du bist ein Datenextraktionsexperte, der mithilfe von KI-Analyse entscheidet, welche Dateien - und Inhalte für eine bestimmte Aufgabe extrahiert werden müssen. - - AUFGABE: - {prompt} - - VERFÜGBARE DATEIEN: - {json.dumps(file_infos, indent=2)} - - Für jede Datei, die für die Aufgabe relevant ist, erstelle eine Extraktionsanweisung mit den folgenden Informationen: - 1. file_id: Die ID der zu extrahierenden Datei - 2. extract_needed: Boolean, ob eine Extraktion erforderlich ist (True, wenn die Datei noch nicht extrahiert wurde und für die Aufgabe benötigt wird) - 3. extraction_prompt: Ein spezifischer Prompt für die Extraktion der Datei (besonders wichtig für Bilder und nicht-textbasierte Dateien) - 4. importance: Priorität/Wichtigkeit für die Aufgabe (1-5, wobei 5 am wichtigsten ist) - - Format: - [ - {{ - "file_id": 1234, - "extract_needed": true, - "extraction_prompt": "Extrahiere die Tabellendaten mit Fokus auf die Umsatzzahlen", - "importance": 5 - }}, - ... - ] - - Gib nur das JSON-Array zurück, ohne weitere Erklärungen. 
- """ - - # Log hinzufügen - if add_log_func and workflow_id: - add_log_func(workflow_id, "Extraktionsplan wird erstellt...", "info") - - try: - # AI-Call durchführen - extraction_plan_response = await ai_service.call_api([{"role": "user", "content": extraction_prompt}]) - - # JSON aus der Antwort extrahieren - import re - json_match = re.search(r'\[.*\]', extraction_plan_response, re.DOTALL) - - if json_match: - extraction_plan = json.loads(json_match.group(0)) - - # Log hinzufügen - if add_log_func and workflow_id: - add_log_func( - workflow_id, - f"Extraktionsplan erstellt für {len(extraction_plan)} Dateien", - "info" - ) - - return extraction_plan - else: - # Fallback bei Parsing-Problemen - if add_log_func and workflow_id: - add_log_func( - workflow_id, - "Parsing-Fehler beim Extraktionsplan, erstelle Standard-Plan", - "warning" - ) - - # Standard-Plan: Alle nicht extrahierten Dateien extrahieren - default_plan = [] - for file in files: - doc_contents = _extract_document_contents_from_messages(file.get("id", ""), messages) - already_extracted = any( - content.get("is_extracted", False) for content in doc_contents - ) if doc_contents else False - - default_plan.append({ - "file_id": file.get("id", 0), - "extract_needed": not already_extracted, - "extraction_prompt": f"Extrahiere alle relevanten Informationen aus {file.get('name', '')}", - "importance": 3 - }) - - return default_plan - - except Exception as e: - logger.error(f"Fehler bei der Erstellung des Extraktionsplans: {str(e)}", exc_info=True) - - if add_log_func and workflow_id: - add_log_func( - workflow_id, - f"Fehler bei der Erstellung des Extraktionsplans: {str(e)}", - "error" - ) - - # Leerer Plan bei Fehlern - return [] - -async def _execute_extractions( - extraction_plan: List[Dict[str, Any]], - files: List[Dict[str, Any]], - messages: List[Dict[str, Any]], - lucydom_interface, - ai_service, - workflow_id: str = None, - add_log_func = None, - logging_utils = None -) -> List[Dict[str, Any]]: - """ - Execute the planned extractions. 
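-
-    Example (a sketch; assumes a lucydom_interface that can read the files):
-
-        data = await _execute_extractions(
-            plan, files, messages, lucydom_interface, ai_service)
-        # Each entry carries "content", "is_extracted" and "extraction_method"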
- - Args: - extraction_plan: List of extraction instructions - files: List of all available files - lucydom_interface: Interface for database access - ai_service: Service for AI requests - workflow_id: Optional workflow ID for logging - add_log_func: Optional function for adding logs - logging_utils: Optional logging utility - - Returns: - List with extracted data per file - """ - extracted_data = [] - - # Sort by importance - sorted_plan = sorted(extraction_plan, key=lambda x: x.get("importance", 0), reverse=True) - - for extraction_item in sorted_plan: - file_id = extraction_item.get("file_id") - extract_needed = extraction_item.get("extract_needed", False) - extraction_prompt = extraction_item.get("extraction_prompt", "") - - # Find file metadata - file_metadata = next((f for f in files if f.get("id") == file_id), None) - - if not file_metadata: - logger.warning(f"File with ID {file_id} not found") - continue - - file_name = file_metadata.get("name", "") - file_type = file_metadata.get("type", "") - content_type = file_metadata.get("content_type", "") - - # Add log - if logging_utils: - logging_utils.info(f"Processing file: {file_name} (Extraction needed: {extract_needed})", "extraction") - elif add_log_func and workflow_id: - add_log_func( - workflow_id, - f"Processing file: {file_name} (Extraction needed: {extract_needed})", - "info" - ) - - # Only perform extraction if needed - if extract_needed: - # Get file content via LucyDOM interface - if lucydom_interface: - try: - file_content = await lucydom_interface.read_file_content(file_id) - - if not file_content: - if logging_utils: - logging_utils.warning(f"File {file_name} not found", "extraction") - elif add_log_func and workflow_id: - add_log_func(workflow_id, f"File {file_name} not found", "warning") - continue - - # Perform extraction based on file type - if file_type == "image" or file_name.lower().endswith(('.jpg', '.jpeg', '.png', '.gif', '.webp')): - # Image analysis with AI service - if ai_service and hasattr(ai_service, "analyze_image"): - try: - image_analysis = await ai_service.analyze_image( - image_data=file_content, - prompt=extraction_prompt, - mime_type=content_type - ) - - extracted_data.append({ - "file_id": file_id, - "name": file_name, - "type": file_type, - "content": image_analysis, - "is_extracted": True, - "extraction_method": "image_analysis" - }) - - if logging_utils: - logging_utils.info(f"Image {file_name} successfully analyzed", "extraction") - elif add_log_func and workflow_id: - add_log_func(workflow_id, f"Image {file_name} successfully analyzed", "info") - except Exception as e: - logger.error(f"Error analyzing image {file_name}: {str(e)}") - if logging_utils: - logging_utils.error(f"Error analyzing image {file_name}: {str(e)}", "extraction") - elif add_log_func and workflow_id: - add_log_func(workflow_id, f"Error analyzing image {file_name}: {str(e)}", "error") - else: - # Fallback if no image analysis available - extracted_data.append({ - "file_id": file_id, - "name": file_name, - "type": file_type, - "content": f"Image: {file_name} (Analysis not available)", - "is_extracted": False, - "extraction_method": "none" - }) - else: - # Text-based extraction for all other file types - try: - # Import directly here to avoid circular imports - from modules.agentservice_utils import extract_text_from_file_content - - content, is_extracted = extract_text_from_file_content( - file_content, file_name, content_type - ) - - extracted_data.append({ - "file_id": file_id, - "name": file_name, - "type": file_type, - 
"content": content, - "is_extracted": is_extracted, - "extraction_method": "text_extraction" - }) - - if logging_utils: - logging_utils.info(f"File {file_name} extracted (Status: {is_extracted})", "extraction") - elif add_log_func and workflow_id: - add_log_func( - workflow_id, - f"File {file_name} extracted (Status: {is_extracted})", - "info" - ) - except Exception as e: - logger.error(f"Error extracting text from {file_name}: {str(e)}") - if logging_utils: - logging_utils.error(f"Error extracting text from {file_name}: {str(e)}", "extraction") - elif add_log_func and workflow_id: - add_log_func(workflow_id, f"Error extracting text from {file_name}: {str(e)}", "error") - except Exception as e: - logger.error(f"Error reading file {file_name}: {str(e)}") - if logging_utils: - logging_utils.error(f"Error reading file {file_name}: {str(e)}", "extraction") - elif add_log_func and workflow_id: - add_log_func(workflow_id, f"Error reading file {file_name}: {str(e)}", "error") - else: - logger.warning(f"No LucyDOM interface available for file {file_name}") - if logging_utils: - logging_utils.warning(f"No LucyDOM interface available for file {file_name}", "extraction") - elif add_log_func and workflow_id: - add_log_func(workflow_id, f"No LucyDOM interface available for file {file_name}", "warning") - else: - # No extraction needed, use existing content - doc_contents = _extract_document_contents_from_messages(file_id, messages) - - if doc_contents: - # Use first text content - for content in doc_contents: - if content.get("type") == "text": - extracted_data.append({ - "file_id": file_id, - "name": file_name, - "type": file_type, - "content": content.get("text", ""), - "is_extracted": content.get("is_extracted", False), - "extraction_method": "existing_content" - }) - break - else: - # No existing content found - extracted_data.append({ - "file_id": file_id, - "name": file_name, - "type": file_type, - "content": f"No content available for {file_name}", - "is_extracted": False, - "extraction_method": "none" - }) - - return extracted_data - -def _structure_extracted_data( - extracted_data: List[Dict[str, Any]], - files: List[Dict[str, Any]], - prompt: str -) -> Dict[str, Any]: - """ - Structure the extracted data into a formatted result. 
- - Args: - extracted_data: List of extracted data per file - files: List of all available files - prompt: Original extraction prompt - - Returns: - Structured result object - """ - # Create base structure - result = { - "prompt": prompt, - "files_processed": len(extracted_data), - "total_files": len(files), - "extraction_timestamp": datetime.now().isoformat(), - "status": "success", - "extracted_content": [] - } - - # Add extracted content - for data_item in extracted_data: - # Enrich with file metadata - file_id = data_item.get("file_id", 0) - file_metadata = next((f for f in files if f.get("id") == file_id), {}) - - content_item = { - "file_id": file_id, - "name": data_item.get("name", file_metadata.get("name", "")), - "type": data_item.get("type", file_metadata.get("type", "")), - "content_type": file_metadata.get("content_type", ""), - "size": file_metadata.get("size", ""), - "is_extracted": data_item.get("is_extracted", False), - "extraction_method": data_item.get("extraction_method", ""), - "content": data_item.get("content", "") - } - - result["extracted_content"].append(content_item) - - return result - -def _extract_document_contents_from_messages(file_id: int, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - """ - Extract document contents for a specific file from workflow messages. - Enhanced to handle the new document structure. - - Args: - file_id: ID of the file - messages: List of all messages in the workflow - - Returns: - List of document contents for the specified file - """ - contents = [] - - for message in messages: - # Search documents in the message - for document in message.get("documents", []): - source = document.get("source", {}) - - # Check if file ID matches (handle both string and int comparison) - if (source.get("id") == file_id or - (isinstance(source.get("id"), str) and source.get("id") == str(file_id)) or - (isinstance(file_id, str) and source.get("id") == file_id)): - - # Add contents of the file - doc_contents = document.get("contents", []) - - if doc_contents: - # Ensure each content has document reference - for content in doc_contents: - content_copy = content.copy() - content_copy["document_id"] = document.get("id") - content_copy["message_id"] = message.get("id") - contents.append(content_copy) - - return contents - -def _log(add_log_func, workflow_id, message, log_type, agent_id=None, agent_name=None): - """Helper function for logging with different log functions""" - # Log via logger instance - if log_type == "error": - logger.error(message) - elif log_type == "warning": - logger.warning(message) - else: - logger.info(message) - - # Log via provided log function (if available) - if add_log_func and workflow_id: - add_log_func(workflow_id, message, log_type, agent_id, agent_name) \ No newline at end of file diff --git a/modules/agentservice_document_handler.py b/modules/agentservice_document_handler.py deleted file mode 100644 index 63d2dde6..00000000 --- a/modules/agentservice_document_handler.py +++ /dev/null @@ -1,890 +0,0 @@ -""" -Enhanced document handling module for the Agentservice (continued). -""" - -import os -import logging -import uuid -from datetime import datetime -from typing import List, Dict, Any, Optional, Tuple, Union - -logger = logging.getLogger(__name__) - -class DocumentHandler: - """ - Centralized document handler for consistent document management across the system. 
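-
-    Example usage (a sketch; the interface and service objects are assumed
-    to exist elsewhere):
-
-        handler = DocumentHandler(workflow_id="wf-1",
-                                  lucydom_interface=lucydom,
-                                  ai_service=ai)
-        message = await handler.add_file_to_message({}, file_id=42)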
- """ - - def __init__(self, workflow_id: str = None, lucydom_interface = None, ai_service = None): - """Initialize the document handler.""" - self.workflow_id = workflow_id - self.lucydom_interface = lucydom_interface - self.ai_service = ai_service - - # Import necessary utilities - from modules.agentservice_filemanager import get_file_manager - self.file_manager = get_file_manager() - - def set_workflow_id(self, workflow_id: str): - """Set or update the workflow ID.""" - self.workflow_id = workflow_id - - def set_lucydom_interface(self, lucydom_interface): - """Set or update the LucyDOM interface.""" - self.lucydom_interface = lucydom_interface - - def set_ai_service(self, ai_service): - """Set or update the AI service.""" - self.ai_service = ai_service - - - async def add_file_to_message(self, message: Dict[str, Any], file_id: int, extraction_prompt: str = None) -> Dict[str, Any]: - """ - Add a file to a message with contextual extraction. - - Args: - message: The message to add the file to - file_id: ID of the file to add - extraction_prompt: Optional prompt for contextual extraction (e.g., for images) - - Returns: - Updated message with the file added - """ - if not self.lucydom_interface: - logger.error("LucyDOM interface not available") - return message - - try: - # Get file metadata - file = self.lucydom_interface.get_file(file_id) - if not file: - logger.warning(f"File with ID {file_id} not found") - return message - - # Get necessary file information - file_name = file.get("name", "unnamed_file") - file_type = file.get("type", "unknown") - content_type = file.get("content_type") - - # Initialize documents array if needed - if "documents" not in message: - message["documents"] = [] - - # Check if file is already in the message - file_already_added = any( - doc.get("source", {}).get("id") == str(file_id) - for doc in message.get("documents", []) - ) - - if file_already_added: - logger.info(f"File {file_name} already exists in message, skipping") - return message - - # Create a unique document ID - doc_id = f"doc_{uuid.uuid4()}" - - # Create document structure - document = { - "id": doc_id, - "source": { - "type": "file", - "id": str(file_id), - "name": file_name, - "content_type": content_type, - "size": file.get("size"), - "upload_date": file.get("upload_date", datetime.now().isoformat()) - }, - "contents": [] - } - - # Only read content if we have extraction prompt or specific types - if (extraction_prompt or - file_type in ["document", "text"] or - (content_type and content_type.startswith("text/"))): - - # Read file content - file_content = await self.lucydom_interface.read_file_content(file_id) - - if file_content: - # Process based on file type - if file_type == "image" or (content_type and content_type.startswith("image/")): - # Image analysis if prompt provided - if self.ai_service and hasattr(self.ai_service, "analyze_image"): - try: - # Use provided prompt or default one - image_prompt = extraction_prompt or "Describe this image in detail" - - logger.info(f"Analyzing image {file_name} with prompt: {image_prompt}") - - image_analysis = await self.ai_service.analyze_image( - image_data=file_content, - prompt=image_prompt, - mime_type=content_type - ) - - # Add the analysis as text content - document["contents"].append({ - "type": "text", - "text": f"Image Analysis:\n{image_analysis}", - "is_extracted": True, - "extraction_context": extraction_prompt - }) - - logger.info(f"Added image analysis for {file_name} to message") - except Exception as e: - logger.error(f"Error 
analyzing image {file_name}: {str(e)}") - document["contents"].append({ - "type": "text", - "text": f"Image file: {file_name} (Analysis failed: {str(e)})", - "is_extracted": False - }) - else: - # Just add placeholder if no analysis available - document["contents"].append({ - "type": "text", - "text": f"Image file: {file_name} (no analysis requested)", - "is_extracted": False - }) - - # Enhanced PDF processing - extract text and images - elif file_name.lower().endswith('.pdf'): - logger.info(f"Processing PDF file: {file_name}") - - # Extract text content first - from modules.agentservice_utils import extract_text_from_file_content - - text_content, is_extracted = extract_text_from_file_content( - file_content, file_name, content_type - ) - - # Add text content - document["contents"].append({ - "type": "text", - "text": text_content, - "is_extracted": is_extracted, - "extraction_context": extraction_prompt - }) - - logger.info(f"Extracted text content from PDF {file_name}") - - # Extract and analyze images from PDF if we have AI service - if self.ai_service and hasattr(self.ai_service, "analyze_image"): - try: - # Import necessary modules - import fitz # PyMuPDF - from io import BytesIO - - # Add detailed logging - logger.info(f"Starting PDF image extraction for {file_name}") - - # Check if extraction prompt is available or use default - image_prompt = extraction_prompt or "Describe this image from the PDF document" - - # Open PDF from memory stream with detailed error checking - try: - pdf_document = fitz.open(stream=file_content, filetype="pdf") - logger.info(f"Successfully opened PDF with {len(pdf_document)} pages") - except Exception as pdf_open_error: - logger.error(f"Failed to open PDF: {str(pdf_open_error)}") - raise - - # Initialize images list and image count - images_analysis = [] - image_count = 0 - - # Process each page - for page_num, page in enumerate(pdf_document, 1): - # Get list of images on the page - image_list = page.get_images(full=True) - - if image_list: - logger.info(f"Found {len(image_list)} images on page {page_num}") - - # Process each image - for img_index, img in enumerate(image_list): - try: - xref = img[0] # Get image reference - - # Extract image data - base_image = pdf_document.extract_image(xref) - image_bytes = base_image["image"] - image_ext = base_image["ext"] - - # Analyze image - image_analysis = await self.ai_service.analyze_image( - image_data=image_bytes, - prompt=f"{image_prompt} (Page {page_num}, Image {img_index+1})", - mime_type=f"image/{image_ext}" - ) - - # Add to analysis list - images_analysis.append({ - "page": page_num, - "index": img_index + 1, - "analysis": image_analysis - }) - - image_count += 1 - logger.info(f"Analyzed image {img_index+1} on page {page_num}") - - # Create a separate document for each extracted image if needed - if True: # Set to condition if you want to control this - img_doc_id = f"img_doc_{uuid.uuid4()}" - image_filename = f"page{page_num}_image{img_index+1}.{image_ext}" - - image_document = { - "id": img_doc_id, - "source": { - "type": "extracted", - "parent_id": str(file_id), - "id": img_doc_id, - "name": image_filename, - "content_type": f"image/{image_ext}", - "size": len(image_bytes) - }, - "contents": [{ - "type": "text", - "text": f"Image Analysis (PDF Page {page_num}, Image {img_index+1}):\n{image_analysis}", - "is_extracted": True, - "extraction_context": image_prompt - }] - } - - # Add image document to message - message["documents"].append(image_document) - logger.info(f"Added extracted image document 
{image_filename} to message") - - except Exception as img_err: - logger.warning(f"Error processing image {img_index} on page {page_num}: {str(img_err)}") - - # Close the PDF - pdf_document.close() - - # Add combined image analysis to the main document - if images_analysis: - combined_analysis = "\n\n## Embedded Images Analysis\n\n" - for img in images_analysis: - combined_analysis += f"### Page {img['page']}, Image {img['index']}\n{img['analysis']}\n\n" - - document["contents"].append({ - "type": "text", - "text": combined_analysis, - "is_extracted": True, - "extraction_context": f"Analysis of {image_count} images embedded in the PDF" - }) - - logger.info(f"Added combined analysis of {image_count} PDF images to document") - except ImportError: - logger.warning("PyMuPDF (fitz) is not installed, skipping PDF image extraction") - document["contents"].append({ - "type": "text", - "text": "\n\nNote: PDF may contain images that were not extracted due to missing libraries.", - "is_extracted": False - }) - except Exception as e: - logger.error(f"Error extracting images from PDF {file_name}: {str(e)}") - document["contents"].append({ - "type": "text", - "text": f"\n\nError extracting images from PDF: {str(e)}", - "is_extracted": False - }) - - # Word document processing with image extraction - elif file_name.lower().endswith(('.docx', '.doc')): - logger.info(f"Processing Word document: {file_name}") - - # Extract text content first - from modules.agentservice_utils import extract_text_from_file_content - - text_content, is_extracted = extract_text_from_file_content( - file_content, file_name, content_type - ) - - # Add text content - document["contents"].append({ - "type": "text", - "text": text_content, - "is_extracted": is_extracted, - "extraction_context": extraction_prompt - }) - - logger.info(f"Extracted text content from Word document {file_name}") - - # Attempt to extract and analyze images from Word document - if self.ai_service and hasattr(self.ai_service, "analyze_image"): - try: - # For .docx documents - if file_name.lower().endswith('.docx'): - import zipfile - from io import BytesIO - - # Check if extraction prompt is available or use default - image_prompt = extraction_prompt or "Describe this image from the Word document" - - # Create a zipfile object from the .docx content - docx_zip = zipfile.ZipFile(BytesIO(file_content)) - - # Images in .docx are stored in the "word/media" directory - image_files = [f for f in docx_zip.namelist() if f.startswith('word/media/')] - - if image_files: - logger.info(f"Found {len(image_files)} images in Word document {file_name}") - - # Process each image - images_analysis = [] - for i, img_path in enumerate(image_files): - try: - # Extract image data - image_bytes = docx_zip.read(img_path) - - # Determine image type from filename - image_ext = img_path.split('.')[-1] if '.' 
in img_path else 'png' - - # Analyze image - image_analysis = await self.ai_service.analyze_image( - image_data=image_bytes, - prompt=f"{image_prompt} (Image {i+1})", - mime_type=f"image/{image_ext}" - ) - - # Add to analysis list - images_analysis.append({ - "index": i + 1, - "path": img_path, - "analysis": image_analysis - }) - - logger.info(f"Analyzed image {i+1} ({img_path}) from Word document") - - # Create a separate document for each extracted image if needed - img_doc_id = f"img_doc_{uuid.uuid4()}" - image_filename = f"word_image{i+1}.{image_ext}" - - image_document = { - "id": img_doc_id, - "source": { - "type": "extracted", - "parent_id": str(file_id), - "id": img_doc_id, - "name": image_filename, - "content_type": f"image/{image_ext}", - "size": len(image_bytes) - }, - "contents": [{ - "type": "text", - "text": f"Image Analysis (Word Document Image {i+1}):\n{image_analysis}", - "is_extracted": True, - "extraction_context": image_prompt - }] - } - - # Add image document to message - message["documents"].append(image_document) - logger.info(f"Added extracted image document {image_filename} to message") - - except Exception as img_err: - logger.warning(f"Error processing image {img_path}: {str(img_err)}") - - # Add combined image analysis to the main document - if images_analysis: - combined_analysis = "\n\n## Embedded Images Analysis\n\n" - for img in images_analysis: - combined_analysis += f"### Image {img['index']}\n{img['analysis']}\n\n" - - document["contents"].append({ - "type": "text", - "text": combined_analysis, - "is_extracted": True, - "extraction_context": f"Analysis of {len(images_analysis)} images embedded in the Word document" - }) - - logger.info(f"Added combined analysis of {len(images_analysis)} Word document images") - - # Close the zip file - docx_zip.close() - - # Note: For .doc (older format) we would need additional libraries - # This could be implemented with libraries like antiword or pywin32 - elif file_name.lower().endswith('.doc'): - logger.warning("Image extraction from .doc files is not supported yet") - document["contents"].append({ - "type": "text", - "text": "\n\nNote: This is an older .doc format document. 
Images may be present but could not be extracted.", - "is_extracted": False - }) - - except Exception as e: - logger.error(f"Error extracting images from Word document {file_name}: {str(e)}") - document["contents"].append({ - "type": "text", - "text": f"\n\nError extracting images from Word document: {str(e)}", - "is_extracted": False - }) - - # Excel file processing with enhanced capabilities - elif file_name.lower().endswith(('.xlsx', '.xls')): - logger.info(f"Processing Excel document: {file_name}") - - # Extract text representation of spreadsheet data - from modules.agentservice_utils import extract_text_from_file_content - - text_content, is_extracted = extract_text_from_file_content( - file_content, file_name, content_type - ) - - # Add text content - document["contents"].append({ - "type": "text", - "text": text_content, - "is_extracted": is_extracted, - "extraction_context": extraction_prompt - }) - - logger.info(f"Extracted data from Excel document {file_name}") - - # Try to extract charts and images if available - if self.ai_service and hasattr(self.ai_service, "analyze_image"): - try: - # For .xlsx files (newer format) - if file_name.lower().endswith('.xlsx'): - import zipfile - from io import BytesIO - - # Create a zipfile object from the Excel content - xlsx_zip = zipfile.ZipFile(BytesIO(file_content)) - - # Charts and images can be in various directories - media_paths = [ - 'xl/media/', - 'xl/drawings/', - 'xl/charts/' - ] - - # Collect all potential media files - media_files = [] - for path in media_paths: - media_files.extend([f for f in xlsx_zip.namelist() if f.startswith(path)]) - - if media_files: - logger.info(f"Found {len(media_files)} media files in Excel document {file_name}") - - # Process image files (skip XML and other non-image files) - image_extensions = ['png', 'jpeg', 'jpg', 'gif', 'bmp', 'tiff', 'emf', 'wmf'] - image_files = [f for f in media_files if f.split('.')[-1].lower() in image_extensions] - - if image_files: - logger.info(f"Found {len(image_files)} images/charts in Excel document {file_name}") - - image_prompt = extraction_prompt or "Describe this chart/image from the Excel document" - images_analysis = [] - - for i, img_path in enumerate(image_files): - try: - # Extract image data - image_bytes = xlsx_zip.read(img_path) - - # Determine image type from filename - image_ext = img_path.split('.')[-1] if '.' 
in img_path else 'png' - - # Analyze image - image_analysis = await self.ai_service.analyze_image( - image_data=image_bytes, - prompt=f"{image_prompt} (Describe what this chart or image shows, including any data trends or patterns visible)", - mime_type=f"image/{image_ext}" - ) - - # Add to analysis list - images_analysis.append({ - "index": i + 1, - "path": img_path, - "analysis": image_analysis - }) - - logger.info(f"Analyzed image/chart {i+1} from Excel document") - - # Create a separate document for each extracted image - img_doc_id = f"img_doc_{uuid.uuid4()}" - image_filename = f"excel_image{i+1}.{image_ext}" - - image_document = { - "id": img_doc_id, - "source": { - "type": "extracted", - "parent_id": str(file_id), - "id": img_doc_id, - "name": image_filename, - "content_type": f"image/{image_ext}", - "size": len(image_bytes) - }, - "contents": [{ - "type": "text", - "text": f"Chart/Image Analysis (Excel Document Item {i+1}):\n{image_analysis}", - "is_extracted": True, - "extraction_context": image_prompt - }] - } - - # Add image document to message - message["documents"].append(image_document) - - except Exception as img_err: - logger.warning(f"Error processing image {img_path}: {str(img_err)}") - - # Add combined image analysis to the main document - if images_analysis: - combined_analysis = "\n\n## Embedded Charts and Images Analysis\n\n" - for img in images_analysis: - combined_analysis += f"### Chart/Image {img['index']}\n{img['analysis']}\n\n" - - document["contents"].append({ - "type": "text", - "text": combined_analysis, - "is_extracted": True, - "extraction_context": f"Analysis of {len(images_analysis)} charts/images from the Excel document" - }) - - # Close the zip file - xlsx_zip.close() - - except Exception as e: - logger.error(f"Error extracting charts/images from Excel document {file_name}: {str(e)}") - - else: - # For other file types, extract text - from modules.agentservice_utils import extract_text_from_file_content - - content, is_extracted = extract_text_from_file_content( - file_content, file_name, content_type - ) - - document["contents"].append({ - "type": "text", - "text": content, - "is_extracted": is_extracted, - "extraction_context": extraction_prompt - }) - - logger.info(f"Added text content for {file_name} to message (extracted: {is_extracted})") - else: - # No content available - document["contents"].append({ - "type": "text", - "text": f"File content not available for {file_name}", - "is_extracted": False - }) - else: - # Just add reference without content - document["contents"].append({ - "type": "text", - "text": f"File: {file_name} (content not loaded)", - "is_extracted": False - }) - - # Add document to message - message["documents"].append(document) - - logger.info(f"File {file_name} successfully added to message") - return message - - except Exception as e: - logger.error(f"Error adding file {file_id} to message: {str(e)}") - return message - - - async def extract_document_content(self, doc_id: str, message: Dict[str, Any], extraction_prompt: str) -> Dict[str, Any]: - """ - Extract or update document content with contextual extraction. 
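# Illustrative sketch (not code from the repository): both the .docx and the
# .xlsx branches above rely on the same trick -- Office files are ZIP archives
# whose embedded media sit under well-known prefixes. The stdlib-only core:
import zipfile
from io import BytesIO

def list_embedded_media(file_content: bytes,
                        prefixes=("word/media/", "xl/media/")) -> dict:
    """Map archive path -> raw bytes for media stored under the given prefixes."""
    with zipfile.ZipFile(BytesIO(file_content)) as archive:
        return {name: archive.read(name)
                for name in archive.namelist()
                if name.startswith(prefixes)}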
- - Args: - doc_id: ID of the document to extract - message: Message containing the document - extraction_prompt: Contextual prompt for extraction - - Returns: - Updated message with extracted content - """ - if not message or "documents" not in message: - return message - - updated_message = message.copy() - - # Find the document - for i, document in enumerate(updated_message.get("documents", [])): - if document.get("id") == doc_id: - # Get file ID from source - source = document.get("source", {}) - file_id = source.get("id") - - if file_id and self.lucydom_interface: - # Get file metadata - file = self.lucydom_interface.get_file(int(file_id)) - if not file: - continue - - # Get file content - file_content = await self.lucydom_interface.read_file_content(int(file_id)) - if not file_content: - continue - - # Process based on file type - file_name = file.get("name", "unnamed_file") - file_type = file.get("type", "unknown") - content_type = file.get("content_type") - - # Update content based on file type - if file_type == "image" or (content_type and content_type.startswith("image/")): - if self.ai_service and hasattr(self.ai_service, "analyze_image"): - try: - image_analysis = await self.ai_service.analyze_image( - image_data=file_content, - prompt=extraction_prompt, - mime_type=content_type - ) - - # Create or update content - new_content = { - "type": "text", - "text": f"Image Analysis:\n{image_analysis}", - "is_extracted": True, - "extraction_context": extraction_prompt - } - - # Update or add content - contents = document.get("contents", []) - contents_updated = False - - for j, content in enumerate(contents): - if content.get("type") == "text": - updated_message["documents"][i]["contents"][j] = new_content - contents_updated = True - break - - if not contents_updated: - if not updated_message["documents"][i].get("contents"): - updated_message["documents"][i]["contents"] = [] - updated_message["documents"][i]["contents"].append(new_content) - - logger.info(f"Updated image analysis for {file_name} with new context: {extraction_prompt}") - except Exception as e: - logger.error(f"Error updating image analysis for {file_name}: {str(e)}") - else: - # For other file types, extract text with new context - from modules.agentservice_utils import extract_text_from_file_content - - content, is_extracted = extract_text_from_file_content( - file_content, file_name, content_type - ) - - new_content = { - "type": "text", - "text": content, - "is_extracted": is_extracted, - "extraction_context": extraction_prompt - } - - # Update or add content - contents = document.get("contents", []) - contents_updated = False - - for j, content_item in enumerate(contents): - if content_item.get("type") == "text": - updated_message["documents"][i]["contents"][j] = new_content - contents_updated = True - break - - if not contents_updated: - if not updated_message["documents"][i].get("contents"): - updated_message["documents"][i]["contents"] = [] - updated_message["documents"][i]["contents"].append(new_content) - - logger.info(f"Updated text extraction for {file_name} with new context: {extraction_prompt}") - - # Found and processed the document, stop searching - break - - return updated_message - - async def extract_files_from_workflow(self, workflow: Dict[str, Any], extraction_prompt: str, file_filter: str = None) -> Dict[str, Any]: - """ - Extract all relevant files from a workflow with context-aware extraction. 
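# Illustrative sketch (not code from the repository): the update path of
# extract_document_content() above replaces the first text entry of a
# document's contents list, or appends when none exists. In isolation:
def upsert_text_content(document: dict, new_content: dict) -> None:
    contents = document.setdefault("contents", [])
    for i, item in enumerate(contents):
        if item.get("type") == "text":
            contents[i] = new_content  # replace the first text entry
            return
    contents.append(new_content)       # no text entry yet: append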
- - Args: - workflow: The workflow object - extraction_prompt: Contextual prompt for extraction - file_filter: Optional filter for file types (e.g., "csv", "image") - - Returns: - Dictionary with extracted content - """ - # Import for data extraction - from modules.agentservice_dataextraction import data_extraction - - # Get all files from the workflow - files = [] - - # Process all messages - for message in workflow.get("messages", []): - # Extract documents from the message - for doc in message.get("documents", []): - source = doc.get("source", {}) - - # Only include file documents - if source.get("type") == "file": - file_info = { - "id": source.get("id", ""), - "name": source.get("name", ""), - "type": source.get("type", ""), - "content_type": source.get("content_type", ""), - "size": source.get("size", 0) - } - - # Apply filter if provided - if file_filter: - file_name = file_info.get("name", "").lower() - content_type = file_info.get("content_type", "").lower() - - if (file_filter.lower() in file_name or - file_filter.lower() in content_type): - # Check if file is already in the list - if not any(f.get("id") == file_info["id"] for f in files): - files.append(file_info) - else: - # No filter, include all files - if not any(f.get("id") == file_info["id"] for f in files): - files.append(file_info) - - # If no files found, return empty result - if not files: - return { - "prompt": extraction_prompt, - "files_processed": 0, - "extracted_content": [] - } - - # Get all messages from the workflow - workflow_messages = workflow.get("messages", []) - - # Extract data using the dataextraction module - extracted_data = await data_extraction( - prompt=extraction_prompt, - files=files, - messages=workflow_messages, - ai_service=self.ai_service, - lucydom_interface=self.lucydom_interface, - workflow_id=self.workflow_id, - add_log_func=None # We don't have access to add_log_func here - ) - - return extracted_data - - def get_file_content_from_message(self, message: Dict[str, Any], file_id: int = None, doc_id: str = None) -> str: - """ - Get file content from a message. - - Args: - message: The message containing the document - file_id: Optional file ID to search for - doc_id: Optional document ID to search for - - Returns: - Text content of the file if available - """ - if not message or "documents" not in message: - return "" - - # Search for the document - for document in message.get("documents", []): - # Match by document ID or file ID - source = document.get("source", {}) - source_file_id = source.get("id") - - if ((doc_id and document.get("id") == doc_id) or - (file_id and source_file_id and str(file_id) == str(source_file_id))): - - # Get text content from document - for content in document.get("contents", []): - if content.get("type") == "text": - return content.get("text", "") - - return "" - - def create_text_document(self, message: Dict[str, Any], content: str, title: str = "Generated Text") -> Dict[str, Any]: - """ - Create a new text document in a message. 
- - Args: - message: The message to add the document to - content: Text content - title: Document title - - Returns: - Updated message with the new document - """ - # Initialize documents array if needed - updated_message = message.copy() - if "documents" not in updated_message: - updated_message["documents"] = [] - - # Create document ID - doc_id = f"doc_{uuid.uuid4()}" - - # Create document structure - document = { - "id": doc_id, - "source": { - "type": "generated", - "id": doc_id, - "name": title, - "content_type": "text/plain", - "size": len(content) - }, - "contents": [ - { - "type": "text", - "text": content, - "is_extracted": True - } - ] - } - - # Add document to message - updated_message["documents"].append(document) - - logger.info(f"Created text document '{title}' in message") - return updated_message - - def merge_document_contents(self, message: Dict[str, Any]) -> str: - """ - Merge all document contents from a message into a single text. - - Args: - message: The message containing documents - - Returns: - Combined text content from all documents - """ - if not message or "documents" not in message: - return "" - - combined_text = "" - - for document in message.get("documents", []): - source = document.get("source", {}) - doc_name = source.get("name", "Unnamed Document") - - # Extract text content - doc_text = "" - for content in document.get("contents", []): - if content.get("type") == "text": - doc_text = content.get("text", "") - break - - if doc_text: - combined_text += f"\n\n--- {doc_name} ---\n\n{doc_text}" - - return combined_text.strip() - -# Factory function -def get_document_handler(workflow_id: str = None, lucydom_interface = None, ai_service = None) -> DocumentHandler: - """Get a document handler instance.""" - return DocumentHandler(workflow_id, lucydom_interface, ai_service) \ No newline at end of file diff --git a/modules/agentservice_filemanager.py b/modules/agentservice_filemanager.py deleted file mode 100644 index 18f4718d..00000000 --- a/modules/agentservice_filemanager.py +++ /dev/null @@ -1,1206 +0,0 @@ -""" -Central file management module for the Agentservice. 
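# Illustrative usage sketch, assuming the DocumentHandler definitions above are
# in scope; the printed value is derived from the merge format shown there.
handler = get_document_handler()
message = handler.create_text_document({"documents": []}, "Total revenue: 1.2M", title="Summary")
print(handler.merge_document_contents(message))
# -> "--- Summary ---\n\nTotal revenue: 1.2M" (outer whitespace stripped)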
-""" - -import os -import logging -import base64 -import json -import uuid -from datetime import datetime -from typing import List, Dict, Any, Optional, Tuple, Union, BinaryIO -from io import BytesIO - -# Import utilities from agentservice_utils -from modules.agentservice_utils import extract_text_from_file_content, is_text_extractable - -logger = logging.getLogger(__name__) - -# Helper function for adding logs -def _log(add_log_func, workflow_id, message, level="info"): - """Helper function for adding logs with standardized formatting.""" - if add_log_func and workflow_id: - add_log_func(workflow_id, message, level) - - # Also log to standard logger - if level == "info": - logger.info(message) - elif level == "warning": - logger.warning(message) - elif level == "error": - logger.error(message) - -class FileExtractionError(Exception): - """Exception for file extraction errors.""" - pass - - - -class FileManager: - """Central file management for the Agentservice.""" - - _instance = None - - @classmethod - def get_instance(cls): - """Get the singleton instance of FileManager.""" - if cls._instance is None: - cls._instance = cls() - return cls._instance - - def __init__(self): - """Initialize the FileManager.""" - # Ensure singleton pattern - if FileManager._instance is not None: - raise RuntimeError("Singleton instance already exists - use get_instance()") - - # Import utilities - # Instead of storing file_utils, we'll use the imported functions directly - - async def read_file_contents(self, - file_contexts: List[Dict[str, Any]], - lucydom_interface, - workflow_id: str = None, - add_log_func = None, - ai_service = None, - extraction_context: str = None # Add this parameter - ) -> Dict[str, Dict[str, Any]]: - """ - Read file contents with optional contextual extraction. 
-
-        Args:
-            file_contexts: List of file contexts with metadata
-            lucydom_interface: LucyDOM interface for file access
-            workflow_id: Optional workflow ID for logging
-            add_log_func: Optional function for adding logs
-            ai_service: AI service for image analysis
-            extraction_context: Optional context prompt for extraction
-
-        Returns:
-            Dictionary with file contents and metadata
-        """
-        file_contents = {}
-        # Add debug logging
-        logger.info(f"Reading contents of {len(file_contexts)} files for workflow {workflow_id}")
-
-        for file in file_contexts:
-            file_id = file["id"]
-            file_name = file["name"]
-            file_type = file.get("type", "unknown")
-            content_type = file.get("content_type")
-
-            try:
-                # Fetch the file content via the LucyDOM interface
-                file_data = await lucydom_interface.read_file_content(file_id)
-
-                if not file_data:
-                    _log(add_log_func, workflow_id, f"File {file_name} not found", "warning")
-                    file_contents[file_id] = {
-                        "content": "File content not available (File not found)",
-                        "is_extracted": False,
-                        "name": file_name,
-                        "type": file_type,
-                        "content_type": content_type
-                    }
-                    continue
-
-                logger.info(f"Successfully read file: {file_name} (ID: {file_id}, Type: {file_type})")
-
-                # For image analysis, add extraction context
-                if file_type == "image" or file_name.lower().endswith(('.jpg', '.jpeg', '.png', '.gif', '.webp')):
-                    if ai_service and hasattr(ai_service, "analyze_image"):
-                        try:
-                            # Use extraction context if provided
-                            prompt = extraction_context or "Describe this image in detail"
-
-                            image_analysis = await ai_service.analyze_image(
-                                image_data=file_data,
-                                prompt=prompt,  # Use contextual prompt
-                                mime_type=content_type
-                            )
-
-                            file_contents[file_id] = {
-                                "content": f"Image Analysis:\n{image_analysis}",
-                                "is_extracted": True,  # Mark as extracted
-                                "name": file_name,
-                                "type": file_type,
-                                "content_type": content_type,
-                                "extraction_context": prompt  # Store the used prompt
-                            }
-                            _log(add_log_func, workflow_id, f"Image {file_name} analyzed successfully", "info")
-                        except Exception as e:
-                            logger.error(f"Error analyzing image {file_name}: {str(e)}")
-                            _log(add_log_func, workflow_id, f"Error analyzing image {file_name}: {str(e)}", "error")
-                            file_contents[file_id] = {
-                                "content": f"Image file: {file_name} (Analysis failed: {str(e)})",
-                                "is_extracted": False,
-                                "name": file_name,
-                                "type": file_type,
-                                "content_type": content_type
-                            }
-                    else:
-                        file_contents[file_id] = {
-                            "content": f"Image file: {file_name} (AI analysis not available)",
-                            "is_extracted": False,
-                            "name": file_name,
-                            "type": file_type,
-                            "content_type": content_type
-                        }
-
-                # Document and text files
-                elif (file_type == "document" or not file_type or file_name.lower().endswith(('.csv', '.txt', '.json', '.xml')) or (content_type and content_type.startswith('text/'))):
-                    # Use the central text extraction function on the file content
-                    content, is_extracted = extract_text_from_file_content(
-                        file_data, file_name, content_type
-                    )
-                    file_contents[file_id] = {
-                        "content": content,
-                        "is_extracted": is_extracted,
-                        "name": file_name,
-                        "type": file_type,
-                        "content_type": content_type
-                    }
-                    _log(add_log_func, workflow_id,
-                         f"File {file_name} read successfully (extracted: {is_extracted})", "info")
-
-                # Other file types - store metadata only
-                else:
-                    file_contents[file_id] = {
-                        "content": f"File: {file_name} (Type: {file_type}, content not available)",
-                        "is_extracted": False,
-                        "name": file_name,
-                        "type": file_type,
-                        "content_type": content_type
-                    }
-                    _log(add_log_func, workflow_id,
f"Unsupported file type: {file_type} for {file_name}", "warning") - - except Exception as e: - logger.error(f"Error reading file {file_name}: {str(e)}") - _log(add_log_func, workflow_id, f"Error reading file {file_name}: {str(e)}", "error") - file_contents[file_id] = { - "content": f"File content not available (Error: {str(e)})", - "is_extracted": False, - "name": file_name, - "type": file_type, - "content_type": content_type - } - - return file_contents - - @staticmethod - def add_file_to_message(message: Dict[str, Any], file_data: Dict[str, Any]) -> Dict[str, Any]: - """ - Add a file to a message with consistent document structure. - - Args: - message: The message to add the file to - file_data: File metadata and content - - Returns: - Updated message with the file added - """ - logger.info(f"Adding file to message: {file_data.get('name', 'unnamed_file')} (ID: {file_data.get('id', 'unknown')})") - - # Initialize documents array if needed - if "documents" not in message: - message["documents"] = [] - - # Create a unique ID for the document if not provided - doc_id = file_data.get("id", f"file_{uuid.uuid4()}") - - # Extract metadata - file_size = file_data.get("size") - if isinstance(file_size, str) and file_size.isdigit(): - file_size = int(file_size) - elif file_size is None and file_data.get("content"): - file_size = len(file_data.get("content", "")) - - # Determine if content is already extracted - content = file_data.get("content", "No content available") - file_name = file_data.get("name", "unnamed_file") - content_type = file_data.get("content_type") - is_extracted = file_data.get("is_extracted", False) - - # Create standard document structure that follows the data model - document = { - "id": f"doc_{uuid.uuid4()}", # Unique document ID separate from file ID - "source": { - "type": "file", - "id": doc_id, - "name": file_name, - "content_type": content_type, - "size": file_size, - "upload_date": file_data.get("upload_date", datetime.now().isoformat()) - }, - "contents": [ - { - "type": "text", - "text": content, - "is_extracted": is_extracted, - "extraction_context": file_data.get("extraction_context", None) - } - ] - } - - # Check if file is already in the message - file_already_added = any( - doc.get("source", {}).get("id") == doc_id - for doc in message.get("documents", []) - ) - - if not file_already_added: - message["documents"].append(document) - logger.info(f"File {file_name} added to message (total: {len(message.get('documents', []))} files)") - else: - logger.info(f"File {file_name} already exists in message, skipping") - - return message - - - async def analyze_file(self, file_id: int, prompt: str, lucydom_interface, ai_service) -> Dict[str, Any]: - """ - Analyze a file using the appropriate method based on file type. 
- - Args: - file_id: ID of the file to analyze - prompt: Analysis prompt - lucydom_interface: Interface for database access - ai_service: Service for AI requests - - Returns: - Analysis result - """ - if not lucydom_interface: - raise ValueError("LucyDOM interface not available") - - if not ai_service: - raise ValueError("AI service not available") - - try: - # Get file metadata - file = lucydom_interface.get_file(file_id) - if not file: - raise ValueError(f"File with ID {file_id} not found") - - # Get file content - file_content = await lucydom_interface.read_file_content(file_id) - if not file_content: - raise ValueError(f"Content for file {file_id} not found") - - # Extract metadata - file_name = file.get("name", "unnamed") - content_type = file.get("content_type") - file_type = file.get("type") - - # Process based on file type - if file_type == "image" or (content_type and content_type.startswith("image/")): - # Image analysis - if hasattr(ai_service, "analyze_image"): - analysis = await ai_service.analyze_image( - image_data=file_content, - prompt=prompt, - mime_type=content_type - ) - - return { - "file_id": file_id, - "file_name": file_name, - "analysis_type": "image", - "result": analysis - } - else: - raise ValueError("AI service does not support image analysis") - - elif file_name.endswith(".pdf"): - # PDF analysis - first extract text, then analyze - try: - # Extract text - text_content, is_extracted = extract_text_from_file_content( - file_content, file_name, content_type - ) - - if not is_extracted: - raise ValueError(f"Failed to extract text from PDF {file_name}") - - # Analyze text with AI - pdf_analysis_prompt = f""" - Analyze the following PDF content based on this request: - - REQUEST: {prompt} - - PDF CONTENT: - {text_content} # In a future release to split into tokensets, if too big file - """ - - analysis = await ai_service.call_api([{"role": "user", "content": pdf_analysis_prompt}]) - - # Also check for images in the PDF - has_images = False - image_analysis = None - - try: - # Extract and analyze images - image_results = await self.extract_and_analyze_pdf_images( - file_content, - f"Analyze images with respect to: {prompt}", - ai_service - ) - - if image_results and len(image_results) > 0: - has_images = True - image_analysis = "\n\nPDF IMAGES ANALYSIS:\n" - for img in image_results: - image_analysis += f"- Image on page {img.get('page')}: {img.get('response')}\n" - except Exception as img_err: - logger.warning(f"Could not analyze images in PDF {file_name}: {str(img_err)}") - - # Combine text and image analysis if available - if has_images and image_analysis: - analysis += image_analysis - - return { - "file_id": file_id, - "file_name": file_name, - "analysis_type": "pdf", - "result": analysis, - "has_images": has_images - } - - except Exception as pdf_err: - logger.error(f"Error analyzing PDF {file_name}: {str(pdf_err)}") - raise - - elif file_name.endswith(('.xlsx', '.xls', '.csv')): - # Tabular data analysis - try: - # Extract text content - text_content, is_extracted = extract_text_from_file_content( - file_content, file_name, content_type - ) - - if not is_extracted: - raise ValueError(f"Failed to extract data from {file_name}") - - # Analyze with AI - data_analysis_prompt = f""" - Analyze the following tabular data based on this request: - - REQUEST: {prompt} - - DATA CONTENT: - {text_content} # In a future release to split into tokensets to limit storage - - Provide a structured analysis including: - 1. Data overview - 2. Key insights - 3. 
Patterns and trends - 4. Answers to the specific request - """ - - analysis = await ai_service.call_api([{"role": "user", "content": data_analysis_prompt}]) - - return { - "file_id": file_id, - "file_name": file_name, - "analysis_type": "tabular_data", - "result": analysis - } - - except Exception as data_err: - logger.error(f"Error analyzing tabular data {file_name}: {str(data_err)}") - raise - - else: - # Default to text analysis for all other file types - try: - # Extract text content - text_content, is_extracted = extract_text_from_file_content( - file_content, file_name, content_type - ) - - if not is_extracted: - raise ValueError(f"Failed to extract text from {file_name}") - - # Analyze with AI - text_analysis_prompt = f""" - Analyze the following document content based on this request: - - REQUEST: {prompt} - - DOCUMENT CONTENT: - {text_content} # In a future release to split into tokensets - """ - - analysis = await ai_service.call_api([{"role": "user", "content": text_analysis_prompt}]) - - return { - "file_id": file_id, - "file_name": file_name, - "analysis_type": "text", - "result": analysis - } - - except Exception as text_err: - logger.error(f"Error analyzing text content {file_name}: {str(text_err)}") - raise - - except Exception as e: - logger.error(f"Error analyzing file {file_id}: {str(e)}") - raise - - - async def extract_and_analyze_pdf_images(self, - pdf_content: bytes, - prompt: str, - ai_service - ) -> List[Dict[str, Any]]: - """ - Extract images from a PDF file and analyze them. - Works with binary data instead of file paths. - - Args: - pdf_content: Binary data of the PDF file - prompt: Prompt for image analysis - ai_service: AI service for image analysis - - Returns: - List with analysis results for each image - """ - image_responses = [] - temp_files = [] # List of temporary files for cleanup - - try: - # Import required libraries - try: - import fitz # PyMuPDF - from io import BytesIO - import tempfile - - logger.info(f"Starting PDF image extraction with PyMuPDF") - except ImportError: - logger.error("PyMuPDF (fitz) is not installed. 
Install it with 'pip install pymupdf'") - return [] - - # Open PDF in memory - try: - doc = fitz.open(stream=pdf_content, filetype="pdf") - page_count = len(doc) - logger.info(f"PDF opened with {page_count} pages") - except Exception as pdf_err: - logger.error(f"Error opening PDF: {str(pdf_err)}") - return [] - - # Process each page with multiple extraction methods - for page_num, page in enumerate(doc, 1): - logger.info(f"Processing page {page_num}/{page_count}") - - # Method 1: Standard extraction using get_images - try: - image_list = page.get_images(full=True) - if image_list: - logger.info(f"Method 1: Found {len(image_list)} images on page {page_num}") - - for img_index, img in enumerate(image_list): - try: - xref = img[0] # Get image reference - - # Extract image data - base_image = doc.extract_image(xref) - image_bytes = base_image["image"] - image_ext = base_image["ext"] - - # Check for valid image data - if not image_bytes or len(image_bytes) < 100: - logger.warning(f"Empty or very small image data for image {img_index+1} on page {page_num}") - continue - - # Analyze image - analysis_result = await ai_service.analyze_image( - image_data=image_bytes, - prompt=prompt, - mime_type=f"image/{image_ext}" - ) - - # Store image size - image_size = f"{base_image.get('width', 0)}x{base_image.get('height', 0)}" - - # Add result - image_responses.append({ - "page": page_num, - "image_index": img_index, - "format": image_ext, - "image_size": image_size, - "method": "get_images", - "response": analysis_result - }) - - logger.info(f"Successfully analyzed image {img_index+1} on page {page_num} using method 1") - except Exception as e: - logger.warning(f"Error processing image {img_index} on page {page_num} (Method 1): {str(e)}") - else: - logger.info(f"Method 1: No images found on page {page_num} using get_images") - except Exception as m1_err: - logger.warning(f"Error in Method 1 for page {page_num}: {str(m1_err)}") - - # Method 2: Extract embedded images using page.get_drawings() - try: - drawings = page.get_drawings() - drawing_images = 0 - - for drawing_index, drawing in enumerate(drawings): - try: - # Check if drawing contains an image - if "image" in str(drawing).lower(): - drawing_images += 1 - rect = drawing["rect"] # Get rectangle of the drawing - - # Extract the area as an image - pix = page.get_pixmap(matrix=fitz.Matrix(2, 2), clip=rect) - img_bytes = pix.tobytes("png") - - # Analyze the image - analysis_result = await ai_service.analyze_image( - image_data=img_bytes, - prompt=f"{prompt} (Page {page_num}, Drawing {drawing_index+1})", - mime_type="image/png" - ) - - # Add result - image_responses.append({ - "page": page_num, - "image_index": drawing_index, - "format": "png", - "image_size": f"{pix.width}x{pix.height}", - "method": "get_drawings", - "response": analysis_result - }) - - logger.info(f"Successfully analyzed drawing image {drawing_index+1} on page {page_num} using method 2") - except Exception as drawing_err: - logger.warning(f"Error processing drawing {drawing_index} on page {page_num}: {str(drawing_err)}") - - if drawing_images > 0: - logger.info(f"Method 2: Processed {drawing_images} images from drawings on page {page_num}") - else: - logger.info(f"Method 2: No images found in drawings on page {page_num}") - except Exception as m2_err: - logger.warning(f"Error in Method 2 for page {page_num}: {str(m2_err)}") - - # Method 3: Extract using blocks detection - try: - blocks = page.get_text("dict")["blocks"] - img_blocks = [b for b in blocks if b.get("type") == 1] # type 1 = 
image - - if img_blocks: - logger.info(f"Method 3: Found {len(img_blocks)} image blocks on page {page_num}") - - for block_index, block in enumerate(img_blocks): - try: - # Extract using pixmap for the block region - rect = block["bbox"] - pix = page.get_pixmap(matrix=fitz.Matrix(2, 2), clip=rect) - img_bytes = pix.tobytes("png") - - # Analyze image - analysis_result = await ai_service.analyze_image( - image_data=img_bytes, - prompt=f"{prompt} (Page {page_num}, Block {block_index+1})", - mime_type="image/png" - ) - - # Add result - image_responses.append({ - "page": page_num, - "image_index": block_index, - "format": "png", - "image_size": f"{pix.width}x{pix.height}", - "method": "block_extraction", - "response": analysis_result - }) - - logger.info(f"Successfully analyzed image block {block_index+1} on page {page_num} using method 3") - except Exception as block_err: - logger.warning(f"Error processing block {block_index} on page {page_num}: {str(block_err)}") - else: - logger.info(f"Method 3: No image blocks found on page {page_num}") - except Exception as m3_err: - logger.warning(f"Error in Method 3 for page {page_num}: {str(m3_err)}") - - # Method 4: Last resort - render the entire page as an image and analyze - if not image_responses or not any(resp.get("page") == page_num for resp in image_responses): - try: - logger.info(f"Method 4: Rendering entire page {page_num} as image") - - # Render the entire page as an image - pix = page.get_pixmap(matrix=fitz.Matrix(2, 2)) - img_bytes = pix.tobytes("png") - - # Analyze the page as an image - analysis_result = await ai_service.analyze_image( - image_data=img_bytes, - prompt=f"{prompt} (Full page {page_num})", - mime_type="image/png" - ) - - # Add result - image_responses.append({ - "page": page_num, - "image_index": 0, - "format": "png", - "image_size": f"{pix.width}x{pix.height}", - "method": "full_page_render", - "response": analysis_result - }) - - logger.info(f"Successfully analyzed full page {page_num} as image using method 4") - except Exception as m4_err: - logger.warning(f"Error in Method 4 for page {page_num}: {str(m4_err)}") - - # Close the document - doc.close() - - # Deduplicate results (different methods might extract the same image) - deduplicated_responses = [] - seen_areas = set() - - for response in image_responses: - # Create a unique identifier for the image area - area_key = f"{response['page']}_{response['image_size']}" - - if area_key not in seen_areas: - seen_areas.add(area_key) - deduplicated_responses.append(response) - - logger.info(f"PDF image extraction complete: Found {len(image_responses)} images, deduplicated to {len(deduplicated_responses)}") - return deduplicated_responses - - except ImportError as imp_err: - logger.error(f"Required library not available for PDF image extraction: {str(imp_err)}") - return [] - except Exception as e: - logger.error(f"Error extracting images from PDF: {str(e)}") - return [] - finally: - # Clean up temporary files - for temp_file in temp_files: - try: - if os.path.exists(temp_file): - os.remove(temp_file) - except Exception as e: - logger.warning(f"Could not remove temporary file: {temp_file} - {str(e)}") - - - async def analyze_multiple_files( - self, - file_ids: List[int], - prompt: str, - lucydom_interface, - ai_service - ) -> Dict[str, Any]: - """ - Analyze multiple files and synthesize a combined result. 
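# Illustrative sketch (not code from the repository): the core of extraction
# "Method 1" above, reduced to its essentials -- iterate the xrefs reported by
# get_images() and pull raw bytes via extract_image(). Requires PyMuPDF
# (`pip install pymupdf`); `pdf_bytes` is assumed to hold a PDF document.
import fitz  # PyMuPDF

def iter_pdf_images(pdf_bytes: bytes):
    with fitz.open(stream=pdf_bytes, filetype="pdf") as doc:
        for page_num, page in enumerate(doc, start=1):
            for img in page.get_images(full=True):
                info = doc.extract_image(img[0])  # img[0] is the xref
                yield page_num, info["ext"], info["image"]
# Note: the document stays open until the generator is exhausted.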
- - Args: - file_ids: List of file IDs to analyze - prompt: Analysis prompt - lucydom_interface: Interface for database access - ai_service: Service for AI requests - - Returns: - Combined analysis result - """ - results = [] - - # Analyze each file - for file_id in file_ids: - try: - analysis = await self.analyze_file(file_id, prompt, lucydom_interface, ai_service) - results.append(analysis) - except Exception as e: - logger.error(f"Error analyzing file {file_id}: {str(e)}") - results.append({ - "file_id": file_id, - "error": str(e), - "analysis_type": "error" - }) - - # Now synthesize a combined analysis - if results: - try: - # Prepare prompt for synthesis - synthesis_prompt = f""" - Synthesize a combined analysis based on these individual file analyses: - - ORIGINAL REQUEST: {prompt} - - INDIVIDUAL ANALYSES: - """ - - for i, result in enumerate(results, 1): - file_name = result.get("file_name", f"File {i}") - analysis_type = result.get("analysis_type", "unknown") - analysis_result = result.get("result", "No analysis available") - - synthesis_prompt += f""" - ## {file_name} ({analysis_type}) - {analysis_result} - - --- - """ - - synthesis_prompt += """ - Please provide a comprehensive synthesis that: - 1. Combines insights from all files - 2. Addresses the original request - 3. Highlights connections between different files - 4. Provides a unified conclusion - """ - - # Call AI for synthesis - synthesis = await ai_service.call_api([{"role": "user", "content": synthesis_prompt}]) - - return { - "synthesis": synthesis, - "individual_results": results, - "files_analyzed": len(results) - } - - except Exception as e: - logger.error(f"Error synthesizing combined analysis: {str(e)}") - return { - "error": str(e), - "individual_results": results, - "files_analyzed": len(results) - } - else: - return { - "synthesis": "No files were successfully analyzed.", - "individual_results": [], - "files_analyzed": 0 - } - - def determine_file_type(self, file_name: str, content_type: str = None) -> str: - """ - Determine the file type based on name and content type. - - Args: - file_name: Name of the file - content_type: MIME type (optional) - - Returns: - File type string ('document', 'image', etc.) 
- """ - # Check content type first - if content_type: - if content_type.startswith('image/'): - return "image" - elif content_type in ['application/pdf']: - return "document" - elif content_type in ['application/vnd.ms-excel', - 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', - 'text/csv']: - return "spreadsheet" - - # Check file extension - lower_name = file_name.lower() - - # Images - if lower_name.endswith(('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.svg')): - return "image" - - # Documents - if lower_name.endswith(('.pdf', '.doc', '.docx', '.txt', '.md', '.rtf')): - return "document" - - # Spreadsheets - if lower_name.endswith(('.xlsx', '.xls', '.csv')): - return "spreadsheet" - - # Presentations - if lower_name.endswith(('.pptx', '.ppt')): - return "presentation" - - # Data files - if lower_name.endswith(('.json', '.xml', '.yaml', '.yml')): - return "data" - - # Default to document - return "document" - - def get_mime_type(self, file_name: str) -> str: - """Get MIME type based on file name.""" - # Import from lucydom_interface - from lucydom_interface import LucyDOMInterface - temp_interface = LucyDOMInterface(0, 0) # Default values - return temp_interface.get_mime_type(file_name) - - def prepare_file_contexts(self, files: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - """ - Bereitet die Dateikontexte basierend auf Metadaten vor. - Akzeptiert keine Pfade mehr, sondern nur Metadaten aus der Datenbank. - - Args: - files: Liste von Dateien mit Metadaten (Dict mit id, name, type, content_type) - - Returns: - Liste von Dateikontexten für die Verarbeitung - """ - file_contexts = [] - - logger.info(f"Preparing file contexts for {len(files)} files") - - for file in files: - file_id = file.get("id") - file_name = file.get("name") - file_type = file.get("type") - - # Create a comprehensive context with all available metadata - context = { - "id": file_id, - "name": file_name, - "type": file_type, - "size": file.get("size", "Unbekannt"), - "content_type": file.get("content_type"), - "path": file.get("path"), - "upload_date": file.get("upload_date"), - "hash": file.get("hash"), - "mandate_id": file.get("mandate_id"), - "user_id": file.get("user_id") - } - - # Log for debugging - logger.info(f"Created file context: {file_name} (ID: {file_id}, Type: {file_type})") - - file_contexts.append(context) - - return file_contexts - - def create_document_reference(self, message: Dict[str, Any], file_id: int, reference_type: str = "reference") -> Dict[str, Any]: - """ - Create a document reference without loading content. - - Args: - message: The message to add the reference to - file_id: ID of the file to reference - reference_type: Type of reference (reference, citation, etc.) 
-
-        Returns:
-            Updated message with the document reference
-        """
-        # FileManager.__init__ never sets lucydom_interface; guard with getattr
-        # so a missing attribute degrades gracefully instead of raising.
-        if not getattr(self, "lucydom_interface", None):
-            logger.warning("LucyDOM interface not available for document reference")
-            return message
-
-        # Get file metadata
-        file = self.lucydom_interface.get_file(file_id)
-        if not file:
-            logger.warning(f"File with ID {file_id} not found for reference")
-            return message
-
-        # Create document structure with just the reference
-        document = {
-            "id": f"ref_{uuid.uuid4()}",
-            "source": {
-                "type": "file",
-                "id": str(file_id),
-                "name": file.get("name", "referenced_file"),
-                "content_type": file.get("content_type"),
-                "size": file.get("size"),
-                "reference_type": reference_type
-            },
-            "contents": []  # Empty contents - will be loaded on demand
-        }
-
-        # Add to message
-        updated_message = message.copy()
-        if "documents" not in updated_message:
-            updated_message["documents"] = []
-
-        updated_message["documents"].append(document)
-        logger.info(f"Added document reference for file {file.get('name')} (ID: {file_id})")
-
-        return updated_message
-
-    def should_extract_document(self, document: Dict[str, Any], context_prompt: str = None) -> bool:
-        """
-        Determine if a document needs content extraction.
-
-        Args:
-            document: The document object
-            context_prompt: Current context prompt
-
-        Returns:
-            True if extraction is needed, False otherwise
-        """
-        # If document has no contents, extraction is needed
-        if not document.get("contents"):
-            return True
-
-        # If document has contents but extraction status is False, extraction may be needed
-        for content in document.get("contents", []):
-            if content.get("type") == "text":
-                # If already extracted, check if context has changed
-                if content.get("is_extracted", False):
-                    # If context prompt is different from what was used previously,
-                    # we may need to re-extract with the new context
-                    prev_context = content.get("extraction_context")
-                    if context_prompt and prev_context != context_prompt:
-                        return True
-                    return False
-                return True
-
-        # Default to needing extraction
-        return True
-
-
-# Create a singleton instance for module-level access
-file_manager = FileManager.get_instance()
-
-def get_file_manager():
-    """Get the singleton instance of FileManager."""
-    return file_manager
-
-
-class WorkflowFileManager:
-    """
-    Specialized file manager for workflow operations.
-    Handles workflow-specific file operations and document management.
-    """
-
-    def __init__(self, workflow_id: str = None, lucydom_interface = None):
-        """
-        Initialize the workflow file manager.
-
-        Args:
-            workflow_id: Optional workflow ID for context
-            lucydom_interface: LucyDOM interface for database operations
-        """
-        self.workflow_id = workflow_id
-        self.lucydom_interface = lucydom_interface
-        self.file_manager = get_file_manager()
-        self.document_handler = None
-
-    def set_workflow_id(self, workflow_id: str):
-        """Set or update the workflow ID."""
-        self.workflow_id = workflow_id
-
-    def set_lucydom_interface(self, lucydom_interface):
-        """Set or update the LucyDOM interface."""
-        self.lucydom_interface = lucydom_interface
-
-    async def add_files_to_message(self,
-                                   message: Dict[str, Any],
-                                   file_ids: List[int],
-                                   add_log_func = None) -> Dict[str, Any]:
-        """
-        Add multiple files to a message.
- - Args: - message: The message to add files to - file_ids: List of file IDs to add - add_log_func: Optional logging function - - Returns: - Updated message - """ - - # If document handler is available, use it - if self.document_handler: - return await self.document_handler.add_files_to_message( - message, - file_ids, - extraction_prompt=None # Default to no extraction - ) - - if not self.lucydom_interface: - _log(add_log_func, self.workflow_id, "LucyDOM interface not available", "error") - return message - - updated_message = message.copy() - - # Get file metadata - files = [] - for file_id in file_ids: - file = self.lucydom_interface.get_file(file_id) - if file: - files.append(file) - else: - _log(add_log_func, self.workflow_id, f"File not found: {file_id}", "warning") - - # Prepare file contexts - file_contexts = self.file_manager.prepare_file_contexts(files) - - # Read file contents - file_contents = await self.file_manager.read_file_contents( - file_contexts, - self.lucydom_interface, - self.workflow_id, - add_log_func - ) - - # Add files to message - for file_id, content_data in file_contents.items(): - # Add file to message - updated_message = FileManager.add_file_to_message(updated_message, content_data) - - return updated_message - - def get_files_from_message(self, message: Dict[str, Any]) -> List[Dict[str, Any]]: - """ - Extract file references from a message. - - Args: - message: The message to extract files from - - Returns: - List of file metadata - """ - files = [] - - # Process documents - for doc in message.get("documents", []): - source = doc.get("source", {}) - - # Only include file documents - if source.get("type") == "file": - file_info = { - "id": source.get("id", ""), - "name": source.get("name", ""), - "type": source.get("content_type", ""), - "content_type": source.get("content_type", ""), - "size": source.get("size", 0) - } - - files.append(file_info) - - return files - - def get_document_text_content(self, message: Dict[str, Any]) -> str: - """ - Extract text content from all documents in a message. - - Args: - message: The message to extract content from - - Returns: - Combined text content - """ - content = "" - - # Process all documents - for doc in message.get("documents", []): - for doc_content in doc.get("contents", []): - if doc_content.get("type") == "text": - content += "\n\n" + doc_content.get("text", "") - - return content - - async def extract_document_info(self, - workflow: Dict[str, Any], - message_id: str = None) -> Dict[str, Any]: - """ - Extract document information from a workflow or specific message. 
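# Illustrative sketch (not code from the repository): extract_document_info()
# below de-duplicates file references by id across messages. The same idea as
# a standalone helper over a list of workflow messages:
def unique_file_refs(messages: list) -> list:
    seen = {}
    for message in messages:
        for doc in message.get("documents", []):
            source = doc.get("source", {})
            file_id = source.get("id")
            if source.get("type") == "file" and file_id and file_id not in seen:
                seen[file_id] = {"id": file_id, "name": source.get("name", "")}
    return list(seen.values())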
- - Args: - workflow: The workflow object - message_id: Optional message ID to focus on a specific message - - Returns: - Document information - """ - result = { - "documents": [], - "file_count": 0, - "extracted_text": "" - } - - if message_id: - # Process only the specified message - for message in workflow.get("messages", []): - if message.get("id") == message_id: - files = self.get_files_from_message(message) - result["documents"].extend(files) - result["file_count"] = len(files) - result["extracted_text"] = self.get_document_text_content(message) - break - else: - # Process all messages - for message in workflow.get("messages", []): - files = self.get_files_from_message(message) - result["documents"].extend(files) - result["extracted_text"] += self.get_document_text_content(message) - - # De-duplicate files - unique_files = {} - for file in result["documents"]: - file_id = file.get("id") - if file_id and file_id not in unique_files: - unique_files[file_id] = file - - result["documents"] = list(unique_files.values()) - result["file_count"] = len(result["documents"]) - - return result - - async def analyze_workflow_documents(self, - workflow: Dict[str, Any], - prompt: str, - ai_service, - message_id: str = None) -> Dict[str, Any]: - """ - Analyze documents in a workflow. - - Args: - workflow: The workflow object - prompt: Analysis prompt - ai_service: Service for AI analysis - message_id: Optional message ID to focus on specific message - - Returns: - Analysis result - """ - if not self.lucydom_interface: - raise ValueError("LucyDOM interface not available") - - if not ai_service: - raise ValueError("AI service not available") - - # Extract document info - doc_info = await self.extract_document_info(workflow, message_id) - - if doc_info["file_count"] == 0: - return { - "result": "No documents found for analysis", - "files_analyzed": 0 - } - - # Get file IDs - file_ids = [doc.get("id") for doc in doc_info["documents"] if doc.get("id")] - - # Analyze files - analysis = await self.file_manager.analyze_multiple_files( - file_ids, - prompt, - self.lucydom_interface, - ai_service - ) - - return analysis - -# Export the workflow file manager factory function -def get_workflow_file_manager(workflow_id: str = None, lucydom_interface = None): - """Get a workflow file manager instance.""" - return WorkflowFileManager(workflow_id, lucydom_interface) \ No newline at end of file diff --git a/modules/agentservice_protocol.py b/modules/agentservice_protocol.py deleted file mode 100644 index 357e66d0..00000000 --- a/modules/agentservice_protocol.py +++ /dev/null @@ -1,338 +0,0 @@ -""" -Agent Communication Protocol module for the Agentservice. -Defines a standardized format for agents to exchange information. -""" - -import json -import uuid -from typing import Dict, Any, List, Optional -from datetime import datetime - -class AgentMessage: - """ - Standard message format for inter-agent communication. - Includes content, metadata, and document references. - """ - - def __init__( - self, - content: str, - sender_id: str, - receiver_id: Optional[str] = None, - message_type: str = "text", - metadata: Optional[Dict[str, Any]] = None, - documents: Optional[List[Dict[str, Any]]] = None, - context_id: Optional[str] = None - ): - """ - Initialize an agent message. - - Args: - content: The main message content - sender_id: ID of the sending agent - receiver_id: Optional ID of the receiving agent - message_type: Type of message (text, task, result, etc.) 
- metadata: Optional metadata dictionary - documents: Optional list of document references - context_id: Optional conversation context ID - """ - self.id = f"msg_{uuid.uuid4()}" - self.timestamp = datetime.now().isoformat() - self.content = content - self.sender_id = sender_id - self.receiver_id = receiver_id - self.message_type = message_type - self.metadata = metadata or {} - self.documents = documents or [] - self.context_id = context_id - - def to_dict(self) -> Dict[str, Any]: - """Convert the message to a dictionary.""" - return { - "id": self.id, - "timestamp": self.timestamp, - "content": self.content, - "sender_id": self.sender_id, - "receiver_id": self.receiver_id, - "message_type": self.message_type, - "metadata": self.metadata, - "documents": self.documents, - "context_id": self.context_id - } - - @classmethod - def from_dict(cls, data: Dict[str, Any]) -> 'AgentMessage': - """Create a message from a dictionary.""" - message = cls( - content=data.get("content", ""), - sender_id=data.get("sender_id", "unknown"), - receiver_id=data.get("receiver_id"), - message_type=data.get("message_type", "text"), - metadata=data.get("metadata", {}), - documents=data.get("documents", []), - context_id=data.get("context_id") - ) - message.id = data.get("id", message.id) - message.timestamp = data.get("timestamp", message.timestamp) - return message - - def to_json(self) -> str: - """Convert the message to a JSON string.""" - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> 'AgentMessage': - """Create a message from a JSON string.""" - return cls.from_dict(json.loads(json_str)) - -class AgentCommunicationProtocol: - """ - Defines the protocol for agents to communicate with each other. - Provides standardized message creation and handling. 
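# Illustrative usage sketch, assuming the AgentMessage class above is in scope:
# messages round-trip losslessly through JSON, which is what makes the format
# usable on the wire between agents.
msg = AgentMessage(content="Summarize the report", sender_id="planner")
restored = AgentMessage.from_json(msg.to_json())
assert restored.id == msg.id and restored.content == msg.content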
- """ - - @staticmethod - def create_text_message( - content: str, - sender_id: str, - receiver_id: Optional[str] = None, - metadata: Optional[Dict[str, Any]] = None, - documents: Optional[List[Dict[str, Any]]] = None, - context_id: Optional[str] = None - ) -> AgentMessage: - """Create a simple text message.""" - return AgentMessage( - content=content, - sender_id=sender_id, - receiver_id=receiver_id, - message_type="text", - metadata=metadata, - documents=documents, - context_id=context_id - ) - - @staticmethod - def create_task_message( - task_description: str, - sender_id: str, - receiver_id: str, - input_data: Optional[Dict[str, Any]] = None, - documents: Optional[List[Dict[str, Any]]] = None, - context_id: Optional[str] = None - ) -> AgentMessage: - """Create a task assignment message.""" - metadata = { - "task_type": "general", - "input_data": input_data or {}, - "priority": "normal", - "task_id": f"task_{uuid.uuid4()}" - } - - return AgentMessage( - content=task_description, - sender_id=sender_id, - receiver_id=receiver_id, - message_type="task", - metadata=metadata, - documents=documents, - context_id=context_id - ) - - @staticmethod - def create_result_message( - result_content: str, - sender_id: str, - receiver_id: str, - task_id: str, - output_data: Optional[Dict[str, Any]] = None, - result_format: str = "text", - documents: Optional[List[Dict[str, Any]]] = None, - context_id: Optional[str] = None - ) -> AgentMessage: - """Create a task result message.""" - metadata = { - "task_id": task_id, - "result_format": result_format, - "status": "completed", - "output_data": output_data or {} - } - - return AgentMessage( - content=result_content, - sender_id=sender_id, - receiver_id=receiver_id, - message_type="result", - metadata=metadata, - documents=documents, - context_id=context_id - ) - - @staticmethod - def create_error_message( - error_description: str, - sender_id: str, - receiver_id: Optional[str] = None, - error_type: str = "general", - error_details: Optional[Dict[str, Any]] = None, - context_id: Optional[str] = None - ) -> AgentMessage: - """Create an error message.""" - metadata = { - "error_type": error_type, - "error_details": error_details or {}, - "severity": "error" - } - - return AgentMessage( - content=error_description, - sender_id=sender_id, - receiver_id=receiver_id, - message_type="error", - metadata=metadata, - context_id=context_id - ) - - @staticmethod - def create_document_request_message( - document_description: str, - sender_id: str, - receiver_id: str, - filters: Optional[Dict[str, Any]] = None, - context_id: Optional[str] = None - ) -> AgentMessage: - """Create a document request message.""" - metadata = { - "request_type": "document", - "filters": filters or {}, - "request_id": f"req_{uuid.uuid4()}" - } - - return AgentMessage( - content=document_description, - sender_id=sender_id, - receiver_id=receiver_id, - message_type="request", - metadata=metadata, - context_id=context_id - ) - - @staticmethod - def create_status_update_message( - status_description: str, - sender_id: str, - receiver_id: Optional[str] = None, - status: str = "in_progress", - progress: float = 0.0, - context_id: Optional[str] = None - ) -> AgentMessage: - """Create a status update message.""" - metadata = { - "status": status, - "progress": progress, - "update_type": "status" - } - - return AgentMessage( - content=status_description, - sender_id=sender_id, - receiver_id=receiver_id, - message_type="status", - metadata=metadata, - context_id=context_id - ) - - @staticmethod - def 
convert_system_message_to_agent_message(system_message: Dict[str, Any], sender_id: str) -> AgentMessage: - """ - Convert a system message to an agent message. - - Args: - system_message: Message object from the workflow - sender_id: ID of the sending agent - - Returns: - AgentMessage instance - """ - # Extract basic information - content = system_message.get("content", "") - message_id = system_message.get("id", f"msg_{uuid.uuid4()}") - timestamp = system_message.get("started_at", datetime.now().isoformat()) - - # Create metadata - metadata = { - "agent_type": system_message.get("agent_type"), - "agent_name": system_message.get("agent_name"), - "workflow_id": system_message.get("workflow_id"), - "sequence_no": system_message.get("sequence_no"), - "result_format": system_message.get("result_format"), - "original_message_id": message_id - } - - # Create agent message - agent_message = AgentMessage( - content=content, - sender_id=sender_id, - message_type="system", - metadata=metadata, - documents=system_message.get("documents", []), - context_id=system_message.get("workflow_id") - ) - - # Set original ID and timestamp - agent_message.id = message_id - agent_message.timestamp = timestamp - - return agent_message - - @staticmethod - def convert_agent_message_to_system_message(agent_message: AgentMessage) -> Dict[str, Any]: - """ - Convert an agent message to a system message. - - Args: - agent_message: The agent message to convert - - Returns: - System message dictionary - """ - message_data = agent_message.to_dict() - metadata = message_data.get("metadata", {}) - - # Create system message structure - system_message = { - "id": message_data.get("id", f"msg_{uuid.uuid4()}"), - "workflow_id": message_data.get("context_id"), - "started_at": message_data.get("timestamp", datetime.now().isoformat()), - "finished_at": datetime.now().isoformat(), - "sequence_no": metadata.get("sequence_no", 0), - - "status": "completed", - "role": "assistant", - - "data_stats": { - "processing_time": 0.0, - "token_count": 0, - "bytes_sent": 0, - "bytes_received": 0 - }, - - "agent_type": metadata.get("agent_type"), - "agent_id": message_data.get("sender_id"), - "agent_name": metadata.get("agent_name"), - "result_format": metadata.get("result_format", "text"), - - "content": message_data.get("content", ""), - "documents": message_data.get("documents", []) - } - - # If this is a result message, add more metadata - if message_data.get("message_type") == "result": - system_message["output_data"] = metadata.get("output_data", {}) - system_message["task_id"] = metadata.get("task_id") - - return system_message - -# Factory function -def get_agent_protocol(): - """Get the agent communication protocol.""" - return AgentCommunicationProtocol \ No newline at end of file diff --git a/modules/agentservice_registry.py b/modules/agentservice_registry.py deleted file mode 100644 index 301e5e73..00000000 --- a/modules/agentservice_registry.py +++ /dev/null @@ -1,290 +0,0 @@ -""" -Updated registry for all available agents in the system. -Provides centralized agent registration and access with improved error handling. 
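# Illustrative usage sketch of the communication protocol above: a coordinator
# assigns a task, and the worker answers with a result tied to the same
# task_id (agent names are made up for the example).
protocol = get_agent_protocol()
task = protocol.create_task_message(
    task_description="Extract totals from the attached CSV",
    sender_id="coordinator",
    receiver_id="data_agent",
)
result = protocol.create_result_message(
    result_content="Total revenue: 1.2M",
    sender_id="data_agent",
    receiver_id="coordinator",
    task_id=task.metadata["task_id"],
)
assert result.metadata["task_id"] == task.metadata["task_id"]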
-""" - -import os -import logging -import importlib -from typing import Dict, Any, List, Optional - -# Import direct base agent module -from modules.agentservice_base import BaseAgent - -logger = logging.getLogger(__name__) - -class AgentRegistry: - """Registry for all available agents in the system""" - - _instance = None - - @classmethod - def get_instance(cls): - """Get a singleton instance of the Agent Registry""" - if cls._instance is None: - cls._instance = cls() - return cls._instance - - def __init__(self): - """Initialize the Agent Registry""" - if AgentRegistry._instance is not None: - raise RuntimeError("Singleton instance already exists - use get_instance()") - self.agents = {} - self.ai_service = None - self.document_handler = None - self.lucydom_interface = None - self._load_agents() - - def _load_agents(self): - """Load all available agents""" - # List of all agent modules to load - logger.info("Automatically loading agent modules...") - agent_modules = [] - for filename in os.listdir(os.path.dirname(__file__)): - if filename.startswith("agentservice_agent_") and filename.endswith(".py"): - agent_modules.append(filename[:-3]) # Remove .py extension - if not agent_modules: - logger.warning("No agent modules found") - return - logger.info(f"Found {len(agent_modules)} agent modules") - - for module_name in agent_modules: - try: - # Import the module - module = importlib.import_module(f"modules.{module_name}") - - # Look for the agent class or a get_*_agent function - agent_type = module_name.split('_')[-1] - class_name = f"{agent_type.capitalize()}Agent" - getter_name = f"get_{agent_type}_agent" - - agent = None - - # Try to get the agent via the get_*_agent function - if hasattr(module, getter_name): - getter_func = getattr(module, getter_name) - agent = getter_func() - logger.info(f"Agent '{agent.name}' (Type: {agent.type}) loaded via {getter_name}()") - - # Alternatively, try to instantiate the agent directly - elif hasattr(module, class_name): - agent_class = getattr(module, class_name) - agent = agent_class() - logger.info(f"Agent '{agent.name}' (Type: {agent.type}) directly instantiated") - - if agent: - # Register the agent - self.register_agent(agent) - else: - logger.warning(f"No agent class or getter function found in module {module_name}") - - except ImportError as e: - logger.error(f"Module {module_name} could not be imported: {e}") - except Exception as e: - logger.error(f"Error loading agent from module {module_name}: {e}") - - def set_dependencies(self, ai_service=None, document_handler=None, lucydom_interface=None): - """ - Set system dependencies for all agents. - - Args: - ai_service: AI service for text generation - document_handler: Document handler for document operations - lucydom_interface: LucyDOM interface for database access - """ - self.ai_service = ai_service - # Update all registered agents - self.update_agent_dependencies() - - - def update_agent_dependencies(self): - """Update dependencies for all registered agents""" - for agent_id, agent in self.agents.items(): - if hasattr(agent, 'set_dependencies'): - agent.set_dependencies( - ai_service=self.ai_service, - document_handler=self.document_handler, - lucydom_interface=self.lucydom_interface - ) - - def register_agent(self, agent: 'BaseAgent'): - """ - Register an agent in the registry. 
- - Args: - agent: The agent to register - """ - agent_type = agent.type - agent_id = getattr(agent, 'id', agent_type) - - # Initialize enhanced agents with dependencies - if hasattr(agent, 'set_dependencies'): - agent.set_dependencies( - ai_service=self.ai_service, - document_handler=self.document_handler, - lucydom_interface=self.lucydom_interface - ) - - self.agents[agent_type] = agent - # Also register by ID if it's different from type - if agent_id != agent_type: - self.agents[agent_id] = agent - - logger.debug(f"Agent '{agent.name}' (Type: {agent_type}, ID: {agent_id}) registered") - - def get_agent(self, agent_identifier: str) -> Optional[BaseAgent]: - """ - Get an agent instance by ID or type. - - Args: - agent_identifier: ID or type of the desired agent - - Returns: - Agent instance or None if not found - """ - # Try to find directly by type - if agent_identifier in self.agents: - return self.agents[agent_identifier] - - # If not found, try different name variants - variants = [ - agent_identifier, - agent_identifier.replace('_agent', ''), - f"{agent_identifier}_agent" - ] - - for variant in variants: - if variant in self.agents: - return self.agents[variant] - - logger.warning(f"Agent with identifier '{agent_identifier}' not found") - return None - - def get_all_agents(self) -> Dict[str, BaseAgent]: - """Get all registered agents.""" - return self.agents - - def get_agent_infos(self) -> List[Dict[str, Any]]: - """Get information about all registered agents.""" - agent_infos = [] - # Only once per agent instance (since we register both by type and ID) - seen_agents = set() - for agent in self.agents.values(): - if agent not in seen_agents: - agent_infos.append(agent.get_agent_info()) - seen_agents.add(agent) - return agent_infos - - def get_agent_by_format(self, required_format: str) -> Optional[BaseAgent]: - """ - Find an agent that can produce the required output format. - - Args: - required_format: The required output format - - Returns: - Agent that can produce the required format, or None if not found - """ - # Create mapping of result format -> agent for faster lookup - format_to_agent = {} - seen_agents = set() - - for agent in self.agents.values(): - if agent not in seen_agents: - # Get the agent's result format - agent_format = getattr(agent, 'result_format', None) - if agent_format: - format_to_agent[agent_format.lower()] = agent - seen_agents.add(agent) - - # Try to find an exact match - if required_format.lower() in format_to_agent: - return format_to_agent[required_format.lower()] - - # If no exact match, try to find a partial match - for fmt, agent in format_to_agent.items(): - if required_format.lower() in fmt or fmt in required_format.lower(): - return agent - - # No match found - return None - - def initialize_agents_for_workflow(self) -> Dict[str, Dict[str, Any]]: - """Initialize agents for a workflow.""" - initialized_agents = {} - seen_agents = set() - for agent in self.agents.values(): - if agent not in seen_agents: - agent_info = agent.get_agent_info() - agent_id = agent_info["id"] - initialized_agents[agent_id] = agent_info - seen_agents.add(agent) - return initialized_agents - - def get_agent_capabilities(self) -> Dict[str, List[str]]: - """ - Get a mapping of capabilities to agents. - Useful for finding the right agent for a specific task. 
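A short usage sketch for the lookup helpers above (agent IDs such as "creative" are the ones referenced later in this diff; treat them as examples, not a fixed list):

    registry = AgentRegistry.get_instance()

    # Direct lookup; get_agent() also tries the "creative_agent" / "creative" variants
    agent = registry.get_agent("creative")

    # Format-based lookup matches against each agent's result_format attribute
    html_agent = registry.get_agent_by_format("html")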
- - Returns: - Dict mapping capability keywords to agent IDs - """ - capabilities_map = {} - seen_agents = set() - - for agent in self.agents.values(): - if agent not in seen_agents: - # Get agent info - agent_id = getattr(agent, 'id', agent.type) - - # Extract capabilities - check for get_capabilities method first - if hasattr(agent, 'get_capabilities') and callable(getattr(agent, 'get_capabilities')): - capabilities = agent.get_capabilities() - else: - # Fall back to string parsing - capabilities_str = getattr(agent, 'capabilities', "") - capabilities = [kw.strip().lower() for kw in capabilities_str.split(',') if kw.strip()] - - # Add each capability to the mapping - for capability in capabilities: - if capability not in capabilities_map: - capabilities_map[capability] = [] - if agent_id not in capabilities_map[capability]: - capabilities_map[capability].append(agent_id) - - seen_agents.add(agent) - - return capabilities_map - - def get_agent_by_capability(self, capability: str) -> Optional['BaseAgent']: - """ - Find an agent with a specific capability. - - Args: - capability: The required capability - - Returns: - Agent with the required capability, or None if not found - """ - # Create mapping of capabilities for faster lookup - capability_map = self.get_agent_capabilities() - - # Look for the capability (case-insensitive) - capability = capability.lower() - matching_agents = [] - - # Direct match - if capability in capability_map: - matching_agents = capability_map[capability] - else: - # Partial matches - for cap, agents in capability_map.items(): - if capability in cap or cap in capability: - matching_agents.extend(agents) - - # Return the first matching agent - if matching_agents: - agent_id = matching_agents[0] - return self.get_agent(agent_id) - - return None \ No newline at end of file diff --git a/modules/agentservice_utils.py b/modules/agentservice_utils.py deleted file mode 100644 index 28253b2a..00000000 --- a/modules/agentservice_utils.py +++ /dev/null @@ -1,760 +0,0 @@ -""" -Centralized utility functions for the Agentservice (continued). -""" - -import os -import logging -import json -import uuid -from datetime import datetime -from typing import List, Dict, Any, Optional, Tuple, Union, Callable -from io import BytesIO - -logger = logging.getLogger(__name__) - -class WorkflowUtils: - """ - Utility class for workflow operations. - Centralizes common workflow-related functions. - """ - - def __init__(self, workflow_id: str = None): - """Initialize with optional workflow ID""" - self.workflow_id = workflow_id - - def set_workflow_id(self, workflow_id: str): - """Set or update the workflow ID""" - self.workflow_id = workflow_id - - def get_documents(self, workflow: Dict[str, Any]) -> List[Dict[str, Any]]: - """ - Get all documents from a workflow across all messages. - - Args: - workflow: The workflow object - - Returns: - List of document objects - """ - documents = [] - - # Process all messages - for message in workflow.get("messages", []): - # Extract documents from the message - for doc in message.get("documents", []): - # Add to list if not already present - if not any(d.get("id") == doc.get("id") for d in documents): - documents.append(doc) - - return documents - - def get_files(self, workflow: Dict[str, Any]) -> List[Dict[str, Any]]: - """ - Get all file references from a workflow. 
- - Args: - workflow: The workflow object - - Returns: - List of file metadata objects - """ - files = [] - - # Process all messages - for message in workflow.get("messages", []): - # Extract documents from the message - for doc in message.get("documents", []): - source = doc.get("source", {}) - - # Only include file documents - if source.get("type") == "file": - file_info = { - "id": source.get("id", ""), - "name": source.get("name", ""), - "type": source.get("content_type", ""), - "content_type": source.get("content_type", ""), - "size": source.get("size", 0) - } - - # Check if file is already in the list - if not any(f.get("id") == file_info["id"] for f in files): - files.append(file_info) - - return files - - def extract_by_prompt(self, workflow: Dict[str, Any], prompt: str, ai_service) -> Dict[str, Any]: - """ - Extract data from workflow documents based on an AI prompt. - - Args: - workflow: The workflow object - prompt: The extraction prompt - ai_service: The AI service to use for extraction - - Returns: - Extracted data - """ - # This is an async method but we're exposing it as a regular method - # The caller should use it with asyncio.run() or await - async def _extract(): - # Create extraction prompt - files = self.get_files(workflow) - file_descriptions = "\n".join([f"- {f.get('name', 'unnamed')} ({f.get('type', 'unknown')})" for f in files]) - - extraction_prompt = f""" - Extract relevant information from the following files based on this request: - - REQUEST: {prompt} - - FILES: - {file_descriptions} - - Focus on the most relevant content and provide a structured output. - """ - - # Call AI - response = await ai_service.call_api([{"role": "user", "content": extraction_prompt}]) - - return { - "prompt": prompt, - "extracted_content": response, - "files_processed": len(files) - } - - # Return the coroutine - return _extract() - - def merge_workflows(self, workflows: List[Dict[str, Any]]) -> Dict[str, Any]: - """ - Merge multiple workflows into a single unified workflow. - Useful for workflow templates or combining partial workflows. - - Args: - workflows: List of workflow objects to merge - - Returns: - Merged workflow - """ - if not workflows: - return {} - - # Start with the first workflow - result = workflows[0].copy() - - # Initialize lists if not present - if "messages" not in result: - result["messages"] = [] - if "logs" not in result: - result["logs"] = [] - - # Merge additional workflows - for workflow in workflows[1:]: - # Append messages - for message in workflow.get("messages", []): - # Check for duplicates - if not any(m.get("id") == message.get("id") for m in result["messages"]): - result["messages"].append(message) - - # Append logs - for log in workflow.get("logs", []): - # Check for duplicates - if not any(l.get("id") == log.get("id") for l in result["logs"]): - result["logs"].append(log) - - # Update status if needed - if workflow.get("status") == "failed": - result["status"] = "failed" - - # Update last_activity if newer - if (workflow.get("last_activity") and - (not result.get("last_activity") or - workflow["last_activity"] > result["last_activity"])): - result["last_activity"] = workflow["last_activity"] - - return result - - def get_message(self, workflow: Dict[str, Any], message_id: str) -> Optional[Dict[str, Any]]: - """ - Find a message by ID in the workflow. 
- - Args: - workflow: The workflow object - message_id: The message ID to find - - Returns: - Message object or None if not found - """ - for message in workflow.get("messages", []): - if message.get("id") == message_id: - return message - return None - - def to_str(self, workflow: Dict[str, Any]) -> str: - """ - Convert workflow to a formatted string representation. - - Args: - workflow: The workflow object - - Returns: - String representation of the workflow - """ - # Create a summary string - result = f"Workflow: {workflow.get('id')}\n" - result += f"Status: {workflow.get('status', 'unknown')}\n" - result += f"Started: {workflow.get('started_at', 'unknown')}\n" - result += f"Last Activity: {workflow.get('last_activity', 'unknown')}\n" - - # Add message count - message_count = len(workflow.get("messages", [])) - result += f"Messages: {message_count}\n" - - # Add log count - log_count = len(workflow.get("logs", [])) - result += f"Logs: {log_count}\n" - - return result - - -class MessageUtils: - """ - Utility class for message operations. - Centralizes common message-related functions. - """ - - def create_message(self, workflow_id: str, role: str = "system") -> Dict[str, Any]: - """ - Create a new message object. - - Args: - workflow_id: ID of the workflow - role: Role of the message ('system', 'user', 'assistant') - - Returns: - New message object - """ - message_id = f"msg_{uuid.uuid4()}" - current_time = datetime.now().isoformat() - - # Create message object - message = { - "id": message_id, - "workflow_id": workflow_id, - "parent_message_id": None, - "started_at": current_time, - "finished_at": None, - "sequence_no": 0, - - "status": "pending", - "role": role, - - "data_stats": { - "processing_time": 0.0, - "token_count": 0, - "bytes_sent": 0, - "bytes_received": 0 - }, - - "documents": [], - "content": None, - "agent_type": None - } - - return message - - def finalize_message(self, message: Dict[str, Any]) -> Dict[str, Any]: - """ - Finalize a message by setting completion timestamp. - - Args: - message: The message object - - Returns: - Updated message object - """ - message["finished_at"] = datetime.now().isoformat() - message["status"] = "completed" - return message - - def get_documents(self, message: Dict[str, Any]) -> List[Dict[str, Any]]: - """ - Get all documents from a message. - - Args: - message: The message object - - Returns: - List of document objects - """ - return message.get("documents", []) - - def get_files(self, message: Dict[str, Any]) -> List[Dict[str, Any]]: - """ - Get all file references from a message. - - Args: - message: The message object - - Returns: - List of file metadata objects - """ - files = [] - - # Extract documents from the message - for doc in message.get("documents", []): - source = doc.get("source", {}) - - # Only include file documents - if source.get("type") == "file": - file_info = { - "id": source.get("id", ""), - "name": source.get("name", ""), - "type": source.get("content_type", ""), - "content_type": source.get("content_type", ""), - "size": source.get("size", 0) - } - - files.append(file_info) - - return files - - def extract_text_content(self, message: Dict[str, Any]) -> str: - """ - Extract text content from a message including document content. 
- - Args: - message: The message object - - Returns: - String with all text content from the message - """ - content = message.get("content", "") - - # Add document content - for doc in message.get("documents", []): - # Check for document contents - for doc_content in doc.get("contents", []): - if doc_content.get("type") == "text": - content += "\n\n" + doc_content.get("text", "") - - return content - - def to_str(self, message: Dict[str, Any]) -> str: - """ - Convert message to a formatted string representation. - - Args: - message: The message object - - Returns: - String representation of the message - """ - # Create a summary string - result = f"Message: {message.get('id')}\n" - result += f"Role: {message.get('role', 'unknown')}\n" - - # Add agent info if available - if message.get("agent_type"): - result += f"Agent: {message.get('agent_name', message.get('agent_type', 'unknown'))}\n" - - # Add content summary - content = message.get("content", "") - if content: - content_preview = content[:100] + "..." if len(content) > 100 else content - result += f"Content: {content_preview}\n" - - # Add document count - doc_count = len(message.get("documents", [])) - result += f"Documents: {doc_count}\n" - - return result - - -class FileUtils: - """ - Utility class for file operations. - Centralizes common file-related functions. - """ - - def is_text_extractable(self, file_name: str, content_type: str = None) -> bool: - """ - Check if text can be extracted from a file. - - Args: - file_name: Name of the file - content_type: MIME type (optional) - - Returns: - True if text can be extracted, False otherwise - """ - # Text files - if file_name.endswith(('.txt', '.md', '.json', '.xml', '.html', '.htm', '.css', '.js', '.py', '.csv')): - return True - - # Excel files - if file_name.endswith(('.xlsx', '.xls')): - try: - import pandas - return True - except ImportError: - return False - - # PDF files - if file_name.endswith('.pdf'): - try: - # Check if PyPDF2 or PyMuPDF is available - try: - import PyPDF2 - return True - except ImportError: - try: - import fitz # PyMuPDF - return True - except ImportError: - return False - except: - return False - - # Images and other non-text files - if file_name.endswith(('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.svg', - '.mp4', '.avi', '.mov', '.mkv', '.flv', '.wmv', - '.mp3', '.wav', '.ogg', '.flac', '.aac')): - return False - - # Check content type if file extension doesn't give a clear answer - if content_type: - if content_type.startswith(('text/', 'application/json', 'application/xml')): - return True - elif content_type == 'application/pdf': - return True - elif content_type.startswith(('image/', 'video/', 'audio/')): - return False - - # Default to allowing extraction attempt - return True - - def get_mime_type(self, file_name: str) -> str: - """ - Get MIME type based on file name. 
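For illustration, the behavior of is_text_extractable above and of get_mime_type that follows (file names hypothetical; the PDF case depends on whether PyPDF2 or PyMuPDF is importable):

    fu = FileUtils()

    fu.is_text_extractable("notes.md")        # True: plain-text extension
    fu.is_text_extractable("photo.jpg")       # False: image
    fu.is_text_extractable("report.pdf")      # True only if a PDF library is installed

    fu.get_mime_type("report.pdf")            # "application/pdf" via mimetypes
    fu.get_mime_type("data.xyz123")           # unknown extension -> "application/octet-stream"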
- - Args: - file_name: Name of the file - - Returns: - MIME type string - """ - import mimetypes - - # Initialize mimetypes - mimetypes.init() - - # Get MIME type - mime_type, _ = mimetypes.guess_type(file_name) - - if not mime_type: - # Default mappings for common extensions - extension_map = { - 'txt': 'text/plain', - 'md': 'text/markdown', - 'json': 'application/json', - 'csv': 'text/csv', - 'html': 'text/html', - 'htm': 'text/html', - 'pdf': 'application/pdf', - 'docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', - 'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', - 'pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation', - 'jpg': 'image/jpeg', - 'jpeg': 'image/jpeg', - 'png': 'image/png', - 'gif': 'image/gif', - 'svg': 'image/svg+xml', - 'webp': 'image/webp', - 'mp4': 'video/mp4', - 'mp3': 'audio/mpeg' - } - - # Get extension - ext = os.path.splitext(file_name)[1].lower().lstrip('.') - - # Return mapped MIME type or default - mime_type = extension_map.get(ext, 'application/octet-stream') - - return mime_type - - -class LoggingUtils: - """ - Enhanced logging utilities for better workflow tracking. - Provides structured and categorized logging for workflows. - """ - - def __init__(self, workflow_id: str = None, log_func: Callable = None): - """ - Initialize logging utilities. - - Args: - workflow_id: ID of the workflow for context - log_func: Function to call for adding workflow logs - """ - self.workflow_id = workflow_id - self.log_func = log_func - self.logger = logging.getLogger(__name__) - - # Define log categories - self.categories = { - "workflow": "Workflow Management", - "planning": "Activity Planning", - "execution": "Activity Execution", - "agents": "Agent Selection & Execution", - "files": "File Processing", - "summary": "Results Summary", - "error": "Error Handling", - "code": "Code Execution", - } - - def set_workflow_id(self, workflow_id: str): - """Update the workflow ID""" - self.workflow_id = workflow_id - - def set_log_func(self, log_func: Callable): - """Update the log function""" - self.log_func = log_func - - def info(self, message: str, category: str = "workflow", details: str = None): - """ - Log an informational message. - - Args: - message: The log message - category: Log category - details: Optional detailed information - """ - category_name = self.categories.get(category, category) - log_message = f"[{category_name}] {message}" - - # Log to standard logger - self.logger.info(log_message) - - # Log to workflow if function available - if self.log_func and self.workflow_id: - self.log_func(self.workflow_id, message, "info", category, category_name) - - def warning(self, message: str, category: str = "workflow", details: str = None): - """ - Log a warning message. - - Args: - message: The log message - category: Log category - details: Optional detailed information - """ - category_name = self.categories.get(category, category) - log_message = f"[{category_name}] {message}" - - # Log to standard logger - self.logger.warning(log_message) - - # Log to workflow if function available - if self.log_func and self.workflow_id: - self.log_func(self.workflow_id, message, "warning", category, category_name) - - def error(self, message: str, category: str = "error", details: str = None): - """ - Log an error message. 
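A sketch of how LoggingUtils is typically wired up, inferred from the constructor and the self.log_func calls in this class (the callback name and workflow ID are hypothetical):

    def add_workflow_log(workflow_id, message, level, category, category_name):
        # Matches the five positional arguments used by info/warning/error above
        print(f"[{workflow_id}] {level}/{category_name}: {message}")

    log = LoggingUtils(workflow_id="wf_42", log_func=add_workflow_log)
    log.info("Work plan created", category="planning")   # logs and forwards to the callback
    log.error("Extraction failed")                       # defaults to the "error" category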
- - Args: - message: The log message - category: Log category - details: Optional detailed information - """ - category_name = self.categories.get(category, category) - log_message = f"[{category_name}] {message}" - - # Log to standard logger - self.logger.error(log_message) - - # Log to workflow if function available - if self.log_func and self.workflow_id: - self.log_func(self.workflow_id, message, "error", category, category_name) - - def debug(self, message: str, category: str = "workflow", details: str = None): - """ - Log a debug message. - - Args: - message: The log message - category: Log category - details: Optional detailed information - """ - category_name = self.categories.get(category, category) - log_message = f"[{category_name}] {message}" - - # Log to standard logger - self.logger.debug(log_message) - - def get_category_name(self, category: str) -> str: - """ - Get human-readable category name. - - Args: - category: Category code - - Returns: - Human-readable category name - """ - return self.categories.get(category, category) - - -def extract_text_from_file_content(file_content: bytes, file_name: str, content_type: str = None) -> Tuple[str, bool]: - """ - Extract text from various file formats based on binary content. - - Args: - file_content: Binary content of the file - file_name: Name of the file for format detection - content_type: Optional MIME type of the file - - Returns: - Tuple with (extracted text, is_extracted flag) - """ - # Check if file is likely text-extractable - if not is_text_extractable(file_name, content_type): - return f"[File: {file_name} - Text extraction not supported]", False - - try: - # Simple text files - if file_name.endswith(('.txt', '.md', '.json', '.xml', '.html', '.htm', '.css', '.js', '.py', '.csv', '.log', '.ini', '.cfg', '.conf')) or (content_type and (content_type.startswith('text/') or content_type in ['application/json', 'application/xml', 'text/csv'])): - try: - return file_content.decode('utf-8'), True - except UnicodeDecodeError: - try: - return file_content.decode('latin1'), True - except: - return file_content.decode('cp1252', errors='replace'), True - - # Excel files - elif file_name.endswith(('.xlsx', '.xls')): - try: - import pandas as pd - # Create temporary in-memory file - file_obj = BytesIO(file_content) - df = pd.read_excel(file_obj) - result = f"Excel file with {len(df)} rows and {len(df.columns)} columns.\n" - result += f"Columns: {', '.join(df.columns.tolist())}\n\n" - result += df.to_string(index=False) - return result, True - except ImportError: - return f"[Excel file: {file_name} - pandas not installed]", False - except Exception as e: - return f"[Error extracting Excel content: {str(e)}]", False - - # CSV files - elif file_name.endswith('.csv'): - try: - import pandas as pd - try: - # Create temporary in-memory file - file_obj = BytesIO(file_content) - df = pd.read_csv(file_obj, encoding='utf-8') - except UnicodeDecodeError: - file_obj = BytesIO(file_content) - try: - df = pd.read_csv(file_obj, encoding='latin1') - except: - file_obj = BytesIO(file_content) - df = pd.read_csv(file_obj, encoding='cp1252') - - result = f"CSV file with {len(df)} rows and {len(df.columns)} columns.\n" - result += f"Columns: {', '.join(df.columns.tolist())}\n\n" - result += df.to_string(index=False) - return result, True - except ImportError: - return f"[CSV file: {file_name} - pandas not installed]", False - except Exception as e: - return f"[Error extracting CSV content: {str(e)}]", False - - # PDF files - elif 
file_name.endswith('.pdf'): - try: - try: - from PyPDF2 import PdfReader - reader = PdfReader(BytesIO(file_content)) - text = "" - for page in reader.pages: - text += page.extract_text() + "\n\n" - return text, True - except ImportError: - try: - import fitz # PyMuPDF - doc = fitz.open(stream=file_content, filetype="pdf") - text = "" - for page in doc: - text += page.get_text() + "\n\n" - return text, True - except ImportError: - return f"[PDF: {file_name} - No PDF library installed]", False - except Exception as e: - return f"[Error reading PDF file {file_name}: {str(e)}]", False - - # Default case - try basic text extraction - else: - try: - return file_content.decode('utf-8', errors='replace'), True - except Exception as e: - logger.error(f"Error extracting text from {file_name}: {str(e)}") - return f"[Text extraction error: {str(e)}]", False - - except Exception as e: - logger.error(f"Error extracting text from {file_name}: {str(e)}") - return f"[Text extraction error: {str(e)}]", False - - -def is_text_extractable(file_name: str, content_type: str = None) -> bool: - """Check if text can be extracted from a file.""" - # Text files - if file_name.endswith(('.txt', '.md', '.json', '.xml', '.html', '.htm', '.css', '.js', '.py', '.csv')): - return True - - # Excel files - if file_name.endswith(('.xlsx', '.xls')): - try: - import pandas - return True - except ImportError: - return False - - # PDF files - if file_name.endswith('.pdf'): - try: - # Check if PyPDF2 or PyMuPDF is available - try: - import PyPDF2 - return True - except ImportError: - try: - import fitz # PyMuPDF - return True - except ImportError: - return False - except: - return False - - # Images and other non-text files - if file_name.endswith(('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.svg', - '.mp4', '.avi', '.mov', '.mkv', '.flv', '.wmv', - '.mp3', '.wav', '.ogg', '.flac', '.aac')): - return False - - # Check content type if file extension doesn't give a clear answer - if content_type: - if content_type.startswith(('text/', 'application/json', 'application/xml')): - return True - elif content_type == 'application/pdf': - return True - elif content_type.startswith(('image/', 'video/', 'audio/')): - return False - - # Default to allowing extraction attempt - return True - diff --git a/modules/agentservice_workflow_execution.py b/modules/agentservice_workflow_execution.py deleted file mode 100644 index aca436d9..00000000 --- a/modules/agentservice_workflow_execution.py +++ /dev/null @@ -1,1251 +0,0 @@ -""" -Refactored architecture for the Agentservice multi-agent system. -This module defines the revised workflow execution with improved agent handovers. -""" - -import os -import logging -import asyncio -import uuid -from datetime import datetime -from typing import List, Dict, Any, Optional, Tuple, Union -import json -import re - -logger = logging.getLogger(__name__) -logging.getLogger('matplotlib.font_manager').setLevel(logging.INFO) - -class WorkflowExecution: - """ - Handles the execution of workflows with improved agent collaboration. - Integrates planning and execution phases for better context awareness. 
- """ - - def __init__(self, workflow_manager, workflow_id: str, mandate_id: int, user_id: int, ai_service, lucydom_interface): - """Initialize the workflow execution""" - self.workflow_manager = workflow_manager - self.workflow_id = workflow_id - self.mandate_id = mandate_id - self.user_id = user_id - self.ai_service = ai_service - self.lucydom_interface = lucydom_interface - - # Import necessary modules - from modules.agentservice_utils import WorkflowUtils, MessageUtils, LoggingUtils - from modules.agentservice_registry import AgentRegistry - from modules.agentservice_filemanager import get_workflow_file_manager - - # Initialize utilities - self.workflow_utils = WorkflowUtils(workflow_id) - self.message_utils = MessageUtils() - self.logging_utils = LoggingUtils(workflow_id, self._add_log) - - # Initialize agent registry - self.agent_registry = AgentRegistry.get_instance() - # Set dependencies for agents - - # Initialize file manager - self.file_manager = get_workflow_file_manager(workflow_id, lucydom_interface) - - # Import and initialize document handler - from modules.agentservice_document_handler import get_document_handler - self.document_handler = get_document_handler(workflow_id, lucydom_interface, ai_service) - - self.agent_registry.set_dependencies( - ai_service=ai_service, - document_handler=self.document_handler, - lucydom_interface=lucydom_interface - ) - - async def execute(self, message: Dict[str, Any], workflow: Dict[str, Any], files: List[Dict[str, Any]] = None, is_user_input: bool = False): - """ - Execute the workflow with integrated planning and agent selection. - - Args: - message: The initiating message (prompt or user input) - workflow: The workflow object - files: Optional list of file metadata - is_user_input: Flag indicating if this is user input - - Returns: - Dict with workflow status and result - """ - try: - # 1. Initialize workflow logging - self.logging_utils.info("Starting workflow execution", "workflow", "Workflow initialized") - - # 2. Process user message and files - user_message = await self._process_user_message(workflow, message, files) - self.logging_utils.info("User message processed", "workflow", "User input added to workflow") - - # 3. Create agent-aware work plan - work_plan = await self._create_agent_aware_work_plan(workflow, user_message) - self.logging_utils.info(f"Created agent-aware work plan with {len(work_plan)} activities", "planning") - self.logging_utils.debug(f"{work_plan}.", "planning") - - # 4. Execute the activities in the work plan - results = await self._execute_work_plan(workflow, work_plan) - - # 5. Create summary - summary = await self._create_summary(workflow, results) - self.logging_utils.info("Created workflow summary", "summary") - - # Set workflow status to completed - workflow["status"] = "completed" - workflow["last_activity"] = datetime.now().isoformat() - - # Final save - self.workflow_manager._save_workflow(workflow) - - return { - "workflow_id": self.workflow_id, - "status": "completed", - "messages": workflow.get("messages", []) - } - - except Exception as e: - self.logging_utils.error(f"Workflow execution failed: {str(e)}", "error") - workflow["status"] = "failed" - self.workflow_manager._save_workflow(workflow) - - return { - "workflow_id": self.workflow_id, - "status": "failed", - "error": str(e) - } - - async def _process_user_message(self, workflow: Dict[str, Any], message: Dict[str, Any], files: List[Dict[str, Any]] = None) -> Dict[str, Any]: - """ - Process the user message and add it to the workflow. 
-
-        Args:
-            workflow: The workflow object
-            message: The user message
-            files: Optional list of file metadata
-
-        Returns:
-            The processed user message
-        """
-        # Create a message with user input
-        user_message = self._create_message(workflow, message.get("role", "user"))
-        user_message["content"] = message.get("content", "")
-
-        # Process files if provided
-        if files:
-            self.logging_utils.info(f"Processing {len(files)} files", "files")
-
-            # Add files to the message via the file manager
-            user_message = await self.file_manager.add_files_to_message(
-                user_message,
-                [f.get('id') for f in files],
-                self._add_log
-            )
-
-        # Add the message to the workflow
-        if "messages" not in workflow:
-            workflow["messages"] = []
-        workflow["messages"].append(user_message)
-
-        # Save workflow state
-        self.workflow_manager._save_workflow(workflow)
-
-        return user_message
-
-    async def _create_agent_aware_work_plan(self, workflow: Dict[str, Any], message: Dict[str, Any]) -> List[Dict[str, Any]]:
-        """
-        Create an agent-aware work plan that integrates agent selection during planning.
-
-        Args:
-            workflow: The workflow object
-            message: The initiating message
-
-        Returns:
-            List of structured activities with agent assignments
-        """
-        # Extract context information
-        task = message.get("content", "")
-
-        # Direct check for the PowerOn keyword as an additional safeguard
-        if "poweron" in task.lower():
-            self.logging_utils.info("PowerOn keyword directly detected, creating specialized plan with creative agent", "planning")
-            return [{
-                "title": "PowerOn Response",
-                "description": "Generate specialized PowerOn response",
-                "assigned_agents": ["creative"],
-                "agent_prompts": [task],
-                "document_requirements": "",
-                "expected_output": "Text",
-                "dependencies": []
-            }]
-
-        # Get all available agents and their capabilities
-        agent_infos = self.agent_registry.get_agent_infos()
-
-        # Extract documents
-        documents = message.get("documents", [])
-        document_info = []
-
-        # Analyze documents without language-specific criteria
-        has_documents = len(documents) > 0
-        pdf_documents = []
-        table_documents = []
-        already_extracted_docs = []
-
-        for doc in documents:
-            source = doc.get("source", {})
-            doc_name = source.get("name", "unnamed")
-            doc_type = source.get("type", "unknown")
-            content_type = source.get("content_type", "unknown")
-
-            # Add to general document info
-            document_info.append({
-                "id": doc.get("id"),
-                "name": doc_name,
-                "type": doc_type,
-                "content_type": content_type
-            })
-
-            # Identify document types
-            if "pdf" in content_type.lower():
-                pdf_documents.append(doc_name)
-
-            # Look for signs of tables based on content structure, not language
-            if doc.get("contents"):
-                contents = doc.get("contents")
-                for content_item in contents:
-                    if isinstance(content_item, dict) and content_item.get("type") == "table":
-                        table_documents.append(doc_name)
-                        break
-
-            # Check for already extracted content
-            if doc.get("contents") or (source and source.get("extracted_content")):
-                already_extracted_docs.append(doc_name)
-
-        # Create a more detailed document list for analysis
-        detailed_document_info = []
-        for doc in documents:
-            source = doc.get("source", {})
-            doc_name = source.get("name", "unnamed")
-            doc_type = source.get("type", "unknown")
-            content_type = source.get("content_type", "unknown")
-            doc_id = doc.get("id", "unknown_id")
-
-            # Extract document properties that might help in matching
-            doc_properties = {
-                "id": doc_id,
-                "name": doc_name,
-                "type": doc_type,
-                "content_type": content_type
-            }
-
-            # Add the file extension if present
-            if "." in doc_name:
-                doc_properties["file_extension"] = os.path.splitext(doc_name)[1].lower()
-
-            detailed_document_info.append(doc_properties)
-
-        # Convert to a JSON string safely before using it in the f-string below
-        detailed_docs_json = "No documents provided"
-        if detailed_document_info:
-            try:
-                detailed_docs_json = json.dumps(detailed_document_info, indent=2)
-            except Exception as e:
-                self.logging_utils.warning(f"Error converting document info to JSON: {str(e)}", "planning")
-
-        # Task analysis prompt used to identify document processing tasks
-        task_analysis_prompt = f"""
-Analyze the following user task and classify it.
-This analysis will be used internally by the system to optimize the workflow.
-
-TASK: {task}
-
-AVAILABLE DOCUMENTS:
-{detailed_docs_json}
-
-Please determine:
-1. The primary type of operation requested (extraction, transformation, formatting, analysis, creation)
-2. Whether the task appears to be primarily about:
-- Extracting information from documents
-- Transforming existing information
-- Analyzing available information
-- Creating new content
-3. The documents relevant to this task (any documents that might be needed)
-4. The expected output format or presentation style
-5. Whether the task involves any kind of document processing (such as extracting information,
-transforming data, creating tables, summarizing text, or analyzing document contents)
-6. Whether the task requires online information retrieval
-7. Whether the task requires complex computational algorithms or repetitive calculations
-8. Whether the task contains the keyword "poweron" in any form
-
-Return your analysis as a JSON object with these properties:
-- primaryOperationType: string (extraction, transformation, formatting, analysis, creation)
-- isUsingExistingData: boolean (true if primarily using already available data)
-- mentionedDocuments: array of document IDs or names that are relevant to this task
-- expectedOutputFormat: string (html, text, table, etc. or "unspecified")
-- involvesDocumentProcessing: boolean (true if task involves any document extraction, transformation, summarization, etc.)
-- requiresWebResearch: boolean (true if task requires online information) -- requiresComplexComputation: boolean (true if task requires complex algorithms or repetitive calculations) -- containsPowerOnKeyword: boolean (true if the keyword "poweron" is found in any form) -""" - - # Call AI to analyze the task - self.logging_utils.info("Analyzing task to determine optimal planning approach", "planning") - - # Initialize task analysis variables with defaults - operation_type = "" - is_using_existing_data = False - mentioned_documents = [] - expected_output = "unspecified" - contains_poweron = False - requires_web_research = False - requires_complex_computation = False - involves_document_processing = False - can_use_optimized_plan = False - task_analysis = {} - - try: - task_analysis_response = await self.ai_service.call_api([{"role": "user", "content": task_analysis_prompt}]) - - # Extract JSON from response - json_match = re.search(r'\{.*\}', task_analysis_response, re.DOTALL) - - if json_match: - json_str = json_match.group(0) - task_analysis = json.loads(json_str) - - # Log the analysis - try: - analysis_str = json.dumps(task_analysis) - self.logging_utils.info(f"Task analysis: {analysis_str}", "planning") - except Exception as e: - self.logging_utils.warning(f"Error logging task analysis: {str(e)}", "planning") - - # Extract all analysis criteria from the response - operation_type = task_analysis.get("primaryOperationType", "").lower() - is_using_existing_data = task_analysis.get("isUsingExistingData", False) - mentioned_documents = task_analysis.get("mentionedDocuments", []) - expected_output = task_analysis.get("expectedOutputFormat", "").lower() - contains_poweron = task_analysis.get("containsPowerOnKeyword", False) - requires_web_research = task_analysis.get("requiresWebResearch", False) - requires_complex_computation = task_analysis.get("requiresComplexComputation", False) - involves_document_processing = task_analysis.get("involvesDocumentProcessing", False) - - # PowerOn handling takes highest priority - check it first - if contains_poweron: - self.logging_utils.info("PowerOn keyword detected, creating specialized plan with creative agent", "planning") - return [{ - "title": "PowerOn Response", - "description": "Generate specialized PowerOn response", - "assigned_agents": ["creative"], - "agent_prompts": [task], - "document_requirements": "", - "expected_output": "Text", - "dependencies": [] - }] - - # For web research tasks, create a simple plan with webcrawler agent - if requires_web_research: - self.logging_utils.info("Web research task detected, creating specialized plan with webcrawler agent", "planning") - return [{ - "title": "Web Research", - "description": "Perform web research to answer the query", - "assigned_agents": ["webcrawler"], - "agent_prompts": [task], - "document_requirements": "", - "expected_output": "Text", - "dependencies": [] - }] - - # If documents are available and task involves document processing, prioritize creative agent - if has_documents and involves_document_processing: - self.logging_utils.info("Document processing task detected with available documents, using creative agent", "planning") - return [{ - "title": "Document Processing", - "description": "Process documents according to requirements", - "assigned_agents": ["creative"], - "agent_prompts": [task], - "document_requirements": "All available documents", - "expected_output": expected_output if expected_output != "unspecified" else "Text", - "dependencies": [] - }] - - # If task is a 
document processing task even without documents, still use creative agent - if involves_document_processing and not requires_complex_computation: - self.logging_utils.info("Document processing task detected, using creative agent", "planning") - return [{ - "title": "Document Processing", - "description": "Process content according to requirements", - "assigned_agents": ["creative"], - "agent_prompts": [task], - "document_requirements": "", - "expected_output": expected_output if expected_output != "unspecified" else "Text", - "dependencies": [] - }] - - # Only use coder for complex computation tasks - if requires_complex_computation: - self.logging_utils.info("Complex computation task detected, using coder agent", "planning") - return [{ - "title": "Complex Computation", - "description": "Perform complex calculations or processing", - "assigned_agents": ["coder"], - "agent_prompts": [task], - "document_requirements": "All available documents may be needed", - "expected_output": expected_output if expected_output != "unspecified" else "Text", - "dependencies": [] - }] - - # Flag for optimized planning - can_use_optimized_plan = ( - (operation_type in ["formatting", "transformation"]) and - is_using_existing_data and - has_documents - ) - - except Exception as e: - self.logging_utils.warning(f"Error analyzing task: {str(e)}, proceeding with standard planning", "planning") - - - - # Create the base planning prompt - plan_prompt = f""" -As an AI workflow manager, create a detailed agent-aware work plan for the following task: - -TASK: {task} - -AVAILABLE AGENTS: -{self._format_agent_info(agent_infos)} - -AVAILABLE DOCUMENTS: -{json.dumps(document_info, indent=2) if document_info else "No documents provided"} - -""" - - # Add context about documents if they exist - if already_extracted_docs: - plan_prompt += f""" -IMPORTANT CONTEXT: -The following documents already have extracted content ready to use: {', '.join(already_extracted_docs)} -This means NO extraction step is needed for these documents - the data is ALREADY AVAILABLE. -""" - - # Add context specific to this task based on AI analysis, not language-specific keywords - if task_analysis: - if operation_type and is_using_existing_data and has_documents: - plan_prompt += f""" -CRITICAL INSTRUCTION FOR THIS TASK: -Based on analysis, this task involves {operation_type} of data that is ALREADY AVAILABLE. -The system has identified this as primarily working with existing data, not requiring new extraction. -If this task involves structured data, that data has already been parsed and is immediately available. -DO NOT create separate extraction tasks - go directly to creating the requested output. -""" - - plan_prompt += """ -DOCUMENT HANDLING REQUIREMENTS: -1. When a task involves document analysis, focus on WHAT information is needed, not HOW to extract it -2. The document handler automatically extracts and processes all document components including: -- Text content from documents -- Images embedded within documents -- Charts and graphics -- Structured data and tables -3. Each document's content is pre-processed and made available to any agent that needs it -4. For document extraction specifications, simply state what information is needed from which document -5. The system will handle conversion between formats, extraction, and specialized processing - -AGENT SELECTION GUIDELINES: -1. 
The creative agent should handle: -- All document processing tasks (extraction, summarization, analysis) -- All content creation and knowledge-based tasks -- All tasks involving documents and text transformation -- All document descriptions and data extraction -- All table creation and data representation -- All tasks with PowerOn keyword - -2. The webcrawler agent should ONLY handle: -- Tasks explicitly requiring online information retrieval -- Tasks needing current information from the web - -3. The coder agent should ONLY handle: -- Tasks requiring complex computational algorithms -- Tasks involving repetitive mathematical calculations -- Tasks requiring specialized programming logic - -IMPORTANT DOCUMENT HANDLING PRIORITIES: -- For tasks involving document processing, ALWAYS use the creative agent even for structured data -- For tasks to extract information from documents, ALWAYS use the creative agent -- For tasks to describe or summarize document content, ALWAYS use the creative agent -- For tasks to transform data from documents, ALWAYS use the creative agent -- Only use the coder agent when complex computational logic is the primary requirement -""" - - # Add task optimization advice - language agnostic, based on AI analysis - if can_use_optimized_plan: - plan_prompt += """ -TASK-SPECIFIC OPTIMIZATION: -This task appears to be primarily about formatting or transforming ALREADY EXTRACTED data. -The most efficient approach is: -1. DO NOT include any extraction activities - the document data is already parsed and available -2. Use a SINGLE activity with an appropriate agent to create the requested output format -3. Focus on specifying the desired output format in detail, not on how to extract the data -""" - - plan_prompt += """ -The work plan should include a structured list of activities. Each activity should have: -1. title - A short descriptive title for the activity -2. description - What needs to be done in this activity -3. assigned_agents - List of agent IDs that should handle this activity (can be multiple in sequence) -4. agent_prompts - Specific instructions for each agent (matched by index to assigned_agents) -5. document_requirements - Description of WHAT information is needed from which documents (not HOW to extract it) -6. expected_output - The expected output format and content -7. dependencies - List of previous activities this depends on (by index) - -IMPORTANT GUIDELINES: -- Optimize agent assignments based on their specialized capabilities -- Create a logical sequence of activities that builds toward the final output -- DO NOT create activities solely for document extraction - specify needed information in document_requirements -- DO NOT assign extraction tasks to specific agents - the system handles this automatically -- When a document contains both text and images, both will be processed automatically -- If a task requires analyzing images, specify what to look for in the images -- Create detailed agent_prompts that clearly explain what each agent should accomplish -- ELIMINATE redundant steps - if data is already extracted, go directly to generating the desired output format - -Return the work plan as a JSON array of activity objects, each with the above properties. 
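To make the planner's contract concrete, a single activity object of the shape the prompt above requests could look like this (all values illustrative):

    work_plan = [
        {
            "title": "Summarize contract",
            "description": "Summarize the key clauses of the uploaded contract",
            "assigned_agents": ["creative"],
            "agent_prompts": ["Summarize the key clauses of the contract document"],
            "document_requirements": "Clause text from the uploaded contract",
            "expected_output": "Text",
            "dependencies": []
        }
    ]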
-""" - - self.logging_utils.info("Creating agent-aware work plan", "planning") - - # For tasks that can use optimized plans, generate one directly - if can_use_optimized_plan: - # For formatting/transformation tasks with extracted data, use an optimized 1-step plan - self.logging_utils.info("Using optimized single-step plan based on task analysis", "planning") - - # Use the specific output format from the task analysis - expected_format = task_analysis.get("expectedOutputFormat", "HTML").upper() - if expected_format.lower() == "unspecified": - expected_format = "Text" - - # Create appropriate agent assignment based on expected output and task classification - # Prefer creative agent for document processing tasks - agent_id = "creative" if involves_document_processing else "coder" - - # Create a direct single-activity plan - optimized_plan = [{ - "title": f"Process and Format Data", - "description": f"Process the existing data and format it as {expected_format}", - "assigned_agents": [agent_id], - "agent_prompts": [ - f"The data from the documents has already been extracted and is available. " - f"Create a well-formatted {expected_format} representation of this data. " - f"No extraction is needed - focus only on proper formatting and presentation." - ], - "document_requirements": f"Use the already extracted data from the available documents", - "expected_output": expected_format, - "dependencies": [] - }] - - # Log the optimized plan - self.logging_utils.info(f"Created optimized single-step plan with agent: {agent_id}", "planning") - - return optimized_plan - - # For more complex tasks, use the AI to generate a plan - try: - plan_response = await self.ai_service.call_api([{"role": "user", "content": plan_prompt}]) - - # Extract JSON plan - json_pattern = r'\[\s*\{.*\}\s*\]' - json_match = re.search(json_pattern, plan_response, re.DOTALL) - - if json_match: - json_str = json_match.group(0) - work_plan = json.loads(json_str) - self.logging_utils.info(f"Work plan created with {len(work_plan)} activities", "planning") - - # Post-process to ensure document tasks go to creative agent - for activity in work_plan: - doc_requirements = activity.get("document_requirements", "") - activity_description = activity.get("description", "").lower() - - # If activity involves documents or document processing terms but isn't assigned to creative - if (doc_requirements or - "document" in activity_description or - "extract" in activity_description or - "summarize" in activity_description): - - # Check if creative is not already assigned - if "creative" not in activity.get("assigned_agents", []): - activity["assigned_agents"] = ["creative"] - self.logging_utils.info("Changed agent assignment for document activity to creative agent", "planning") - - # Post-process based on the task analysis to optimize if needed - if task_analysis and task_analysis.get("isUsingExistingData", False): - work_plan = self._optimize_work_plan(work_plan, task_analysis) - self.logging_utils.info(f"Post-processed work plan now has {len(work_plan)} activities", "planning") - - # Log detailed work plan to console - for i, activity in enumerate(work_plan): - activity_title = activity.get("title", f"Activity {i+1}") - activity_agents = ", ".join(activity.get("assigned_agents", ["unknown"])) - self.logging_utils.info(f"Activity {i+1}: {activity_title} (Agents: {activity_agents})", "planning") - - # Log document requirements if any - if activity.get("document_requirements"): - self.logging_utils.info(f" Document requirements: 
{activity.get('document_requirements')}", "planning") - - # Log dependencies if any - if activity.get("dependencies"): - deps = [str(d + 1) for d in activity.get("dependencies")] - self.logging_utils.info(f" Dependencies: Activities {', '.join(deps)}", "planning") - - return work_plan - else: - self.logging_utils.warning("Could not extract JSON from AI response", "planning") - - # Fallback based on previous analysis - if requires_web_research: - return [{ - "title": "Web Research", - "description": "Perform web research to answer the query", - "assigned_agents": ["webcrawler"], - "agent_prompts": [task], - "document_requirements": "", - "expected_output": "Text", - "dependencies": [] - }] - elif involves_document_processing: - return [{ - "title": "Document Processing", - "description": "Process documents or content according to requirements", - "assigned_agents": ["creative"], - "agent_prompts": [task], - "document_requirements": "All available documents may be needed", - "expected_output": "Text", - "dependencies": [] - }] - elif requires_complex_computation: - return [{ - "title": "Complex Computation", - "description": "Perform complex calculations or processing", - "assigned_agents": ["coder"], - "agent_prompts": [task], - "document_requirements": "All available documents may be needed", - "expected_output": "Text", - "dependencies": [] - }] - else: - # Fallback: Create a simple default work plan with creative agent - return [{ - "title": "Process Task", - "description": "Process the request directly", - "assigned_agents": ["creative"], - "agent_prompts": [task], - "document_requirements": "All available documents may be needed", - "expected_output": "Text", - "dependencies": [] - }] - - except Exception as e: - self.logging_utils.error(f"Error creating work plan: {str(e)}", "planning") - - # Check for PowerOn directly in fallback - if "poweron" in task.lower(): - return [{ - "title": "PowerOn Response (Fallback)", - "description": "Generate specialized PowerOn response after planning error", - "assigned_agents": ["creative"], - "agent_prompts": [task], - "document_requirements": "", - "expected_output": "Text", - "dependencies": [] - }] - - # Return a minimal fallback plan with creative agent - return [{ - "title": "Process Task (Error Recovery)", - "description": "Process the request after planning error", - "assigned_agents": ["creative"], - "agent_prompts": [task], - "document_requirements": "All available documents may be needed", - "expected_output": "Text", - "dependencies": [] - }] - - - - - # Language-agnostic optimization function using task analysis instead of keywords - def _optimize_work_plan(self, work_plan: List[Dict[str, Any]], task_analysis: Dict[str, Any]) -> List[Dict[str, Any]]: - """ - Optimize a work plan based on task analysis, not language-specific keywords. 
- - Args: - work_plan: The original work plan - task_analysis: Analysis of the task - - Returns: - Optimized work plan - """ - # Check if plan has multiple activities - if len(work_plan) <= 1: - return work_plan - - # Only optimize when the task is about using existing data - if not task_analysis.get("isUsingExistingData", False): - return work_plan - - # For tasks that use existing data, try to identify and remove redundant extraction steps - operation_type = task_analysis.get("primaryOperationType", "").lower() - if operation_type in ["formatting", "transformation"]: - # Use AI to identify extraction vs formatting activities instead of keywords - activities_analyzed = [] - - for activity in work_plan: - title = activity.get("title", "") - description = activity.get("description", "") - - # Create an activity object with classification - activity_info = { - "original_activity": activity, - "is_extraction": False, - "is_formatting": False - } - - # Use simple heuristics to classify (can be replaced with AI classification) - # These are pattern-based, not language-dependent - if any(x in title.lower() or x in description.lower() for x in ["extract", "parse", "read"]): - activity_info["is_extraction"] = True - - if any(x in title.lower() or x in description.lower() for x in ["format", "convert", "transform"]): - activity_info["is_formatting"] = True - - activities_analyzed.append(activity_info) - - # Check if we have both extraction and formatting activities - has_extraction = any(a["is_extraction"] for a in activities_analyzed) - has_formatting = any(a["is_formatting"] for a in activities_analyzed) - - if has_extraction and has_formatting: - # Create a new optimized plan - self.logging_utils.info("Optimizing plan by removing redundant extraction steps", "planning") - - # First, separate formatting and non-extraction activities - formatting_activities = [a["original_activity"] for a in activities_analyzed if a["is_formatting"]] - other_activities = [a["original_activity"] for a in activities_analyzed - if not a["is_extraction"] and not a["is_formatting"]] - - # Combine into a new optimized plan - optimized_plan = [] - - # Add formatting activities first - for activity in formatting_activities: - # Enhance the prompt to indicate that data is already available - prompt = activity.get("agent_prompts", [""])[0] - activity["agent_prompts"] = [ - f"IMPORTANT: The data from the documents has already been extracted and is available. " - f"You do not need to perform any extraction steps.\n\n{prompt}" - ] - - # Reset dependencies since we're removing extraction activities - activity["dependencies"] = [] - optimized_plan.append(activity) - - # Add other non-extraction activities - for activity in other_activities: - # Reset dependencies - activity["dependencies"] = [] - optimized_plan.append(activity) - - return optimized_plan - - # If no optimization possible, return original plan - return work_plan - - - async def _execute_work_plan(self, workflow: Dict[str, Any], work_plan: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - """ - Execute all activities in the work plan with proper agent handovers. 
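As a reading aid, the effect of _optimize_work_plan above on a typical two-step plan (a sketch; the classification relies on the same keyword heuristics as the method):

    plan = [
        {"title": "Extract invoice data", "description": "Parse the PDF tables",
         "agent_prompts": ["Extract all line items"], "dependencies": []},
        {"title": "Format as HTML", "description": "Convert the extracted data",
         "agent_prompts": ["Render the line items as an HTML table"], "dependencies": [0]},
    ]

    # With task_analysis = {"isUsingExistingData": True, "primaryOperationType": "formatting"},
    # the extraction activity is dropped, the formatting activity is kept with its
    # dependencies reset to [], and its prompt is prefixed with a note that the data
    # is already available.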
- - Args: - workflow: The workflow object - work_plan: The work plan with activities - - Returns: - Results from all activities - """ - results = [] - activity_outputs = {} # Store outputs for dependency resolution - - for activity_index, activity in enumerate(work_plan): - # Extract activity info - title = activity.get("title", f"Activity {activity_index+1}") - description = activity.get("description", "") - assigned_agents = activity.get("assigned_agents", ["assistant"]) - agent_prompts = activity.get("agent_prompts", [description]) - doc_requirements = activity.get("document_requirements", "") - expected_output = activity.get("expected_output", "Text") - dependencies = activity.get("dependencies", []) - - self.logging_utils.info(f"Starting activity: {title}", "execution") - - # Validate assigned_agents and agent_prompts - if len(assigned_agents) > len(agent_prompts): - # Duplicate the last prompt for additional agents - agent_prompts.extend([agent_prompts[-1]] * (len(assigned_agents) - len(agent_prompts))) - elif len(agent_prompts) > len(assigned_agents): - # Truncate excess prompts - agent_prompts = agent_prompts[:len(assigned_agents)] - - # Process dependencies first - dependency_context = {} - for dep_index in dependencies: - if dep_index < activity_index and dep_index in activity_outputs: - dep_output = activity_outputs[dep_index] - dependency_context[f"activity_{dep_index+1}"] = dep_output - - # Extract required documents if needed - document_content = "" - if doc_requirements: - extracted_data = await self._extract_required_documents(workflow, doc_requirements) - if extracted_data and "extracted_content" in extracted_data: - # Format document content for the prompt - document_content = "\n\n=== EXTRACTED DOCUMENT CONTENT ===\n\n" - for item in extracted_data.get("extracted_content", []): - doc_name = item.get("name", "Unnamed document") - doc_content = item.get("content", "No content available") - document_content += f"--- {doc_name} ---\n{doc_content}\n\n" - - # Execute the activity with the assigned agents - activity_result = await self._execute_agent_sequence( - workflow, - assigned_agents, - agent_prompts, - document_content, - dependency_context, - expected_output - ) - - # Store the result - activity_outputs[activity_index] = activity_result - results.append({ - "title": title, - "description": description, - "agents": assigned_agents, - "result": activity_result.get("content", ""), - "output_format": activity_result.get("format", "Text") - }) - - self.logging_utils.info(f"Completed activity: {title}", "execution") - - # Save intermediate state - self.workflow_manager._save_workflow(workflow) - - return results - - async def _execute_agent_sequence( - self, - workflow: Dict[str, Any], - agent_ids: List[str], - prompts: List[str], - document_content: str, - dependency_context: Dict[str, Any], - expected_output: str - ) -> Dict[str, Any]: - """ - Execute a sequence of agents with proper handovers. 
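# A minimal sketch of the agent/prompt reconciliation applied above before a
# sequence runs (it assumes prompts is non-empty, which the caller guarantees
# by defaulting agent_prompts to [description]):
from typing import List

def align_prompts(agents: List[str], prompts: List[str]) -> List[str]:
    """Give every agent exactly one prompt: pad with the last one or truncate."""
    if len(agents) > len(prompts):
        prompts = prompts + [prompts[-1]] * (len(agents) - len(prompts))
    return prompts[: len(agents)]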
- - Args: - workflow: The workflow object - agent_ids: List of agent IDs to execute in sequence - prompts: List of prompts for each agent - document_content: Extracted document content - dependency_context: Context from dependent activities - expected_output: Expected output format - - Returns: - Result of the agent sequence execution - """ - context = { - "workflow_id": self.workflow_id, - "expected_format": expected_output, - "dependency_outputs": dependency_context, - "include_chat_history": True # Flag to indicate chat history should be included - } - - last_result = None - last_documents = [] - - for i, agent_id in enumerate(agent_ids): - # Get the agent - agent = self.agent_registry.get_agent(agent_id) - if agent: - # Ensure dependencies are set - if hasattr(agent, 'set_dependencies'): - agent.set_dependencies( - ai_service=self.ai_service, - document_handler=self.document_handler, - lucydom_interface=self.lucydom_interface - ) - - # Set document handler if agent supports it - if hasattr(agent, 'set_document_handler') and hasattr(self, 'document_handler'): - agent.set_document_handler(self.document_handler) - - if not agent: - self.logging_utils.warning(f"Agent '{agent_id}' not found, using assistant instead", "agents") - agent = self.agent_registry.get_agent("assistant") - if not agent: - # If assistant not found, create a minimal agent response - continue - - # Get the agent prompt - base_prompt = prompts[i] if i < len(prompts) else prompts[-1] - - # Enhance the prompt with context - enhanced_prompt = self._enhance_prompt( - base_prompt, - document_content, - dependency_context, - last_result.get("content", "") if last_result else "", - i > 0, # is_continuation flag - workflow # Pass the workflow parameter - ) - - if document_content and "Image Analysis" not in document_content: - # Instead of trying to access message or documents directly, - # We can use what we know about the workflow we're currently processing - workflow_id = self.workflow_id - - # Log a warning that might help identify the issue - self.logging_utils.warning( - f"Document content available but no image analysis found - PDF image extraction may have failed for workflow {workflow_id}", - "agents" - ) - - # Create the message for this agent - agent_message = self._create_message(workflow, "user") - agent_message["content"] = enhanced_prompt - - # IMPORTANT FIX: Document handling logic - # First, check if we have documents from previous agent if this is a continuation - if last_documents and i > 0: - agent_message["documents"] = last_documents - # For the first agent, make sure we pass any documents from the most recent user message - elif i == 0: - # Find the most recent user message with documents - for msg in reversed(workflow.get("messages", [])): - if msg.get("role") == "user" and msg.get("documents"): - agent_message["documents"] = msg.get("documents", []) - self.logging_utils.info(f"Passing {len(agent_message['documents'])} documents from user message to {agent_id}", "agents") - break - - # Log agent execution - self.logging_utils.info(f"Executing agent: {agent_id}", "agents") - - # Execute the agent - agent_response = await agent.process_message(agent_message, context) - - # Create response message - response_message = self._create_message(workflow, "assistant") - response_message["content"] = agent_response.get("content", "") - response_message["agent_type"] = agent_id - response_message["agent_id"] = agent_id - response_message["agent_name"] = agent.name - response_message["result_format"] = 
agent_response.get("result_format", expected_output) - - # Capture documents from response - if "documents" in agent_response: - response_message["documents"] = agent_response["documents"] - last_documents = agent_response["documents"] - self.logging_utils.info(f"Agent {agent_id} produced {len(last_documents)} documents", "agents") - - # Add to workflow - workflow["messages"].append(response_message) - - # Update last result - last_result = { - "content": agent_response.get("content", ""), - "format": agent_response.get("result_format", expected_output), - "agent_id": agent_id, - "documents": agent_response.get("documents", []) - } - - return last_result or { - "content": "No agent response was generated.", - "format": "Text" - } - - - async def _extract_required_documents(self, workflow: Dict[str, Any], doc_requirements: str) -> Dict[str, Any]: - """ - Extract required documents based on requirements description with enhanced image extraction. - - Args: - workflow: The workflow object - doc_requirements: Description of document requirements - - Returns: - Extracted document data - """ - # Import for data extraction - from modules.agentservice_dataextraction import data_extraction - - # Get all files from the workflow - files = self.workflow_utils.get_files(workflow) - - # Get all messages from the workflow - workflow_messages = workflow.get("messages", []) - - # Log document requirements - self.logging_utils.info(f"Document requirements: {doc_requirements}", "extraction") - self.logging_utils.info(f"Found {len(files)} files in workflow", "extraction") - - # Create enhanced extraction prompt - enhanced_prompt = f""" - Extract the following information from the available documents: - - REQUIRED INFORMATION: {doc_requirements} - - For all documents, please: - 1. Extract relevant text portions matching the requirements - 2. Identify and analyze any embedded images or charts - 3. Provide structured data from tables or spreadsheets - 4. Summarize key information in context of the requirements - - Handle multi-format documents comprehensively (text, images, charts, tables) - For images, include detailed descriptions of visual content - """ - - # Extract data using the dataextraction module with enhanced prompt - self.logging_utils.info("Starting document extraction process", "extraction") - - extracted_data = await data_extraction( - prompt=enhanced_prompt, - files=files, - messages=workflow_messages, - ai_service=self.ai_service, - lucydom_interface=self.lucydom_interface, - workflow_id=self.workflow_id, - add_log_func=self._add_log, - document_handler=self.document_handler # Pass document handler for better extraction - ) - - # Log extraction results - if extracted_data: - extracted_content = extracted_data.get("extracted_content", []) - self.logging_utils.info(f"Extracted content from {len(extracted_content)} documents", "extraction") - - # Log details for each extracted document with more detail - for doc in extracted_content: - doc_name = doc.get("name", "Unnamed document") - extraction_method = doc.get("extraction_method", "unknown") - is_extracted = doc.get("is_extracted", False) - content_preview = doc.get("content", "")[:100] + "..." 
if len(doc.get("content", "")) > 100 else doc.get("content", "") - - self.logging_utils.info( - f"Document: {doc_name}, Method: {extraction_method}, Extracted: {is_extracted}", - "extraction" - ) - self.logging_utils.info( - f"Content preview: {content_preview}", - "extraction" - ) - - # Specifically check for image content - if "Image Analysis:" in doc.get("content", ""): - self.logging_utils.info(f"Image content found in {doc_name}", "extraction") - else: - self.logging_utils.warning(f"No image content found in {doc_name} - check PDF extraction", "extraction") - return extracted_data - - - async def _create_summary(self, workflow: Dict[str, Any], results: List[Dict[str, Any]]) -> Dict[str, Any]: - """ - Create a summary of the workflow results for the user. - - Args: - workflow: The workflow object - results: Results from activity executions - - Returns: - Summary message - """ - # Create a summary prompt - summary_prompt = "Create a clear, concise summary of the following workflow results:\n\n" - - for i, result in enumerate(results, 1): - title = result.get("title", f"Activity {i}") - description = result.get("description", "") - content = result.get("result", "") - agents = ", ".join(result.get("agents", ["unknown"])) - - # Limit content length for the summary prompt - content_preview = content[:500] + "..." if len(content) > 500 else content - - summary_prompt += f""" - ACTIVITY {i}: {title} - Description: {description} - Executed by: {agents} - - {content_preview} - - --- - """ - - summary_prompt += """ - Provide a well-structured summary that: - 1. Highlights the key findings and results - 2. Connects the results to the original task - 3. Presents any conclusions or recommendations - - Make sure the summary is clear, concise, and useful to the user. 
- """ - - # Call AI to generate summary - summary_content = await self.ai_service.call_api([{"role": "user", "content": summary_prompt}]) - - # Create summary message - summary_message = self._create_message(workflow, "assistant") - summary_message["content"] = summary_content - summary_message["agent_type"] = "summary" - summary_message["agent_id"] = "workflow_summary" - summary_message["agent_name"] = "Workflow Summary" - summary_message["result_format"] = "Text" - summary_message["workflow_complete"] = True - - # Add to workflow - workflow["messages"].append(summary_message) - - return summary_message - - def _create_message(self, workflow: Dict[str, Any], role: str) -> Dict[str, Any]: - """Create a new message object for the workflow""" - message_id = f"msg_{uuid.uuid4()}" - current_time = datetime.now().isoformat() - - # Determine sequence number - sequence_no = 1 - if "messages" in workflow and workflow["messages"]: - sequence_no = len(workflow["messages"]) + 1 - - # Create message object - message = { - "id": message_id, - "workflow_id": self.workflow_id, - "parent_message_id": None, - "started_at": current_time, - "finished_at": None, - "sequence_no": sequence_no, - - "status": "pending", - "role": role, - - "data_stats": { - "processing_time": 0.0, - "token_count": 0, - "bytes_sent": 0, - "bytes_received": 0 - }, - - "documents": [], - "content": None, - "agent_type": None - } - - return message - - def _add_log(self, workflow_id: str, message: str, log_type: str, agent_id: str = None, agent_name: str = None): - """Add a log entry to the workflow""" - # This calls back to the workflow manager's log function - self.workflow_manager._add_log(workflow_id, message, log_type, agent_id, agent_name) - - def _format_agent_info(self, agent_infos: List[Dict[str, Any]]) -> str: - """Format agent information for the planning prompt""" - formatted_info = "" - for agent in agent_infos: - formatted_info += f""" - - ID: {agent.get('id', 'unknown')} - Name: {agent.get('name', '')} - Type: {agent.get('type', '')} - Description: {agent.get('description', '')} - Capabilities: {agent.get('capabilities', '')} - Result Format: {agent.get('result_format', 'Text')} - """ - return formatted_info - - def _enhance_prompt( - self, - base_prompt: str, - document_content: str, - dependency_context: Dict[str, Any], - previous_result: str, - is_continuation: bool, - workflow: Dict[str, Any] = None # Add workflow parameter - ) -> str: - """ - Enhance a prompt with context information. 
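# A minimal sketch of the section layout this method produces, reusing the
# "=== ... ===" delimiters seen elsewhere in this module; the section bodies
# are assumed to be pre-formatted strings:
def compose_prompt(base: str, previous_output: str = "", documents: str = "") -> str:
    """Append optional context sections to a base prompt."""
    parts = [base]
    if previous_output:
        parts.append(f"=== PREVIOUS AGENT OUTPUT ===\n{previous_output}")
    if documents:
        parts.append(documents)
    return "\n\n".join(parts)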
- - Args: - base_prompt: The original prompt - document_content: Extracted document content - dependency_context: Context from dependent activities - previous_result: Result from previous agent in sequence - is_continuation: Flag indicating if this is a continuation - - Returns: - Enhanced prompt - """ - enhanced_prompt = base_prompt - - # Add continuation context if this is a continuation - if is_continuation and previous_result: - enhanced_prompt = f""" -{enhanced_prompt} - -=== PREVIOUS AGENT OUTPUT === -{previous_result} -""" - # Add document content if available - if document_content: - enhanced_prompt += f"\n\n{document_content}" - - # Add dependency context if available - if dependency_context: - dependency_section = "\n\n=== OUTPUTS FROM PREVIOUS ACTIVITIES ===\n\n" - for name, value in dependency_context.items(): - if isinstance(value, dict) and "content" in value: - # Extract content if it's in the standard format - dependency_section += f"--- {name} ---\n{value['content']}\n\n" - else: - # Use the value directly - dependency_section += f"--- {name} ---\n{str(value)}\n\n" - - enhanced_prompt += dependency_section - - # Add chat history from workflow if available - if workflow and "messages" in workflow: - chat_history = "\n\n=== CONVERSATION HISTORY ===\n\n" - relevant_messages = [] - - # Collect relevant messages (user and assistant interactions) - for msg in workflow.get("messages", []): - if msg.get("role") in ["user", "assistant"] and msg.get("content"): - relevant_messages.append(msg) - - # Add up to the last 5 messages for context - if relevant_messages: - for msg in relevant_messages[-5:]: - role = msg.get("role", "").upper() - content = msg.get("content", "") - if content: - chat_history += f"{role}: {content}\n\n" - - enhanced_prompt += chat_history - - return enhanced_prompt diff --git a/modules/agentservice_workflow_manager.py b/modules/agentservice_workflow_manager.py deleted file mode 100644 index b1e10b82..00000000 --- a/modules/agentservice_workflow_manager.py +++ /dev/null @@ -1,689 +0,0 @@ -""" -Refactored WorkflowManager class for the Agentservice (continued). -""" - -import os -import logging -import asyncio -import uuid -from datetime import datetime -from typing import List, Dict, Any, Optional, Tuple, Union - -logger = logging.getLogger(__name__) - -class WorkflowManager: - - def __init__(self, mandate_id: int = None, user_id: int = None, ai_service = None, lucydom_interface = None): - """Initialize the WorkflowManager.""" - self.mandate_id = mandate_id - self.user_id = user_id - self.ai_service = ai_service - self.lucydom_interface = lucydom_interface - - # Cache for workflows - self.workflows = {} - - # Directory for results - self.results_dir = os.path.join("results", "workflows") - os.makedirs(self.results_dir, exist_ok=True) - - # Initialize document handler - from modules.agentservice_document_handler import get_document_handler - self.document_handler = get_document_handler( - lucydom_interface=lucydom_interface, - ai_service=ai_service - ) - - # Initialize agent registry with dependencies - from modules.agentservice_registry import AgentRegistry - registry = AgentRegistry.get_instance() - registry.set_dependencies( - ai_service=ai_service, - document_handler=self.document_handler, - lucydom_interface=lucydom_interface - ) - - async def list_workflows(self, mandate_id: int = None, user_id: int = None) -> List[Dict[str, Any]]: - """ - List all available workflows. 
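# A minimal sketch of the summary shape and ordering this method returns,
# assuming full workflow records as stored by this manager:
from typing import Any, Dict, List

def summarize(workflow: Dict[str, Any]) -> Dict[str, Any]:
    """Reduce a workflow record to the lightweight list-view summary."""
    return {
        "id": workflow.get("id"),
        "name": workflow.get("name", f"Workflow {workflow.get('id')}"),
        "status": workflow.get("status"),
        "started_at": workflow.get("started_at"),
        "last_activity": workflow.get("last_activity"),
        "message_count": len(workflow.get("messages", [])),
    }

def sort_newest_first(summaries: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Order summaries by last activity, newest first."""
    return sorted(summaries, key=lambda w: w.get("last_activity", ""), reverse=True)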
- - Args: - mandate_id: Optional mandate ID for filtering - user_id: Optional user ID for filtering - - Returns: - List of workflow summaries - """ - workflows = [] - - # Load from database if available - if self.lucydom_interface: - try: - # Get all workflows for the user - if user_id is not None: - user_workflows = self.lucydom_interface.get_workflows_by_user(user_id) - else: - user_workflows = self.lucydom_interface.get_all_workflows() - - # Filter by mandate if specified - if mandate_id is not None: - user_workflows = [wf for wf in user_workflows if wf.get("mandate_id") == mandate_id] - - # Create workflow summaries - for workflow in user_workflows: - summary = { - "id": workflow.get("id"), - "name": workflow.get("name", f"Workflow {workflow.get('id')}"), - "status": workflow.get("status"), - "started_at": workflow.get("started_at"), - "last_activity": workflow.get("last_activity"), - "completed_at": workflow.get("completed_at") - } - - # Add message count if available - messages = self.lucydom_interface.get_workflow_messages(workflow.get("id")) - if messages: - summary["message_count"] = len(messages) - - workflows.append(summary) - - logger.info(f"Loaded {len(workflows)} workflows from database") - - # Sort by last activity (newest first) - return sorted(workflows, key=lambda w: w.get("last_activity", ""), reverse=True) - - except Exception as e: - logger.error(f"Error retrieving workflows from database: {str(e)}") - - # Load from files if no database or error occurred - try: - for filename in os.listdir(self.results_dir): - if filename.startswith("workflow_") and filename.endswith(".json"): - workflow_path = os.path.join(self.results_dir, filename) - - try: - import json - with open(workflow_path, 'r', encoding='utf-8') as f: - workflow = json.load(f) - - # Check if mandate and user ID match filters - if mandate_id is not None and workflow.get("mandate_id") != mandate_id: - continue - - if user_id is not None and workflow.get("user_id") != user_id: - continue - - # Create workflow summary - summary = { - "id": workflow.get("id"), - "name": workflow.get("name", f"Workflow {workflow.get('id')}"), - "status": workflow.get("status"), - "started_at": workflow.get("started_at"), - "last_activity": workflow.get("last_activity"), - "message_count": len(workflow.get("messages", [])) - } - - workflows.append(summary) - except Exception as e: - logger.error(f"Error loading workflow file {filename}: {str(e)}") - - logger.info(f"Loaded {len(workflows)} workflows from files") - - # Sort by last activity (newest first) - return sorted(workflows, key=lambda w: w.get("last_activity", ""), reverse=True) - - except Exception as e: - logger.error(f"Error listing workflows: {str(e)}") - return [] - - async def execute_workflow(self, message: Dict[str, Any], files: List[Dict[str, Any]] = None, workflow_id: str = None, is_user_input: bool = False) -> Dict[str, Any]: - """ - Execute a workflow with the given message and files. 
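# A minimal sketch of the new-vs-continued decision made at the top of this
# method; load_existing and initialize are stand-ins for load_workflow and
# _initialize_workflow:
import uuid

async def resolve_workflow(workflow_id, load_existing, initialize):
    """Return (workflow_id, workflow), creating a fresh record when needed."""
    if not workflow_id:
        workflow_id = f"wf_{uuid.uuid4()}"
        return workflow_id, initialize(workflow_id)
    workflow = await load_existing(workflow_id)
    # Fallback: an ID was supplied but nothing was found, so start a new record.
    return workflow_id, workflow or initialize(workflow_id)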
- - Args: - message: Input message (prompt) - files: Optional list of file metadata - workflow_id: Optional ID for continuing an existing workflow - is_user_input: Flag indicating if this is user input to an existing workflow - - Returns: - Workflow execution result - """ - - # Use provided workflow_id or generate a new one for a new workflow - if not workflow_id: - workflow_id = f"wf_{uuid.uuid4()}" - # Initialize a new workflow - workflow = self._initialize_workflow(workflow_id) - else: - # Load existing workflow for continuation - workflow = await self.load_workflow(workflow_id) - if not workflow: - # Fallback: initialize a new workflow with the provided ID - workflow = self._initialize_workflow(workflow_id) - - # Capture start time - start_time = datetime.now() - - try: - # Create WorkflowExecution with document handler - from modules.agentservice_workflow_execution import WorkflowExecution - execution = WorkflowExecution( - workflow_manager=self, - workflow_id=workflow_id, - mandate_id=self.mandate_id, - user_id=self.user_id, - ai_service=self.ai_service, - lucydom_interface=self.lucydom_interface - ) - - # Set the document handler's workflow ID - self.document_handler.set_workflow_id(workflow_id) - - # Execute the workflow - result = await execution.execute(message, workflow, files, is_user_input) - - # Calculate duration - duration = (datetime.now() - start_time).total_seconds() - - # Update workflow stats - if "data_stats" not in workflow: - workflow["data_stats"] = { - "total_processing_time": 0.0, - "total_token_count": 0, - "total_bytes_sent": 0, - "total_bytes_received": 0 - } - workflow["data_stats"]["total_processing_time"] = duration - workflow["completed_at"] = datetime.now().isoformat() - - # Save final state - self._save_workflow(workflow) - - return result - - except Exception as e: - logger.error(f"Error executing workflow: {str(e)}", exc_info=True) - - # Update workflow status - workflow["status"] = "failed" - workflow["last_activity"] = datetime.now().isoformat() - self._add_log(workflow, f"Workflow execution failed: {str(e)}", "error") - - # Save failed state - self._save_workflow(workflow) - - return { - "workflow_id": workflow_id, - "status": "failed", - "error": str(e) - } - - def _save_workflow(self, workflow: Dict[str, Any]) -> bool: - """ - Save workflow state to database and/or file. - Enhanced to handle structured documents. 
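# A minimal sketch of the dual-write strategy used below: the database write
# is best-effort, while the JSON file is always written as a backup.
import json
import logging
import os

logger = logging.getLogger(__name__)

def save_state(workflow: dict, results_dir: str, db=None) -> None:
    """Persist workflow state to the database (if any) and to a JSON file."""
    if db is not None:
        try:
            db.save_workflow_state(workflow)
        except Exception as exc:
            # A database failure must not prevent the file backup.
            logger.error("Database save failed: %s", exc)
    path = os.path.join(results_dir, f"workflow_{workflow['id']}.json")
    with open(path, "w", encoding="utf-8") as f:
        json.dump(workflow, f, indent=2, ensure_ascii=False)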
- - Args: - workflow: The workflow object to save - - Returns: - True if saved successfully, False otherwise - """ - try: - workflow_id = workflow.get("id") - - # Update in-memory cache - self.workflows[workflow_id] = workflow - - # Update in database if available - if self.lucydom_interface: - # NEW: Enhanced document handling for database persistence - # Create a copy of the workflow for database storage - db_workflow = workflow.copy() - # Save to database - try: - self.lucydom_interface.save_workflow_state(db_workflow) - logger.info(f"Workflow {workflow_id} saved to database") - except Exception as db_error: - logger.error(f"Error saving workflow to database: {str(db_error)}") - # Continue to file saving even if database fails - - # Save to file (always do this as backup) - import json - workflow_path = os.path.join(self.results_dir, f"workflow_{workflow_id}.json") - - with open(workflow_path, 'w', encoding='utf-8') as f: - json.dump(workflow, f, indent=2, ensure_ascii=False) - - logger.info(f"Workflow {workflow_id} saved to file: {workflow_path}") - return True - - except Exception as e: - logger.error(f"Error saving workflow state: {str(e)}") - return False - - async def load_workflow(self, workflow_id: str) -> Optional[Dict[str, Any]]: - """ - Load a workflow by ID. - Enhanced to ensure document handler is properly configured. - - Args: - workflow_id: ID of the workflow to load - - Returns: - The workflow object or None if not found - """ - # Check memory cache first - if workflow_id in self.workflows: - workflow = self.workflows[workflow_id] - - # NEW: Configure document handler for this workflow - self.document_handler.set_workflow_id(workflow_id) - - return workflow - - # Try to load from database - if self.lucydom_interface: - try: - workflow = self.lucydom_interface.load_workflow_state(workflow_id) - if workflow: - # Cache in memory - self.workflows[workflow_id] = workflow - - # NEW: Configure document handler for this workflow - self.document_handler.set_workflow_id(workflow_id) - - logger.info(f"Workflow {workflow_id} loaded from database") - return workflow - except Exception as e: - logger.error(f"Error loading workflow from database: {str(e)}") - - # Try to load from file - workflow_path = os.path.join(self.results_dir, f"workflow_{workflow_id}.json") - - if os.path.exists(workflow_path): - try: - import json - with open(workflow_path, 'r', encoding='utf-8') as f: - workflow = json.load(f) - - # Cache in memory - self.workflows[workflow_id] = workflow - - # NEW: Configure document handler for this workflow - self.document_handler.set_workflow_id(workflow_id) - - logger.info(f"Workflow {workflow_id} loaded from file: {workflow_path}") - return workflow - except Exception as e: - logger.error(f"Error loading workflow from file: {str(e)}") - - logger.warning(f"Workflow {workflow_id} not found") - return None - - async def delete_workflow(self, workflow_id: str) -> bool: - """ - Delete a workflow. 
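# A minimal sketch of the three-tier lookup load_workflow performs above:
# in-memory cache first, then the database, then the JSON file backup.
import json
import os
from typing import Optional

def load_state(workflow_id: str, cache: dict, results_dir: str, db=None) -> Optional[dict]:
    """Find a workflow in the cache, the database, or the file backup."""
    if workflow_id in cache:
        return cache[workflow_id]
    if db is not None:
        workflow = db.load_workflow_state(workflow_id)
        if workflow:
            cache[workflow_id] = workflow
            return workflow
    path = os.path.join(results_dir, f"workflow_{workflow_id}.json")
    if os.path.exists(path):
        with open(path, "r", encoding="utf-8") as f:
            workflow = json.load(f)
        cache[workflow_id] = workflow
        return workflow
    return None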
- - Args: - workflow_id: ID of the workflow - - Returns: - True on success, False if workflow not found - """ - # Remove from memory - if workflow_id in self.workflows: - del self.workflows[workflow_id] - - # Delete from database - if self.lucydom_interface: - try: - db_success = self.lucydom_interface.delete_workflow(workflow_id) - logger.info(f"Workflow {workflow_id} deleted from database: {db_success}") - except Exception as e: - logger.error(f"Error deleting workflow {workflow_id} from database: {str(e)}") - - # Delete file - workflow_path = os.path.join(self.results_dir, f"workflow_{workflow_id}.json") - - try: - if os.path.exists(workflow_path): - os.remove(workflow_path) - logger.info(f"Workflow {workflow_id} deleted from file: {workflow_path}") - return True - else: - logger.warning(f"Workflow {workflow_id} not found: {workflow_path}") - return False - except Exception as e: - logger.error(f"Error deleting workflow file {workflow_id}: {str(e)}") - return False - - def _initialize_workflow(self, workflow_id: str) -> Dict[str, Any]: - """ - Initialize a new workflow. - - Args: - workflow_id: ID of the workflow - - Returns: - The initialized workflow object - """ - current_time = datetime.now().isoformat() - - # Create complete workflow object according to the data model - workflow = { - "id": workflow_id, - "name": f"Workflow {workflow_id}", - "mandate_id": self.mandate_id, - "user_id": self.user_id, - "status": "running", - "started_at": current_time, - "last_activity": current_time, - "current_round": 1, - - # Complete statistics structure according to DataStats model - "data_stats": { - "total_processing_time": 0.0, - "total_token_count": 0, - "total_bytes_sent": 0, - "total_bytes_received": 0 - }, - - # Empty arrays for messages and logs - "messages": [], - "logs": [] - } - - # Log entry for workflow start - self._add_log(workflow, "Workflow started", "info", "workflow", "Workflow Management") - - # Save workflow to database - if self.lucydom_interface: - try: - # Direct save of the complete workflow object - self.lucydom_interface.save_workflow_state(workflow) - logger.info(f"Workflow {workflow_id} created in database") - except Exception as e: - logger.error(f"Error creating workflow {workflow_id} in database: {str(e)}") - - # Cache workflow in memory - self.workflows[workflow_id] = workflow - - return workflow - - async def stop_workflow(self, workflow_id: str) -> bool: - """ - Stop a running workflow. 
- - Args: - workflow_id: ID of the workflow to stop - - Returns: - True on success, False if workflow not found or already stopped - """ - try: - workflow = self.workflows.get(workflow_id) - - if not workflow: - # Try to load the workflow - workflow = await self.load_workflow(workflow_id) - if not workflow: - return False - - # If workflow is not running or completed, abort - if workflow.get("status") not in ["running", "completed"]: - return False - - # Set status to stopped - workflow["status"] = "stopped" - workflow["last_activity"] = datetime.now().isoformat() - - self._add_log(workflow, "Workflow was manually stopped", "info", "workflow", "Workflow Management") - - # Save workflow - self._save_workflow(workflow) - - return True - except Exception as e: - logger.error(f"Error stopping workflow {workflow_id}: {str(e)}") - return False - - def _add_log(self, workflow: Dict[str, Any], message: str, log_type: str, agent_id: Optional[str] = None, agent_name: Optional[str] = None) -> None: - """Add a log entry to the workflow.""" - # First, check if workflow is a string (ID) instead of dictionary - if isinstance(workflow, str): - # Try to load the workflow by ID - workflow_id = workflow - workflow = self.workflows.get(workflow_id) - if not workflow: - # Just log to the logger and return - logger.info(f"Log (couldn't add to workflow {workflow_id}): {log_type} - {message}") - return - - # Check if workflow is a dictionary - if not isinstance(workflow, dict): - logger.error(f"Invalid workflow type: {type(workflow)}. Expected dictionary.") - # Just log to the logger and return - logger.info(f"Log (couldn't add to workflow): {log_type} - {message}") - return - - # Create log entry - log_entry = { - "id": f"log_{uuid.uuid4()}", - "message": message, - "type": log_type, - "timestamp": datetime.now().isoformat(), - "agent_id": agent_id, - "agent_name": agent_name - } - - # Add log entry to workflow - if "logs" not in workflow: - workflow["logs"] = [] - - workflow["logs"].append(log_entry) - - # Update last activity - workflow["last_activity"] = log_entry["timestamp"] - - # Save log entry to database if available - if self.lucydom_interface: - try: - # Add workflow ID to log entry - log_data = log_entry.copy() - log_data["workflow_id"] = workflow["id"] - - self.lucydom_interface.create_workflow_log(log_data) - logger.debug(f"Log entry for workflow {workflow['id']} saved to database") - except Exception as e: - logger.error(f"Error saving log entry for workflow {workflow['id']} to database: {str(e)}") - - # Also log to standard logger with the category prefix - category_prefix = f"[{agent_name or agent_id or 'Workflow'}]" if agent_name or agent_id else "" - log_message = f"{category_prefix} {message}" - - if log_type == "error": - logger.error(log_message) - elif log_type == "warning": - logger.warning(log_message) - else: - logger.info(log_message) - - def get_workflow_status(self, workflow_id: str) -> Optional[Dict[str, Any]]: - """ - Get the status of a workflow. 
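# A minimal sketch of the coarse progress value reported below: terminal
# states map to 1.0, anything still in flight to 0.5.
def progress_for(status: str) -> float:
    return 1.0 if status in ("completed", "failed", "stopped") else 0.5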
- - Args: - workflow_id: ID of the workflow - - Returns: - Dictionary with status information or None if workflow not found - """ - # Get from memory - workflow = self.workflows.get(workflow_id) - - # If not in memory, load from database or file - if not workflow: - # Load from database if available - if self.lucydom_interface: - try: - workflow_data = self.lucydom_interface.get_workflow(workflow_id) - if workflow_data: - workflow = workflow_data - except Exception as e: - logger.error(f"Error loading workflow status from database: {str(e)}") - - # If not in database, load from file - if not workflow: - try: - import json - workflow_path = os.path.join(self.results_dir, f"workflow_{workflow_id}.json") - if os.path.exists(workflow_path): - with open(workflow_path, 'r', encoding='utf-8') as f: - workflow = json.load(f) - except Exception as e: - logger.error(f"Error loading workflow status from file: {str(e)}") - return None - - if not workflow: - return None - - # Extract status information - status_info = { - "id": workflow.get("id"), - "name": workflow.get("name", f"Workflow {workflow_id}"), - "status": workflow.get("status"), - "progress": 1.0 if workflow.get("status") in ["completed", "failed", "stopped"] else 0.5, - "started_at": workflow.get("started_at"), - "last_activity": workflow.get("last_activity"), - "workflow_complete": workflow.get("status") == "completed", - "current_round": workflow.get("current_round", 1), - "data_stats": workflow.get("data_stats", { - "total_processing_time": 0.0, - "total_token_count": 0, - "total_bytes_sent": 0, - "total_bytes_received": 0 - }) - } - - return status_info - - def get_workflow_logs(self, workflow_id: str) -> Optional[List[Dict[str, Any]]]: - """ - Get logs for a workflow. - - Args: - workflow_id: ID of the workflow - - Returns: - List of logs or None if workflow not found - """ - # Get from memory - workflow = self.workflows.get(workflow_id) - - # If not in memory, load from database - if not workflow and self.lucydom_interface: - try: - logs = self.lucydom_interface.get_workflow_logs(workflow_id) - return logs - except Exception as e: - logger.error(f"Error loading workflow logs from database: {str(e)}") - - # If not in database or no interface available, load from file - if not workflow: - try: - import json - workflow_path = os.path.join(self.results_dir, f"workflow_{workflow_id}.json") - if os.path.exists(workflow_path): - with open(workflow_path, 'r', encoding='utf-8') as f: - workflow = json.load(f) - except Exception as e: - logger.error(f"Error loading workflow logs from file: {str(e)}") - return None - - return workflow.get("logs", []) if workflow else None - - def get_workflow_messages(self, workflow_id: str) -> Optional[List[Dict[str, Any]]]: - """ - Get messages for a workflow. 
- - Args: - workflow_id: ID of the workflow - - Returns: - List of messages or None if workflow not found - """ - # Get from memory - workflow = self.workflows.get(workflow_id) - - # If not in memory, load from database - if not workflow and self.lucydom_interface: - try: - messages = self.lucydom_interface.get_workflow_messages(workflow_id) - return messages - except Exception as e: - logger.error(f"Error loading workflow messages from database: {str(e)}") - - # If not in database or no interface available, load from file - if not workflow: - try: - import json - workflow_path = os.path.join(self.results_dir, f"workflow_{workflow_id}.json") - if os.path.exists(workflow_path): - with open(workflow_path, 'r', encoding='utf-8') as f: - workflow = json.load(f) - except Exception as e: - logger.error(f"Error loading workflow messages from file: {str(e)}") - return None - - return workflow.get("messages", []) if workflow else None - -# Factory function for WorkflowManager -def get_workflow_manager(mandate_id: int = None, user_id: int = None, ai_service = None, lucydom_interface = None): - """ - Get a WorkflowManager instance for the specified context. - Reuses existing instances and updates dependencies. - - Args: - mandate_id: Mandate ID - user_id: User ID - ai_service: AI service - lucydom_interface: LucyDOM interface - - Returns: - WorkflowManager instance - """ - from modules.lucydom_interface import get_lucydom_interface - - context_key = f"{mandate_id}_{user_id}" - - # Get LucyDOM interface if not provided - if not lucydom_interface: - lucydom_interface = get_lucydom_interface(mandate_id, user_id) - - if context_key not in _workflow_managers: - _workflow_managers[context_key] = WorkflowManager( - mandate_id, - user_id, - ai_service, - lucydom_interface - ) - - # Update services if provided - if ai_service is not None: - _workflow_managers[context_key].ai_service = ai_service - - # NEW: Update document handler's AI service - if hasattr(_workflow_managers[context_key], 'document_handler'): - _workflow_managers[context_key].document_handler.set_ai_service(ai_service) - - # NEW: Update agent registry dependencies - from modules.agentservice_registry import AgentRegistry - registry = AgentRegistry.get_instance() - registry.set_dependencies(ai_service=ai_service) - - return _workflow_managers[context_key] - -# Singleton factory for WorkflowManager instances per context -_workflow_managers = {} \ No newline at end of file diff --git a/modules/auth.py b/modules/auth.py index d423f951..fd4f159c 100644 --- a/modules/auth.py +++ b/modules/auth.py @@ -6,7 +6,7 @@ from jose import JWTError, jwt import logging from modules.gateway_interface import get_gateway_interface -from modules.utility import APP_CONFIG +from gateway.modules.configuration import APP_CONFIG # Get Config Data SECRET_KEY = APP_CONFIG.get("APP_JWT_SECRET_SECRET") diff --git a/modules/backup-lucydom_interface copy.py b/modules/backup-lucydom_interface copy.py deleted file mode 100644 index 9977c61c..00000000 --- a/modules/backup-lucydom_interface copy.py +++ /dev/null @@ -1,1109 +0,0 @@ -import os -import logging -import uuid -from datetime import datetime, timedelta -import mimetypes -from typing import Dict, Any, List, Optional, Union, BinaryIO, Tuple -import importlib -import hashlib -from pathlib import Path - -from connectors.connector_db_json import DatabaseConnector -from modules.utility import APP_CONFIG - -logger = logging.getLogger(__name__) - -# Custom exceptions for file handling -class FileError(Exception): - """Base class 
for file handling exceptions.""" - pass - -class FileNotFoundError(FileError): - """Exception raised when a file is not found.""" - pass - -class FileStorageError(FileError): - """Exception raised when there's an error storing a file.""" - pass - -class FilePermissionError(FileError): - """Exception raised when there's a permission issue with a file.""" - pass - -class FileDeletionError(FileError): - """Exception raised when there's an error deleting a file.""" - pass - - -class LucyDOMInterface: - """ - Interface zur LucyDOM-Datenbank. - Verwendet den JSON-Konnektor für den Datenzugriff. - """ - - def __init__(self, mandate_id: int, user_id: int): - """ - Initialisiert das LucyDOM-Interface mit Mandanten- und Benutzerkontext. - - Args: - mandate_id: ID des aktuellen Mandanten - user_id: ID des aktuellen Benutzers - """ - self.mandate_id = mandate_id - self.user_id = user_id - - # Upload Verzeichnis aus config.ini lesen - self.upload_dir = APP_CONFIG.get('Module_AgentserviceInterface_UPLOAD_DIR') - os.makedirs(self.upload_dir, exist_ok=True) - - # Datenmodell-Modul importieren - try: - self.model_module = importlib.import_module("modules.lucydom_model") - logger.info("lucydom_model erfolgreich importiert") - except ImportError as e: - logger.error(f"Fehler beim Importieren von lucydom_model: {e}") - raise - - # Datenbank initialisieren, falls nötig - self._initialize_database() - - def _initialize_database(self): - """ - Initialisiert die Datenbank mit minimalen Objekten für den angemeldeten Benutzer im Mandanten, falls sie noch nicht existiert. - Ohne gültigen Benutzer keine Initialisierung. - Erstellt für jede im Datenmodell definierte Tabelle einen initialen Datensatz. - """ - effective_mandate_id = self.mandate_id - effective_user_id = self.user_id - if effective_mandate_id is None or effective_user_id is None: - #data available - return - - self.db = DatabaseConnector( - db_host=APP_CONFIG.get("DB_LUCYDOM_HOST"), - db_database=APP_CONFIG.get("DB_LUCYDOM_DATABASE"), - db_user=APP_CONFIG.get("DB_LUCYDOM_USER"), - db_password=APP_CONFIG.get("DB_LUCYDOM_PASSWORD_SECRET"), - mandate_id=self.mandate_id, - user_id=self.user_id - ) - - # Initialisierung von Standard-Prompts für verschiedene Bereiche - prompts = self.db.get_recordset("prompts") - if not prompts: - logger.info("Erstelle Standard-Prompts") - - # Standard-Prompts definieren - standard_prompts = [ - { - "mandate_id": effective_mandate_id, - "user_id": effective_user_id, - "content": "Recherchiere die aktuellen Markttrends und Entwicklungen im Bereich [THEMA]. Sammle Informationen zu führenden Unternehmen, innovativen Produkten oder Dienstleistungen und aktuellen Herausforderungen. Präsentiere die Ergebnisse in einer strukturierten Übersicht mit relevanten Daten und Quellen.", - "name": "Web Research: Marktforschung" - }, - { - "mandate_id": effective_mandate_id, - "user_id": effective_user_id, - "content": "Analysiere den beigefügten Datensatz zu [THEMA] und identifiziere die wichtigsten Trends, Muster und Auffälligkeiten. Führe statistische Berechnungen durch, um deine Erkenntnisse zu untermauern. Stelle die Ergebnisse in einer klar strukturierten Analyse dar und ziehe relevante Schlussfolgerungen.", - "name": "Analyse: Datenanalyse" - }, - { - "mandate_id": effective_mandate_id, - "user_id": effective_user_id, - "content": "Erstelle ein detailliertes Protokoll unserer Besprechung zum Thema [THEMA]. Erfasse alle besprochenen Punkte, getroffenen Entscheidungen und vereinbarten Maßnahmen. 
Strukturiere das Protokoll übersichtlich mit Tagesordnungspunkten, Teilnehmerliste und klaren Verantwortlichkeiten für die Follow-up-Aktionen.", - "name": "Protokoll: Besprechungsprotokoll" - }, - { - "mandate_id": effective_mandate_id, - "user_id": effective_user_id, - "content": "Entwickle ein UI/UX-Designkonzept für [ANWENDUNG/WEBSITE]. Berücksichtige die Zielgruppe, Hauptfunktionen und die Markenidentität. Beschreibe die visuelle Gestaltung, Navigation, Interaktionsmuster und Informationsarchitektur. Erläutere, wie das Design die Benutzerfreundlichkeit und das Nutzererlebnis optimiert.", - "name": "Design: UI/UX Design" - } - ] - - # Prompts erstellen - for prompt_data in standard_prompts: - created_prompt = self.db.record_create("prompts", prompt_data) - logger.info(f"Prompt '{prompt_data.get('name', 'Standard')}' wurde erstellt mit ID {created_prompt['id']}") - - - # Utilities - - def get_initial_id(self, table: str) -> Optional[int]: - """ - Gibt die initiale ID für eine Tabelle zurück. - - Args: - table: Name der Tabelle - - Returns: - Die initiale ID oder None, wenn nicht vorhanden - """ - return self.db.get_initial_id(table) - - def _get_current_timestamp(self) -> str: - """Gibt den aktuellen Zeitstempel im ISO-Format zurück""" - return datetime.now().isoformat() - - - # Prompt-Methoden - - def get_all_prompts(self) -> List[Dict[str, Any]]: - """Gibt alle Prompts des aktuellen Mandanten zurück""" - return self.db.get_recordset("prompts") - - def get_prompt(self, prompt_id: int) -> Optional[Dict[str, Any]]: - """Gibt einen Prompt anhand seiner ID zurück""" - prompts = self.db.get_recordset("prompts", record_filter={"id": prompt_id}) - if prompts: - return prompts[0] - return None - - def create_prompt(self, content: str, name: str) -> Dict[str, Any]: - """Erstellt einen neuen Prompt""" - prompt_data = { - "mandate_id": self.mandate_id, - "user_id": self.user_id, - "content": content, - "name": name, - "created_at": self._get_current_timestamp() - } - - return self.db.record_create("prompts", prompt_data) - - def update_prompt(self, prompt_id: int, content: str = None, name: str = None) -> Dict[str, Any]: - """ - Aktualisiert einen vorhandenen Prompt - - Args: - prompt_id: ID des zu aktualisierenden Prompts - content: Neuer Inhalt des Prompts - - Returns: - Das aktualisierte Prompt-Objekt - """ - # Prüfen, ob der Prompt existiert - prompt = self.get_prompt(prompt_id) - if not prompt: - return None - - # Daten für die Aktualisierung vorbereiten - prompt_data = {} - - if content is not None: - prompt_data["content"] = content - if name is not None: - prompt_data["name"] = name - - # Prompt aktualisieren - return self.db.record_modify("prompts", prompt_id, prompt_data) - - def delete_prompt(self, prompt_id: int) -> bool: - """ - Löscht einen Prompt aus der Datenbank - - Args: - prompt_id: ID des zu löschenden Prompts - - Returns: - True, wenn der Prompt erfolgreich gelöscht wurde, sonst False - """ - return self.db.record_delete("prompts", prompt_id) - - - # File Utilities - - def get_mime_type(self, file_path: str) -> str: - """ - Bestimmt den MIME-Typ einer Datei. 
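# A minimal sketch of the two-step detection used below: the standard library
# is tried first, then the extension table (abbreviated here):
import mimetypes
import os

FALLBACK_MIME = {"pdf": "application/pdf", "csv": "text/csv", "png": "image/png"}

def detect_mime(file_path: str) -> str:
    """Guess a MIME type, falling back to the extension map, then octet-stream."""
    mime_type, _ = mimetypes.guess_type(file_path)
    if not mime_type:
        ext = os.path.splitext(file_path)[1].lower().lstrip(".")
        mime_type = FALLBACK_MIME.get(ext, "application/octet-stream")
    return mime_type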
- - Args: - file_path: Pfad zur Datei - - Returns: - Der erkannte MIME-Typ - """ - # Versuche, den MIME-Typ über den Dateipfad zu erkennen - mime_type, _ = mimetypes.guess_type(file_path) - - # Wenn kein MIME-Typ erkannt wurde, versuche es über die Dateiendung - if not mime_type: - ext = os.path.splitext(file_path)[1].lower()[1:] - mime_type = self.get_mime_type_from_extension(ext) - - return mime_type - - def get_mime_type_from_extension(self, extension: str) -> str: - """ - Bestimmt den MIME-Typ basierend auf der Dateiendung. - - Args: - extension: Die Dateiendung ohne Punkt - - Returns: - Der entsprechende MIME-Typ - """ - extension_to_mime = { - "pdf": "application/pdf", - "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", - "doc": "application/msword", - "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", - "xls": "application/vnd.ms-excel", - "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", - "ppt": "application/vnd.ms-powerpoint", - "csv": "text/csv", - "txt": "text/plain", - "json": "application/json", - "xml": "application/xml", - "html": "text/html", - "htm": "text/html", - "jpg": "image/jpeg", - "jpeg": "image/jpeg", - "png": "image/png", - "gif": "image/gif", - "webp": "image/webp", - "svg": "image/svg+xml", - "py": "text/x-python", - "js": "application/javascript", - "css": "text/css" - } - return extension_to_mime.get(extension.lower(), "application/octet-stream") - - def calculate_file_hash(self, file_content: bytes) -> str: - """ - Calculate SHA-256 hash of file content for deduplication - - Args: - file_content: Binary content of the file - - Returns: - SHA-256 hash as a hexadecimal string - """ - return hashlib.sha256(file_content).hexdigest() - - def check_for_duplicate_file(self, file_hash: str) -> Optional[Dict[str, Any]]: - """ - Check if a file with the same hash already exists - - Args: - file_hash: SHA-256 hash of the file content - - Returns: - File record if a duplicate exists, None otherwise - """ - files = self.db.get_recordset("files", record_filter={"file_hash": file_hash}) - if files: - return files[0] - return None - - - # File Methoden - - def get_all_files(self) -> List[Dict[str, Any]]: - """Gibt alle Dateien des aktuellen Mandanten zurück""" - return self.db.get_recordset("files") - - def get_file(self, file_id: int) -> Optional[Dict[str, Any]]: - """Gibt eine Datei anhand ihrer ID zurück""" - files = self.db.get_recordset("files", record_filter={"id": file_id}) - if files: - return files[0] - return None - - def create_file(self, - name: str, - mime_type: str, - size: int = None, - path: str = None, - file_hash: str = None) -> Dict[str, Any]: - """Erstellt einen neuen Dateieintrag""" - file_data = { - "mandate_id": self.mandate_id, - "user_id": self.user_id, - "name": name, - "mime_type": mime_type, - "size": size, - "path": path, - "file_hash": file_hash, - "upload_date": self._get_current_timestamp() - } - return self.db.record_create("files", file_data) - - def update_file(self, file_id: int, update_data: Dict[str, Any]) -> Dict[str, Any]: - """ - Aktualisiert eine vorhandene Datei - - Args: - file_id: ID der zu aktualisierenden Datei - update_data: Dictionary mit zu aktualisierenden Feldern - - Returns: - Das aktualisierte Datei-Objekt - """ - # Prüfen, ob die Datei existiert - file = self.get_file(file_id) - if not file: - raise FileNotFoundError(f"Datei mit ID {file_id} nicht gefunden") - - # Datei aktualisieren - return self.db.record_modify("files", 
file_id, update_data) - - def delete_file(self, file_id: int) -> bool: - """ - Löscht eine Datei aus der Datenbank und dem Dateisystem. - - Args: - file_id: ID der Datei - - Returns: - True bei Erfolg, False bei Fehler - """ - try: - # Suche die Datei in der Datenbank - file = self.get_file(file_id) - - if not file: - raise FileNotFoundError(f"Datei mit ID {file_id} nicht gefunden") - - # Prüfe, ob die Datei zum aktuellen Mandanten gehört - if file.get("mandate_id") != self.mandate_id: - raise FilePermissionError(f"Keine Berechtigung zum Löschen der Datei {file_id}") - - # Speichere den Dateipfad - file_path = file.get("path") - - # Check for other references to this file (by hash) - file_hash = file.get("file_hash") - if file_hash: - other_references = [f for f in self.db.get_recordset("files", record_filter={"file_hash": file_hash}) - if f.get("id") != file_id] - - # If other files reference this content, only delete the database entry - if other_references: - logger.info(f"Andere Referenzen auf den Dateiinhalt gefunden, nur DB-Eintrag wird gelöscht: {file_id}") - return self.db.record_delete("files", file_id) - - # Lösche den Datenbankeintrag - db_success = self.db.record_delete("files", file_id) - - # Wenn der Datenbankeintrag erfolgreich gelöscht wurde und ein Dateipfad vorhanden ist, - # lösche auch die Datei - if db_success and file_path and os.path.exists(file_path): - try: - os.remove(file_path) - return True - except Exception as e: - logger.error(f"Fehler beim physischen Löschen der Datei {file_path}: {str(e)}") - # Datenbankdatei wurde gelöscht, physische Datei nicht - trotzdem Erfolg melden - return True - - return db_success - except FileNotFoundError as e: - # Pass through FileNotFoundError - raise - except FilePermissionError as e: - # Pass through FilePermissionError - raise - except Exception as e: - logger.error(f"Fehler beim Löschen der Datei {file_id}: {str(e)}") - raise FileDeletionError(f"Fehler beim Löschen der Datei: {str(e)}") - - def save_uploaded_file(self, file_content: bytes, file_name: str) -> Dict[str, Any]: - """ - Speichert eine hochgeladene Datei und erstellt einen Datenbankeintrag. 
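# A minimal sketch of the hash-based deduplication step used below;
# find_by_hash is a stand-in for check_for_duplicate_file:
import hashlib
from typing import Callable, Optional, Tuple

def dedupe(file_content: bytes, find_by_hash: Callable) -> Tuple[str, Optional[dict]]:
    """Hash the upload and look for an existing record with identical content."""
    file_hash = hashlib.sha256(file_content).hexdigest()
    return file_hash, find_by_hash(file_hash)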
- - Args: - file_content: Binärdaten der Datei - file_name: Name der Datei - - Returns: - Dictionary mit Metadaten der gespeicherten Datei - """ - try: - # Debug: Log the start of the file upload process - logger.info(f"Starting upload process for file: {file_name}") - logger.info(f"Upload directory: {self.upload_dir}, Mandate ID: {self.mandate_id}") - - # Debug: Check if file_content is valid bytes - if not isinstance(file_content, bytes): - logger.error(f"Invalid file_content type: {type(file_content)}") - raise ValueError(f"file_content must be bytes, got {type(file_content)}") - - # Calculate file hash for deduplication - file_hash = self.calculate_file_hash(file_content) - logger.debug(f"Calculated file hash: {file_hash}") - - # Check for duplicate - existing_file = self.check_for_duplicate_file(file_hash) - if existing_file: - # Simply return the existing file metadata - logger.info(f"Duplikat gefunden für {file_name}: {existing_file['id']}") - return existing_file - - # Generiere eindeutige NameID - name_id = f"file_{uuid.uuid4()}" - logger.debug(f"Generated filename ID: {name_id}") - - # Sanitize filename - safe_filename = Path(file_name).name # Get only the filename part - logger.debug(f"Sanitized filename: {safe_filename}") - - # Create parent directories if needed - mandate_upload_dir = os.path.join(self.upload_dir, str(self.mandate_id)) - logger.debug(f"Mandate upload directory: {mandate_upload_dir}") - - # Debug: Check if mandate upload directory exists - if not os.path.exists(mandate_upload_dir): - logger.info(f"Creating mandate upload directory: {mandate_upload_dir}") - - os.makedirs(mandate_upload_dir, exist_ok=True) - - # Dateipfad erstellen mit Mandant als Unterverzeichnis - file_path = os.path.join(mandate_upload_dir, f"{name_id}_{safe_filename}") - logger.debug(f"Full file path: {file_path}") - - # Datei speichern - logger.info(f"Writing file content to: {file_path}") - with open(file_path, "wb") as f: - f.write(file_content) - - # Verify file was created - if not os.path.exists(file_path): - logger.error(f"File was not created at path: {file_path}") - raise FileStorageError(f"File could not be created at {file_path}") - else: - logger.info(f"File successfully saved to: {file_path}") - - # Dateigröße bestimmen - file_size = len(file_content) - - # MIME-Typ und Dateityp bestimmen - mime_type = self.get_mime_type(file_path) - - # Speichere in der Datenbank - logger.info(f"Saving file metadata to database for file: {name_id}") - db_file = self.create_file( - name=file_name, - mime_type=mime_type, - size=file_size, - path=file_path, - file_hash=file_hash - ) - - # Debug: Verify database record was created - if not db_file: - logger.warning(f"Database record for file {name_id} was not created properly") - else: - logger.info(f"Database record created for file {name_id}") - - logger.info(f"File upload process completed for: {file_name}") - return db_file - - except Exception as e: - # If an error occurs, clean up any partial file - if 'file_path' in locals() and os.path.exists(file_path): - try: - logger.warning(f"Cleaning up partial file: {file_path}") - os.remove(file_path) - except Exception as cleanup_error: - logger.error(f"Error cleaning up partial file: {cleanup_error}") - - logger.error(f"Error in save_uploaded_file for {file_name}: {str(e)}", exc_info=True) - raise FileStorageError(f"Fehler beim Speichern der Datei: {str(e)}") - - def download_file(self, file_id: int) -> Optional[Dict[str, Any]]: - """ - Gibt eine Datei zum Download zurück. 
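# A minimal sketch of the storage-path construction used by save_uploaded_file
# above: filenames are reduced to their basename and namespaced by a mandate
# directory plus a per-upload UUID prefix.
import os
import uuid
from pathlib import Path

def build_upload_path(upload_dir: str, mandate_id: int, file_name: str) -> str:
    safe_name = Path(file_name).name  # strip any directory components
    name_id = f"file_{uuid.uuid4()}"  # unique prefix per upload
    mandate_dir = os.path.join(upload_dir, str(mandate_id))
    os.makedirs(mandate_dir, exist_ok=True)
    return os.path.join(mandate_dir, f"{name_id}_{safe_name}")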
- - Args: - file_id: ID der Datei - - Returns: - Dictionary mit Dateidaten und -metadaten oder None, wenn nicht gefunden - """ - try: - # Suche die Datei in der Datenbank - file = self.get_file(file_id) - - if not file or "path" not in file: - raise FileNotFoundError(f"Datei mit ID {file_id} nicht gefunden") - - file_path = file["path"] - - # Prüfe, ob die Datei existiert - if not os.path.exists(file_path): - raise FileNotFoundError(f"Datei nicht gefunden: {file_path}") - - # Lese die Datei - with open(file_path, "rb") as f: - file_content = f.read() - - return { - "id": file_id, - "name": file.get("name", os.path.basename(file_path)), - "mime_type": file.get("mime_type", self.get_mime_type(file_path)), - "size": file.get("size", len(file_content)), - "path": file_path, - "content": file_content - } - except FileNotFoundError as e: - # Re-raise FileNotFoundError as is - raise - except Exception as e: - logger.error(f"Fehler beim Herunterladen der Datei {file_id}: {str(e)}") - raise FileError(f"Fehler beim Herunterladen der Datei: {str(e)}") - - - # Workflow Methoden - - def get_all_workflows(self) -> List[Dict[str, Any]]: - """Gibt alle Workflows des aktuellen Mandanten zurück""" - return self.db.get_recordset("workflows") - - def get_workflows_by_user(self, user_id: int) -> List[Dict[str, Any]]: - """Gibt alle Workflows eines Benutzers zurück""" - return self.db.get_recordset("workflows", record_filter={"user_id": user_id}) - - def get_workflow(self, workflow_id: str) -> Optional[Dict[str, Any]]: - """Gibt einen Workflow anhand seiner ID zurück""" - workflows = self.db.get_recordset("workflows", record_filter={"id": workflow_id}) - if workflows: - return workflows[0] - return None - - def create_workflow(self, workflow_data: Dict[str, Any]) -> Dict[str, Any]: - """Erstellt einen neuen Workflow in der Datenbank""" - # Stellen Sie sicher, dass mandate_id und user_id gesetzt sind - if "mandate_id" not in workflow_data: - workflow_data["mandate_id"] = self.mandate_id - - if "user_id" not in workflow_data: - workflow_data["user_id"] = self.user_id - - # Zeitstempel setzen, falls nicht vorhanden - current_time = self._get_current_timestamp() - if "started_at" not in workflow_data: - workflow_data["started_at"] = current_time - - if "last_activity" not in workflow_data: - workflow_data["last_activity"] = current_time - - # Stelle sicher, dass last_message_id gesetzt ist, falls nicht vorhanden - if "last_message_id" not in workflow_data: - workflow_data["last_message_id"] = "" - - return self.db.record_create("workflows", workflow_data) - - def update_workflow(self, workflow_id: str, workflow_data: Dict[str, Any]) -> Dict[str, Any]: - """ - Aktualisiert einen vorhandenen Workflow. - - Args: - workflow_id: ID des zu aktualisierenden Workflows - workflow_data: Neue Daten für den Workflow - - Returns: - Das aktualisierte Workflow-Objekt - """ - # Prüfen, ob der Workflow existiert - workflow = self.get_workflow(workflow_id) - if not workflow: - return None - - # Aktualisierungszeit setzen - workflow_data["last_activity"] = self._get_current_timestamp() - - # Workflow aktualisieren - return self.db.record_modify("workflows", workflow_id, workflow_data) - - def delete_workflow(self, workflow_id: str) -> bool: - """ - Löscht einen Workflow aus der Datenbank. 
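# A minimal sketch of the ownership guard applied below: only the owning user
# may delete a workflow (an admin-rights check could be added here).
def may_delete(workflow, user_id: int) -> bool:
    return workflow is not None and workflow.get("user_id") == user_id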
- - Args: - workflow_id: ID des zu löschenden Workflows - - Returns: - True bei Erfolg, False wenn der Workflow nicht existiert - """ - # Prüfen, ob der Workflow existiert - workflow = self.get_workflow(workflow_id) - if not workflow: - return False - - # Prüfen, ob der Benutzer der Eigentümer ist oder Admin-Rechte hat - if workflow.get("user_id") != self.user_id: - # Hier könnte eine Prüfung auf Admin-Rechte erfolgen - return False - - # Workflow löschen - return self.db.record_delete("workflows", workflow_id) - - - # Workflow Messages - - def get_workflow_messages(self, workflow_id: str) -> List[Dict[str, Any]]: - """Gibt alle Nachrichten eines Workflows zurück""" - return self.db.get_recordset("workflow_messages", record_filter={"workflow_id": workflow_id}) - - def create_workflow_message(self, message_data: Dict[str, Any]) -> Dict[str, Any]: - """Erstellt eine neue Nachricht für einen Workflow - - Args: - message_data: Die Nachrichtendaten - - Returns: - Die erstellte Nachricht oder None bei Fehler - """ - try: - # Check if required fields are present - required_fields = ["id", "workflow_id"] - for field in required_fields: - if field not in message_data: - logger.error(f"Pflichtfeld '{field}' fehlt in message_data") - raise ValueError(f"Pflichtfeld '{field}' fehlt in den Nachrichtendaten") - - # Validate that ID is not None - if message_data["id"] is None: - message_data["id"] = f"msg_{uuid.uuid4()}" - logger.warning(f"Automatisch generierte ID für Workflow-Nachricht: {message_data['id']}") - - # Stellen Sie sicher, dass die benötigten Felder vorhanden sind - if "started_at" not in message_data and "created_at" not in message_data: - message_data["started_at"] = self._get_current_timestamp() - - # Wenn "created_at" vorhanden ist, übertrage es nach "started_at" - if "created_at" in message_data and "started_at" not in message_data: - message_data["started_at"] = message_data["created_at"] - del message_data["created_at"] - - # Status setzen, falls nicht vorhanden - if "status" not in message_data: - message_data["status"] = "completed" - - # Sequenznummer setzen, falls nicht vorhanden - if "sequence_no" not in message_data: - # Hole aktuelle Nachrichten, um die nächste Sequenznummer zu bestimmen - existing_messages = self.get_workflow_messages(message_data["workflow_id"]) - message_data["sequence_no"] = len(existing_messages) + 1 - - # Debug-Log für die zu erstellenden Daten - logger.debug(f"Erstelle Workflow-Nachricht mit Daten: {message_data}") - - return self.db.record_create("workflow_messages", message_data) - except Exception as e: - logger.error(f"Fehler beim Erstellen der Workflow-Nachricht: {str(e)}") - # Return None instead of raising to avoid cascading failures - return None - - def update_workflow_message(self, message_id: str, message_data: Dict[str, Any]) -> Dict[str, Any]: - """ - Aktualisiert eine bestehende Workflow-Nachricht in der Datenbank - with improved document handling. 
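# A minimal sketch of the defaults create_workflow_message applies above,
# assuming existing_messages are the messages already stored for the workflow:
import uuid
from datetime import datetime

def apply_message_defaults(message: dict, existing_messages: list) -> dict:
    if not message.get("id"):
        message["id"] = f"msg_{uuid.uuid4()}"
    message.setdefault("started_at", datetime.now().isoformat())
    message.setdefault("status", "completed")
    message.setdefault("sequence_no", len(existing_messages) + 1)
    return message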
-    def update_workflow_message(self, message_id: str, message_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
-        """
-        Updates an existing workflow message in the database,
-        with improved document handling.
-
-        Args:
-            message_id: ID of the message
-            message_data: Data to update
-
-        Returns:
-            The updated message object, or None if the message does not exist
-            and no workflow_id is available to create it
-
-        Raises:
-            ValueError: if the update itself fails
-        """
-        try:
-            logger.debug(f"Updating message {message_id} in database")
-
-            # Ensure message_id is provided
-            if not message_id:
-                logger.error("No message_id provided for update_workflow_message")
-                raise ValueError("message_id cannot be empty")
-
-            # Check whether the message exists in the database
-            messages = self.db.get_recordset("workflow_messages", record_filter={"id": message_id})
-            if not messages:
-                logger.warning(f"Message with ID {message_id} does not exist in database")
-
-                # If the message doesn't exist but we have a workflow_id, create it
-                if "workflow_id" in message_data:
-                    logger.info(f"Creating new message with ID {message_id} for workflow {message_data.get('workflow_id')}")
-                    return self.db.record_create("workflow_messages", message_data)
-                else:
-                    logger.error(f"Workflow ID missing for new message {message_id}")
-                    return None
-
-            # Handle the documents array: avoid storing huge content in the
-            # database by limiting the text size of each document content
-            if "documents" in message_data:
-                logger.info(f"Message {message_id} has {len(message_data['documents'])} documents")
-
-                documents_to_store = []
-                for doc in message_data["documents"]:
-                    doc_copy = doc.copy()
-
-                    # Process the contents array if it exists; copy each content
-                    # dict so the caller's objects are not mutated
                    if "contents" in doc_copy:
-                        trimmed_contents = []
-                        for content in doc_copy["contents"]:
-                            content = content.copy()
-                            if content.get("type") == "text" and len(content.get("text", "")) > 1000:
-                                # Limit the text preview to 1000 chars
-                                content["text"] = content["text"][:1000] + "... [truncated]"
-                            trimmed_contents.append(content)
-                        doc_copy["contents"] = trimmed_contents
-
-                    documents_to_store.append(doc_copy)
-
-                # Replace with the processed documents
-                message_data["documents"] = documents_to_store
-
-            # Log the update data size for debugging
-            update_data_size = len(str(message_data))
-            logger.debug(f"Update data size: {update_data_size} bytes")
-
-            # Ensure the ID is in the dataset
-            if 'id' not in message_data:
-                message_data['id'] = message_id
-
-            # Convert created_at to started_at if necessary
-            if "created_at" in message_data and "started_at" not in message_data:
-                message_data["started_at"] = message_data["created_at"]
-                del message_data["created_at"]
-
-            # Update the message
-            updated_message = self.db.record_modify("workflow_messages", message_id, message_data)
-            if updated_message:
-                logger.info(f"Message {message_id} updated successfully")
-            else:
-                logger.warning(f"Failed to update message {message_id}")
-
-            return updated_message
-        except Exception as e:
-            logger.error(f"Error updating message {message_id}: {str(e)}", exc_info=True)
-            # Re-raise with full information
-            raise ValueError(f"Error updating message {message_id}: {str(e)}") from e
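# Sketch of the persistence rule applied above (illustrative, `dom` and the IDs
# assumed): text contents longer than 1000 characters are stored as a truncated
# preview, while the caller's document object is left untouched.
doc = {"id": "doc_1", "contents": [{"type": "text", "text": "x" * 5000}]}
dom.update_workflow_message("msg_1", {"workflow_id": "wf_demo", "documents": [doc]})
assert len(doc["contents"][0]["text"]) == 5000  # original not mutated
# The stored preview is 1000 chars plus the "... [truncated]" marker.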
-    def delete_workflow_message(self, workflow_id: str, message_id: str) -> bool:
-        """
-        Deletes a message from a workflow in the database.
-
-        Args:
-            workflow_id: ID of the owning workflow
-            message_id: ID of the message to delete
-
-        Returns:
-            True on success, False on error
-        """
-        try:
-            # Check whether the message exists
-            messages = self.get_workflow_messages(workflow_id)
-            message = next((m for m in messages if m.get("id") == message_id), None)
-
-            if not message:
-                logger.warning(f"Nachricht {message_id} für Workflow {workflow_id} nicht gefunden")
-                return False
-
-            # Delete the message from the database
-            return self.db.record_delete("workflow_messages", message_id)
-        except Exception as e:
-            logger.error(f"Fehler beim Löschen der Nachricht {message_id}: {str(e)}")
-            return False
-
-    def delete_file_from_message(self, workflow_id: str, message_id: str, file_id: int) -> bool:
-        """
-        Removes a file reference from a message. The file itself is not
-        deleted, only the reference inside the message.
-        Enhanced version with improved file matching.
-
-        Args:
-            workflow_id: ID of the owning workflow
-            message_id: ID of the message
-            file_id: ID of the file to remove
-
-        Returns:
-            True on success, False on error
-        """
-        try:
-            logger.info(f"Removing file {file_id} from message {message_id} in workflow {workflow_id}")
-
-            # Get all workflow messages
-            all_messages = self.get_workflow_messages(workflow_id)
-            logger.debug(f"Workflow {workflow_id} has {len(all_messages)} messages")
-
-            # Try increasingly permissive approaches to find the message:
-            # exact match first
-            message = next((m for m in all_messages if m.get("id") == message_id), None)
-
-            # then a case-insensitive match
-            if not message and isinstance(message_id, str):
-                message = next((m for m in all_messages
-                                if isinstance(m.get("id"), str) and m.get("id").lower() == message_id.lower()), None)
-
-            # then a prefix match
-            if not message and isinstance(message_id, str):
-                message = next((m for m in all_messages
-                                if isinstance(m.get("id"), str) and m.get("id").startswith(message_id)), None)
-
-            if not message:
-                logger.warning(f"Message {message_id} not found in workflow {workflow_id}")
-                return False
-
-            logger.info(f"Found message: {message.get('id')}")
-
-            # Check whether the message has documents
-            if "documents" not in message or not message["documents"]:
-                logger.warning(f"No documents in message {message_id}")
-                return False
-
-            # Log the existing documents
-            documents = message.get("documents", [])
-            logger.debug(f"Message has {len(documents)} documents")
-            for i, doc in enumerate(documents):
-                doc_id = doc.get("id", "unknown")
-                source_id = doc.get("source", {}).get("id", "unknown")
-                logger.debug(f"Document {i}: doc_id={doc_id}, source_id={source_id}")
-
-            # Build a new list of documents without the one to delete
-            updated_documents = []
-            removed = False
-            fid = str(file_id)
-
-            for doc in documents:
-                doc_id = doc.get("id")
-                source_id = doc.get("source", {}).get("id")
-
-                # Flexible matching: exact match on either ID, or a substring
-                # match on the string form (IDs may be stored as int or str)
-                should_remove = (
-                    doc_id == file_id or
-                    source_id == file_id or
-                    (isinstance(doc_id, str) and fid in doc_id) or
-                    (isinstance(source_id, str) and fid in source_id)
-                )
-
-                if should_remove:
-                    removed = True
-                    logger.info(f"Found file to remove: doc_id={doc_id}, source_id={source_id}")
-                else:
-                    updated_documents.append(doc)
-
-            if not removed:
-                logger.warning(f"No matching file {file_id} found in message {message_id}")
-                return False
-
-            # Update the message with the modified documents array
-            message_update = {
-                "documents": updated_documents
-            }
-
-            # Apply the update directly to the database
-            updated = self.db.record_modify("workflow_messages", message["id"], message_update)
-
-            if updated:
-                logger.info(f"Successfully removed file {file_id} from message {message_id}")
-                return True
-            else:
-                logger.warning(f"Failed to update message {message_id} in database")
-                return False
-
-        except Exception as e:
-            logger.error(f"Error removing file {file_id} from message {message_id}: {str(e)}")
-            return False
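# The flexible ID matching above, condensed into a standalone predicate
# (illustrative sketch): IDs may arrive as int or str depending on the JSON
# source, so both an exact match and a substring match on the string form count.
def matches_file(doc: dict, file_id) -> bool:
    fid = str(file_id)
    doc_id = doc.get("id")
    source_id = doc.get("source", {}).get("id")
    return (doc_id == file_id or source_id == file_id
            or (isinstance(doc_id, str) and fid in doc_id)
            or (isinstance(source_id, str) and fid in source_id))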
-
-
-    # Workflow logs
-
-    def get_workflow_logs(self, workflow_id: str) -> List[Dict[str, Any]]:
-        """Returns all log entries of a workflow."""
-        return self.db.get_recordset("workflow_logs", record_filter={"workflow_id": workflow_id})
-
-    def create_workflow_log(self, log_data: Dict[str, Any]) -> Dict[str, Any]:
-        """Creates a new log entry for a workflow."""
-        # Make sure the required fields are present
-        if "timestamp" not in log_data:
-            log_data["timestamp"] = self._get_current_timestamp()
-
-        return self.db.record_create("workflow_logs", log_data)
-
-
-    # Workflow management
-
-    def save_workflow_state(self, workflow: Dict[str, Any], save_messages: bool = True, save_logs: bool = True) -> bool:
-        """
-        Saves the complete state of a workflow to the database.
-        This covers the workflow itself, its messages, and its logs.
-
-        Args:
-            workflow: The complete workflow object
-            save_messages: Flag indicating whether messages should be saved
-            save_logs: Flag indicating whether logs should be saved
-
-        Returns:
-            True on success, False on error
-        """
-        try:
-            workflow_id = workflow.get("id")
-            if not workflow_id:
-                return False
-
-            # Extract only the workflow fields relevant for the database
-            workflow_db_data = {
-                "id": workflow_id,
-                "mandate_id": workflow.get("mandate_id", self.mandate_id),
-                "user_id": workflow.get("user_id", self.user_id),
-                "name": workflow.get("name", f"Workflow {workflow_id}"),
-                "status": workflow.get("status", "unknown"),
-                "started_at": workflow.get("started_at", self._get_current_timestamp()),
-                "last_activity": workflow.get("last_activity", self._get_current_timestamp()),
-                "last_message_id": workflow.get("last_message_id", ""),
-                "data_stats": workflow.get("data_stats", {})
-            }
-
-            # Check whether the workflow already exists
-            existing_workflow = self.get_workflow(workflow_id)
-            if existing_workflow:
-                self.update_workflow(workflow_id, workflow_db_data)
-            else:
-                self.create_workflow(workflow_db_data)
-
-            # Save the messages
-            if save_messages and "messages" in workflow:
-                # Fetch the existing messages
-                existing_messages = {msg["id"]: msg for msg in self.get_workflow_messages(workflow_id)}
-
-                for message in workflow["messages"]:
-                    message_id = message.get("id")
-                    if not message_id:
-                        continue
-
-                    # Extract only the data relevant for the database
-                    message_data = {
-                        "id": message_id,
-                        "workflow_id": workflow_id,
-                        "sequence_no": message.get("sequence_no", 0),
-                        "role": message.get("role", "unknown"),
-                        "content": message.get("content"),
-                        "agent_name": message.get("agent_name"),
-                        "status": message.get("status", "completed"),
-                        "started_at": message.get("started_at", self._get_current_timestamp()),
-                        "finished_at": message.get("finished_at"),
-                        "parent_message_id": message.get("parent_message_id"),
-                        # IMPORTANT: include the documents field to persist file attachments
-                        "documents": message.get("documents", [])
-                    }
-
-                    # Debug logging for documents
-                    doc_count = len(message.get("documents", []))
-                    if doc_count > 0:
-                        logger.info(f"Message {message_id} has {doc_count} documents to save")
-
-                    # Create or update the message
-                    if message_id in existing_messages:
-                        self.db.record_modify("workflow_messages", message_id, message_data)
-                    else:
-                        self.db.record_create("workflow_messages", message_data)
-
-            # Save the logs
-            if save_logs and "logs" in workflow:
-                # Fetch the existing logs
-                existing_logs = {log["id"]: log for log in self.get_workflow_logs(workflow_id)}
-
-                for log in workflow["logs"]:
-                    log_id = log.get("id")
-                    if not log_id:
-                        continue
-
-                    # Extract only the data relevant for the database
-                    log_data = {
-                        "id": log_id,
-                        "workflow_id": workflow_id,
-                        "message": log.get("message", ""),
-                        "type": log.get("type", "info"),
-                        "timestamp": log.get("timestamp", self._get_current_timestamp()),
-                        "agent_id": log.get("agent_id"),
-                        "agent_name": log.get("agent_name")
-                    }
-
-                    # Create or update the log entry
-                    if log_id in existing_logs:
-                        self.db.record_modify("workflow_logs", log_id, log_data)
-                    else:
-                        self.db.record_create("workflow_logs", log_data)
-
-            return True
-        except Exception as e:
-            logger.error(f"Fehler beim Speichern des Workflow-Zustands: {str(e)}")
-            return False
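# save_workflow_state() persists messages and logs with an upsert: modify when
# the ID is already known, create otherwise. The same pattern in isolation
# (sketch; the parameter names are assumptions for illustration):
def upsert(db, table: str, record: dict, existing_ids: set) -> dict:
    if record["id"] in existing_ids:
        return db.record_modify(table, record["id"], record)
    return db.record_create(table, record)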
-    def load_workflow_state(self, workflow_id: str) -> Optional[Dict[str, Any]]:
-        """
-        Loads the complete state of a workflow from the database.
-        This covers the workflow itself, its messages, and its logs.
-
-        Args:
-            workflow_id: ID of the workflow to load
-
-        Returns:
-            The complete workflow object, or None on error
-        """
-        try:
-            # Load the base workflow
-            workflow = self.get_workflow(workflow_id)
-            if not workflow:
-                return None
-
-            logger.debug(f"Loaded base workflow {workflow_id} from database")
-
-            # Load the messages and sort them by sequence number
-            messages = self.get_workflow_messages(workflow_id)
-            messages.sort(key=lambda x: x.get("sequence_no", 0))
-
-            logger.debug(f"Loaded {len(messages)} messages for workflow {workflow_id}")
-
-            # Log the document count for each message
-            for msg in messages:
-                doc_count = len(msg.get("documents", []))
-                if doc_count > 0:
-                    logger.info(f"Message {msg.get('id')} has {doc_count} documents loaded from database")
-                    # Log document details for debugging
-                    for i, doc in enumerate(msg.get("documents", [])):
-                        source = doc.get("source", {})
-                        logger.debug(f"Document {i+1}: {source.get('name', 'unnamed')} (ID: {source.get('id', 'unknown')})")
-
-            # Load the logs and sort them by timestamp
-            logs = self.get_workflow_logs(workflow_id)
-            logs.sort(key=lambda x: x.get("timestamp", ""))
-
-            # Assemble the complete workflow object
-            complete_workflow = workflow.copy()
-            complete_workflow["messages"] = messages
-            complete_workflow["logs"] = logs
-
-            return complete_workflow
-        except Exception as e:
-            logger.error(f"Fehler beim Laden des Workflow-Zustands: {str(e)}")
-            return None
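# Round-trip sketch for the two state methods above (illustrative, `dom` as in
# the sketches further above): a saved workflow comes back with its messages
# and logs attached.
workflow = {"id": "wf_demo", "status": "running", "messages": [], "logs": []}
assert dom.save_workflow_state(workflow)
restored = dom.load_workflow_state("wf_demo")
assert restored is not None and restored["messages"] == [] and restored["logs"] == []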
-
-
-# Singleton factory for LucyDOMInterface instances per context
-_lucydom_interfaces = {}
-
-def get_lucydom_interface(mandate_id: int = 0, user_id: int = 0) -> LucyDOMInterface:
-    """
-    Returns a LucyDOMInterface instance for the given context,
-    reusing existing instances.
-    """
-    context_key = f"{mandate_id}_{user_id}"
-    if context_key not in _lucydom_interfaces:
-        _lucydom_interfaces[context_key] = LucyDOMInterface(mandate_id, user_id)
-    return _lucydom_interfaces[context_key]
-
-# Init
-get_lucydom_interface()
\ No newline at end of file
diff --git a/modules/chat.py b/modules/chat.py
index 63b9622c..7d620728 100644
--- a/modules/chat.py
+++ b/modules/chat.py
@@ -4,6 +4,7 @@
 Implements a compact and modular architecture for processing
 user requests, executing agents, and formatting results.
 """
+import os
 import logging
 import json
 import uuid
@@ -14,6 +15,7 @@ from typing import Dict, Any, List, Optional, Union
 from connectors.connector_aichat_openai import ChatService
 from modules.chat_registry import get_agent_registry
 from modules.lucydom_interface import get_lucydom_interface
+from modules.chat_content_extraction import get_document_contents
 
 # Configure the logger
 logger = logging.getLogger(__name__)
@@ -46,75 +48,48 @@ class ChatManager:
         Main function for integrating user requests into the workflow.
 
         Args:
-            user_input, which will be parsed to message_user: message object with the user request and documents
+            user_input: Dictionary with the user request and file IDs
             workflow_id: Optional - ID of the workflow (None for new workflows)
 
         Returns:
             Workflow object with updated state
         """
-        logger.info(f"User message object: {self.parse_json2text(message_user)}")
-
-        # 0. Transform the user input with file IDs into a user message object and prepare all contents
-        message_user = self.chat_user_message_integration(user_input)
-
         # 1. Initialize a workflow or load an existing one
         workflow = self.workflow_init(workflow_id)
-
-        # 2. Store the user message in the workflow
-        self.message_add(workflow, message_user)
+
+        # 2. Transform the user input into a message object and store it in the workflow
+        message_user = self.chat_message_to_workflow("user", "", user_input, workflow)
 
         # 3. Build the project manager prompt and analyze the response
         project_manager_response = await self.chat_prompt(message_user, workflow)
-
-        # 3.1. Extract the required information from the response
-        obj_answer = project_manager_response.get("obj_answer", [])
+        obj_final_documents = project_manager_response.get("obj_final_documents", [])
         obj_workplan = project_manager_response.get("obj_workplan", [])
-        user_response = project_manager_response.get("user_response", "")
+        obj_user_response = project_manager_response.get("obj_user_response", "")
 
-        # 3.2. Store the response as a message in the workflow and add log entries
+        # 4. Store the response as a message in the workflow and add log entries
         response_message = {
             "role": "assistant",
             "agent_type": "project_manager",
-            "content": user_response
+            "content": obj_user_response
         }
         self.message_add(workflow, response_message)
 
-        # 3.3. Log entries for the workplan and the planned results
+        self.log_add(workflow, f"Geplante Ergebnisse: {self.parse_json2text(obj_final_documents)}")
         self.log_add(workflow, f"Arbeitsplan: {self.parse_json2text(obj_workplan)}")
-        self.log_add(workflow, f"Geplante Ergebnisse: {self.parse_json2text(obj_answer)}")
-
-        # 4. Execute the agents according to the workplan
+        self.log_add(workflow, f"Info an den User: {obj_user_response}")
+
+        # 5. Execute the agents according to the workplan
         obj_results = []
         if obj_workplan:
             for task in obj_workplan:
-                # Inform the user about the current step
-                agent_name = task.get("agent")
-                step_info = f"Führe Agent '{agent_name}' aus um {', '.join([d.get('label') for d in task.get('doc_output', [])])} zu erstellen"
-                self.log_add(workflow, step_info)
-
-                # Prepare the input documents for the agent
-                input_docs = self.agent_input_documents(task.get('doc_input', []), workflow)
-
-                # Run the agent
-                agent_results = await self.agent_execute(
-                    agent_name=agent_name,
-                    prompt=task.get("prompt", ""),
-                    input_docs=input_docs,
-                    output_format=task.get("doc_output", [])
-                )
-
-                # Collect the results
-                obj_results.extend(agent_results)
-
-                # Store the intermediate results
-                for result in agent_results:
-                    self.log_add(workflow, f"Ergebnis erstellt: {result.get('label')}")
-
-        # 5. Create the final answer with the collected documents
-        final_message = self.chat_final_message(user_response, obj_results, obj_answer)
+                task_results = await self.agent_processing(task, workflow)
+                obj_results.extend(task_results)
+
+        # 6. Create the final answer with the relevant documents from obj_final_documents
+        final_message = self.chat_final_message(obj_user_response, obj_results, obj_final_documents)
         self.message_add(workflow, final_message)
 
-        # 6. Finalize the workflow
+        # 7. Finalize the workflow
         self.workflow_finish(workflow)
 
         return workflow
@@ -128,179 +103,200 @@ class ChatManager:
             workflow: Current workflow object
 
         Returns:
-            Project manager response with obj_answer, obj_workplan, and user_response
+            Project manager response with obj_final_documents, obj_workplan, and obj_user_response
         """
-        # Fetch the accepted document types from the helper function
-        doc_types = self.document_types_accepted()
-        doc_types_str = ", ".join(doc_types)
-
         # Retrieve the available agents with their capabilities
         available_agents = self.agent_profiles()
 
         # Create a summary of the workflow
-        workflow_summary = await self.workflow_summarize(workflow, "Fasse den bisherigen Verlauf kurz und prägnant zusammen")
-
-        # Create a summary of the documents provided by the user
-        user_docs_summary = await self.message_summarize_documents(message_user, "Fasse den Inhalt des Dokuments kurz zusammen")
+        workflow_summary = await self.workflow_summarize(workflow, message_user)
 
         # Build the list of currently available documents from the user input or previously generated documents
-        available_documents = self.available_documents_get(message_user, workflow)
-        available_docs_str = self.available_documents_format(available_documents)
+        available_documents = self.available_documents_get(workflow, message_user)
+        available_docs_str = json.dumps(available_documents, indent=2)
 
         # Build the prompt for the project manager
         prompt = f"""
-Basierend auf der Benutzeranfrage: "{message_user.get('content')}" und den bereitgestellten Dokumenten,
-analysiere bitte die Anforderungen und erstelle einen Plan zur Bearbeitung.
+Based on the user request and the provided documents, please analyze the requirements and create a processing plan.
+