gateway/modules/chat/handling/handlingActions.py
2025-07-22 18:15:02 +02:00

232 lines
15 KiB
Python

# handlingActions.py
# Contains all action handling functions extracted from managerChat.py
import logging
import json
import time
from typing import Dict, Any, Optional, List, Union
from datetime import datetime, UTC
from modules.interfaces.interfaceChatModel import ReviewResult, ActionResult
from .promptFactory import createResultReviewPrompt
from modules.chat.documents.documentGeneration import DocumentGenerator
logger = logging.getLogger(__name__)
class HandlingActions:
def __init__(self, service, chatInterface):
    """Wire up the action handler with its backend service and chat interface.

    Args:
        service: Backend used to execute actions and call the AI model.
        chatInterface: Interface used to persist workflow messages.
    """
    self.service = service
    self.chatInterface = chatInterface
    # Document creation/processing is delegated to a helper bound to the same service.
    self.documentGenerator = DocumentGenerator(service)
async def executeSingleAction(self, action, workflow):
    """Execute a single action and return an ActionResult with enhanced document processing.

    Runs the action through the service layer, records success/failure on the
    action object, creates a workflow message on success, and post-processes
    any documents the action produced.

    Args:
        action: Workflow action carrying execMethod/execAction/execParameters,
            plus optional expectedDocumentFormats and execResultLabel.
        workflow: Workflow the action belongs to; receives result messages.

    Returns:
        ActionResult: success flag, result payload (text + processed documents)
        and identifying metadata. On any exception a failure ActionResult is
        returned instead of raising.
    """
    try:
        # Pass expected document formats through to the executor so it can
        # target the right output types.
        enhanced_parameters = action.execParameters.copy()
        if action.expectedDocumentFormats:
            enhanced_parameters['expectedDocumentFormats'] = action.expectedDocumentFormats
            logger.info(f"Action {action.execMethod}.{action.execAction} expects formats: {action.expectedDocumentFormats}")
        result = await self.service.executeAction(
            methodName=action.execMethod,
            actionName=action.execAction,
            parameters=enhanced_parameters
        )
        result_label = action.execResultLabel
        # NOTE(review): result.data is assumed to be a dict — confirm the
        # service contract; a None payload would be caught by the except below.
        # Hoisted here because both branches previously re-read the same value.
        result_text = result.data.get("result", "")
        if result.success:
            action.setSuccess()
            action.result = result_text
            # (Removed a no-op reassignment of action.execResultLabel that
            # wrote back the value read just above.)
            await self.createActionMessage(action, result, workflow, result_label)
        else:
            action.setError(result.error or "Action execution failed")
        # Documents are processed for both success and failure outcomes.
        processed_documents = self.documentGenerator.processActionResultDocuments(result, action, workflow)
        # Identifying fields are duplicated into data and metadata on purpose
        # (callers read them from either place).
        identity = {
            "actionId": action.id,
            "actionMethod": action.execMethod,
            "actionName": action.execAction,
            "resultLabel": result_label
        }
        return ActionResult(
            success=result.success,
            data={
                "result": result_text,
                "documents": processed_documents,
                **identity
            },
            metadata=dict(identity),
            validation=[],
            error=result.error or ""
        )
    except Exception as e:
        logger.error(f"Error executing single action: {str(e)}")
        action.setError(str(e))
        return ActionResult(
            success=False,
            data={
                "actionId": action.id,
                "actionMethod": action.execMethod,
                "actionName": action.execAction,
                "documents": []
            },
            metadata={
                "actionId": action.id,
                "actionMethod": action.execMethod,
                "actionName": action.execAction
            },
            validation=[],
            error=str(e)
        )
async def validateActionResult(self, action_result, action, context) -> dict:
    """Run an AI-based validation pass over a single action result.

    Builds a generic validation prompt, sends it to the model, parses the
    structured verdict and annotates it with the action's identifying fields.
    Fail-open: on any error a permissive 'success' verdict is returned so a
    validation problem does not halt the workflow.
    """
    identity = {
        'action_id': action.id,
        'action_method': action.execMethod,
        'action_name': action.execAction,
        'result_label': action.execResultLabel,
    }
    try:
        prompt = self._createGenericValidationPrompt(action_result, action, context)
        reply = await self.service.callAiTextAdvanced(prompt, "action_validation")
        verdict = self._parseValidationResponse(reply)
        verdict.update(identity)
        return verdict
    except Exception as e:
        logger.error(f"Error validating action result: {str(e)}")
        return {
            'status': 'success',
            'reason': f'Validation failed: {str(e)}',
            'confidence': 0.5,
            'improvements': [],
            **identity,
        }
async def createActionMessage(self, action, result, workflow, result_label=None):
    """Create and store a message for the action result in the workflow with enhanced document processing"""
    try:
        if result_label is None:
            result_label = action.execResultLabel
        # Build the message payload first: sequenceNr must reflect the message
        # count BEFORE document generation runs.
        message_data = {
            "workflowId": workflow.id,
            "role": "assistant",
            "message": f"Executed action {action.execMethod}.{action.execAction}",
            "status": "step",
            "sequenceNr": len(workflow.messages) + 1,
            "publishedAt": datetime.now(UTC).isoformat(),
            "actionId": action.id,
            "actionMethod": action.execMethod,
            "actionName": action.execAction,
            "documentsLabel": result_label,
            "documents": []
        }
        # Use the local createDocumentsFromActionResult method
        created_documents = self.documentGenerator.createDocumentsFromActionResult(result, action, workflow)
        message_data["documents"] = created_documents
        message = self.chatInterface.createWorkflowMessage(message_data)
        if not message:
            logger.error(f"Failed to create workflow message for action {action.execMethod}.{action.execAction}")
            return
        workflow.messages.append(message)
        logger.info(f"Created action message for {action.execMethod}.{action.execAction} with {len(created_documents)} documents")
        logger.debug(f"WORKFLOW STATE after createActionMessage: id={id(workflow)}, message_count={len(workflow.messages)}")
        # Dump per-message label/document counts to aid workflow debugging.
        for idx, msg in enumerate(workflow.messages):
            label = getattr(msg, 'documentsLabel', None)
            docs = getattr(msg, 'documents', None)
            logger.debug(f" Message {idx}: label='{label}', documents_count={len(docs) if docs else 0}")
    except Exception as e:
        # Best-effort: message creation must never break action execution.
        logger.error(f"Error creating action message: {str(e)}")
def parseActionResponse(self, response: str) -> list:
try:
json_start = response.find('{')
json_end = response.rfind('}') + 1
if json_start == -1 or json_end == 0:
raise ValueError("No JSON found in response")
json_str = response[json_start:json_end]
action_data = json.loads(json_str)
if 'actions' not in action_data:
raise ValueError("Action response missing 'actions' field")
return action_data['actions']
except Exception as e:
logger.error(f"Error parsing action response: {str(e)}")
return []
def parseReviewResponse(self, response: str) -> dict:
try:
json_start = response.find('{')
json_end = response.rfind('}') + 1
if json_start == -1 or json_end == 0:
raise ValueError("No JSON found in response")
json_str = response[json_start:json_end]
review = json.loads(json_str)
if 'status' not in review:
raise ValueError("Review response missing 'status' field")
return review
except Exception as e:
logger.error(f"Error parsing review response: {str(e)}")
return {'status': 'failed', 'reason': f'Parse error: {str(e)}'}
# Internal helper methods
def _createGenericValidationPrompt(self, action_result, action, context) -> str:
    """Build the LLM prompt used to validate a single action's result.

    Summarizes the expected outcome (label, formats), the actual outcome
    (text, documents, delivered files/formats, extracted content) and the
    task context into a single instruction prompt that asks the model to
    reply with a JSON-only verdict.
    """
    # Raw outcome fields from the executed action.
    success = action_result.success
    result_data = action_result.data
    error = action_result.error
    validation_messages = action_result.validation
    # result_data may be a dict payload or an arbitrary object; normalize both cases.
    result_text = result_data.get("result", "") if isinstance(result_data, dict) else str(result_data)
    documents = result_data.get("documents", []) if isinstance(result_data, dict) else []
    doc_count = len(documents)
    # What the action promised to deliver.
    expected_result_label = action.execResultLabel
    expected_format = action.execParameters.get('outputFormat', 'unknown')
    expected_document_formats = action.expectedDocumentFormats or []
    actual_result_label = result_data.get("resultLabel", "") if isinstance(result_data, dict) else ""
    result_label_match = actual_result_label == expected_result_label
    # Use DocumentGenerator for file/format extraction
    delivered_files, delivered_formats = DocumentGenerator.get_delivered_files_and_formats(documents)
    # Collect extracted content items, preferring a structured 'extractedContent'
    # object (with a .contents attribute) over a raw 'contents' list.
    content_items = []
    if isinstance(result_data, dict):
        if 'extractedContent' in result_data:
            extracted_content = result_data['extractedContent']
            if hasattr(extracted_content, 'contents'):
                content_items = extracted_content.contents
        elif 'contents' in result_data:
            content_items = result_data['contents']
    # If files were delivered but nothing was extracted, synthesize a one-line summary.
    if delivered_files and not content_items:
        content_items = [f"File content available in: {', '.join(delivered_files)}"]
    # Render each content item as "label: mimeType" when structured, else as plain text.
    content_summary = []
    for item in content_items:
        if hasattr(item, 'label') and hasattr(item, 'metadata'):
            content_summary.append(f"{item.label}: {item.metadata.mimeType if hasattr(item.metadata, 'mimeType') else 'unknown'}")
        elif isinstance(item, str):
            content_summary.append(item)
        else:
            content_summary.append(str(item))
    # NOTE: the prompt uses literal '\n' escapes inside a triple-quoted f-string;
    # the physical line break mid-string below is intentional and part of the prompt.
    return f"""You are an action result validator. Your primary focus is to validate that the action delivered the promised result files in the promised format.\n\nACTION DETAILS:\n- Method: {action.execMethod}\n- Action: {action.execAction}\n- Expected Result Label: {expected_result_label}\n- Actual Result Label: {actual_result_label}\n- Result Label Match: {result_label_match}\n- Expected Format: {expected_format}\n- Expected Document Formats: {json.dumps(expected_document_formats, indent=2) if expected_document_formats else 'None specified'}\n- Parameters: {json.dumps(action.execParameters, indent=2)}\n\nRESULT TO VALIDATE:\n- Success: {success}\n- Result Data: {result_text[:500]}{'...' if len(result_text) > 500 else ''}\n- Error: {error}\n- Validation Messages: {', '.join(validation_messages) if validation_messages else 'None'}\n- Documents Produced: {doc_count}\n- Delivered Files: {', '.join(delivered_files) if delivered_files else 'None'}\n- Delivered Formats: {json.dumps(delivered_formats, indent=2) if delivered_formats else 'None'}\n- Content Items: {', '.join(content_summary) if content_summary else 'None'}\n\nCRITICAL VALIDATION CRITERIA:\n1. **Result Label Match**: Does the action result contain the expected result label?\n2. **File Delivery**: Did the action deliver the promised result file(s)?\n3. **Format Compliance**: If expected document formats were specified, do the delivered files match the expected formats?\n4. **Content Quality**: Is the content of the delivered files usable and complete?\n5. **Content Processing**: If content extraction was expected, was it performed correctly?\n\nCONTEXT:\n- Task Description: {context.task_step.description if context.task_step else 'Unknown'}\n- Previous Results: {', '.join(context.previous_results) if context.previous_results else 'None'}\n\nVALIDATION INSTRUCTIONS:\n1. **Result Label Check**: Verify that the expected result label \"{expected_result_label}\" is present in the action result data. 
This is the primary success criterion.\n2. **File Delivery**: Check if files were delivered when expected. The individual filenames don't need to match the result label - focus on whether content was actually produced.\n3. **Format Compliance**: If expected document formats were specified, check if delivered files match the expected extensions and MIME types. If no formats were specified, this criterion is satisfied.\n4. **Content Quality**: If files were delivered, consider the action successful. The presence of delivered files indicates content was processed and stored.\n5. **Content Processing**: If files were delivered, assume content extraction was performed correctly. The file delivery is evidence of successful processing.\n6. **Success Criteria**: The action is successful if the result label matches AND files were delivered. If expected formats were specified, they should also match.\n\nIMPORTANT NOTES:\n- The result label must be present in the action result data for success\n- Individual filenames can be different from the result label\n- If files were delivered, consider the action successful even if content details are not provided\n- Focus on whether the action accomplished its intended purpose (file delivery)\n- Empty files should be considered failures, but delivered files indicate success\n\nREQUIRED JSON RESPONSE:\n{{\n \"status\": \"success|retry|fail\",\n \"reason\": \"Detailed explanation focusing on result label match and content quality\",\n \"confidence\": 0.0-1.0,\n \"improvements\": [\"specific improvements if needed\"],\n \"quality_score\": 1-10,\n \"missing_elements\": [\"missing result label\", \"missing files\", \"content issues\"],\n \"suggested_retry_approach\": \"Specific approach for retry if status is retry\"\n}}\n\nNOTE: Respond with ONLY the JSON object. Do not include any explanatory text."""
def _parseValidationResponse(self, response: str) -> dict:
try:
json_start = response.find('{')
json_end = response.rfind('}') + 1
if json_start == -1 or json_end == 0:
raise ValueError("No JSON found in validation response")
json_str = response[json_start:json_end]
validation = json.loads(json_str)
if 'status' not in validation:
raise ValueError("Validation response missing 'status' field")
validation.setdefault('confidence', 0.5)
validation.setdefault('improvements', [])
validation.setdefault('quality_score', 5)
validation.setdefault('missing_elements', [])
validation.setdefault('suggested_retry_approach', '')
return validation
except Exception as e:
logger.error(f"Error parsing validation response: {str(e)}")
return {
'status': 'success',
'reason': f'Parse error: {str(e)}',
'confidence': 0.5,
'improvements': [],
'quality_score': 5,
'missing_elements': [],
'suggested_retry_approach': ''
}