From efd5736910282e1fb298fe26c9409310d9d47808 Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Mon, 25 Aug 2025 23:28:12 +0200
Subject: [PATCH] Final release 2 - integration testing completed - UI and
 backend fixes
---
modules/chat/handling/handlingTasks.py | 13 -------
modules/chat/handling/promptFactory.py | 50 +++++++++++++++++++++++---
2 files changed, 46 insertions(+), 17 deletions(-)
diff --git a/modules/chat/handling/handlingTasks.py b/modules/chat/handling/handlingTasks.py
index 361b6fff..27ef1e26 100644
--- a/modules/chat/handling/handlingTasks.py
+++ b/modules/chat/handling/handlingTasks.py
@@ -432,19 +432,6 @@ class HandlingTasks:
# Generate the action definition prompt
action_prompt = await createActionDefinitionPrompt(action_context, self.service)
-
- # Log the full prompt being sent to AI for debugging
- logger.info("=== ACTION DEFINITION PROMPT SENT TO AI ===")
- logger.info(f"Task: {task_step.objective}")
- logger.info(f"Retry Count: {action_context.retry_count}")
- logger.info(f"Previous Results: {action_context.previous_results}")
- logger.info(f"Improvements: {action_context.improvements}")
- logger.info(f"Previous Review Result: {action_context.previous_review_result}")
- logger.info(f"Criteria Progress: {action_context.criteria_progress}")
- logger.info("=== FULL PROMPT ===")
- logger.info(action_prompt)
- logger.info("=== END PROMPT ===")
-
prompt = await self.service.callAiTextAdvanced(action_prompt)
# Check if AI response is valid
diff --git a/modules/chat/handling/promptFactory.py b/modules/chat/handling/promptFactory.py
index 71569556..28dfb402 100644
--- a/modules/chat/handling/promptFactory.py
+++ b/modules/chat/handling/promptFactory.py
@@ -427,20 +427,43 @@ def createResultReviewPrompt(context: ReviewContext, service) -> str:
else:
action_summary += f" Documents: None\n"
- # Build result summary
+ # Build result summary with SIMPLE DOCUMENT VALIDATION
result_summary = ""
+ document_validation_summary = ""
+ document_access_warnings = []
+
if context.action_results:
for i, result in enumerate(context.action_results):
result_summary += f"\nRESULT {i+1}:\n"
result_summary += f" Success: {result.success}\n"
if result.error:
result_summary += f" Error: {result.error}\n"
+
if result.documents:
result_summary += f" Documents: {len(result.documents)} document(s)\n"
for doc in result.documents:
- result_summary += f" - {doc.documentName} ({doc.mimeType})\n"
+ # Use correct ActionDocument attributes
+ doc_name = getattr(doc, 'documentName', 'Unknown')
+ doc_mime = getattr(doc, 'mimeType', 'Unknown')
+ doc_data = getattr(doc, 'documentData', None)
+
+ result_summary += f" - {doc_name} ({doc_mime})\n"
+
+ # SIMPLE VALIDATION: Check if documents exist and have basic properties
+ validation_status = "✅ Valid"
+ if not doc_name or str(doc_name).strip() == "":
+ validation_status = "❌ Missing document name"
+ elif not doc_mime or str(doc_mime).strip() == "":
+ validation_status = "❌ Missing MIME type"
+ elif doc_data is None:
+ validation_status = "⚠️ No document data"
+ elif hasattr(doc_data, '__len__') and len(doc_data) == 0:
+ validation_status = "⚠️ Empty document data"
+
+ document_validation_summary += f" - {doc_name}: {validation_status}\n"
else:
result_summary += f" Documents: None\n"
+ document_validation_summary += f" - No documents produced\n"
# Get enhanced document context using the new method
document_context = service.getEnhancedDocumentContext()
@@ -448,6 +471,14 @@ def createResultReviewPrompt(context: ReviewContext, service) -> str:
# Get user language from service
user_language = service.user.language if service and service.user else 'en'
+ # Build warnings section (only for critical issues)
+ warnings_section = ""
+ if document_access_warnings:
+ warnings_section = f"""
+⚠️ DOCUMENT VALIDATION ISSUES:
+{chr(10).join(f"- {warning}" for warning in document_access_warnings)}
+"""
+
prompt = f"""
You are a result review AI that evaluates task execution results and provides feedback with user-friendly messages.
@@ -460,7 +491,12 @@ EXECUTION SUMMARY:
RESULT SUMMARY:
{result_summary}
-DOCUMENT CONTEXT:
+{warnings_section}
+
+DOCUMENT VALIDATION SUMMARY:
+{document_validation_summary if document_validation_summary else "No documents to validate"}
+
+DOCUMENT CONTEXT (Available Documents):
{document_context}
PREVIOUS RESULTS: {', '.join(context.previous_results) if context.previous_results else 'None'}
@@ -474,6 +510,12 @@ REVIEW INSTRUCTIONS:
6. Generate user-friendly messages explaining the results
7. Return a JSON object with the exact structure shown below
+DOCUMENT VALIDATION FOCUS:
+- Check if the agreed result documents label is correct (matches expected format)
+- Verify that documents are actually present and have basic properties
+- Do NOT attempt to analyze document content deeply
+- Focus on document existence and basic metadata validation
+
REQUIRED JSON STRUCTURE:
{{
"status": "success|retry|failed",
@@ -483,7 +525,7 @@ REQUIRED JSON STRUCTURE:
"missing_outputs": ["missing_output1", "missing_output2"],
"met_criteria": ["criteria1", "criteria2"],
"unmet_criteria": ["criteria3", "criteria4"],
- "confidence": 0.85, // 0.0-1.0 scale
+ "confidence": 0.85, // 0.0-1.0 confidence level in this assessment
"userMessage": "User-friendly message explaining the review results in the user's language"
}}