chat trace file log ready

This commit is contained in:
ValueOn AG 2025-10-07 00:17:11 +02:00
parent bf1100410c
commit 0cb1e75daf
13 changed files with 526 additions and 608 deletions

View file

@ -1053,7 +1053,7 @@ class ChatObjects:
def _storeDebugMessageAndDocuments(self, message: ChatMessage) -> None:
"""
Store message and documents for debugging purposes in fileshare.
Structure: gateway/test-chat/obj/m_round_task_action_timestamp/documentlist_label/documents
Structure: gateway/test-chat/messages/m_round_task_action_timestamp/documentlist_label/documents
Args:
message: ChatMessage object to store
@ -1061,21 +1061,21 @@ class ChatObjects:
try:
import os
import json
from datetime import datetime
from datetime import datetime, UTC
# Create base debug directory
debug_root = "./test-chat/obj"
debug_root = "./test-chat/messages"
os.makedirs(debug_root, exist_ok=True)
# Generate timestamp
timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
timestamp = datetime.now(UTC).strftime('%Y%m%d-%H%M%S-%f')[:-3]
# Create message folder name: m_round_task_action_timestamp
# Use actual values from message, not defaults
round_str = str(message.roundNumber) if message.roundNumber is not None else "0"
task_str = str(message.taskNumber) if message.taskNumber is not None else "0"
action_str = str(message.actionNumber) if message.actionNumber is not None else "0"
message_folder = f"m{timestamp}_{round_str}_{task_str}_{action_str}"
message_folder = f"{timestamp}_m_{round_str}_{task_str}_{action_str}"
message_path = os.path.join(debug_root, message_folder)
os.makedirs(message_path, exist_ok=True)

View file

@ -559,10 +559,10 @@ class AiService:
# Prepare debug directory TODO TO REMOVE
import os
from datetime import datetime
debug_root = "./test-chat/extraction"
ts = datetime.now().strftime("%Y%m%d-%H%M%S")
debug_dir = os.path.join(debug_root, f"per_chunk_{ts}")
from datetime import datetime, UTC
debug_root = "./test-chat/ai"
ts = datetime.now(UTC).strftime('%Y%m%d-%H%M%S-%f')[:-3]
debug_dir = os.path.join(debug_root, f"{ts}_extraction_per_chunk")
try:
os.makedirs(debug_dir, exist_ok=True)
except Exception:
@ -744,26 +744,93 @@ class AiService:
call_type = self._determineCallType(documents, options.operationType)
options.callType = call_type
# Log the prompt being sent to AI for debugging (before routing) TODO TO REMOVE
try:
# Build the full prompt that will be sent to AI
if placeholders:
full_prompt = prompt
for p in placeholders:
placeholder = f"{{{{KEY:{p.label}}}}}"
full_prompt = full_prompt.replace(placeholder, p.content)
else:
full_prompt = prompt
self._writeAiResponseDebug(
label='ai_prompt_debug',
content=full_prompt,
partIndex=1,
modelName=None,
continuation=False
)
except Exception:
pass
# Handle document generation with specific output format
if outputFormat:
return await self._callAiWithDocumentGeneration(prompt, documents, options, outputFormat, title)
result = await self._callAiWithDocumentGeneration(prompt, documents, options, outputFormat, title)
# Log AI response for debugging TODO TO REMOVE
try:
if isinstance(result, dict) and 'content' in result:
self._writeAiResponseDebug(
label='ai_document_generation',
content=result['content'],
partIndex=1,
modelName=None, # Document generation doesn't return model info
continuation=False
)
except Exception:
pass
return result
if call_type == "planning":
return await self._callAiPlanning(prompt, placeholders_dict, placeholders_meta, options)
result = await self._callAiPlanning(prompt, placeholders_dict, placeholders_meta, options)
# Log AI response for debugging TODO TO REMOVE
try:
self._writeAiResponseDebug(
label='ai_planning',
content=result or "",
partIndex=1,
modelName=None, # Planning doesn't return model info
continuation=False
)
except Exception:
pass
return result
else:
# Set processDocumentsIndividually from the legacy parameter if not set in options
if options.processDocumentsIndividually is None and documents:
options.processDocumentsIndividually = False # Default to batch processing
return await self._callAiText(prompt, documents, options)
# For text calls, we need to build the full prompt with placeholders here
# since _callAiText doesn't handle placeholders directly
if placeholders_dict:
full_prompt = self._buildPromptWithPlaceholders(prompt, placeholders_dict)
else:
full_prompt = prompt
result = await self._callAiText(full_prompt, documents, options)
# Log AI response for debugging (additional logging for text calls) TODO TO REMOVE
try:
self._writeAiResponseDebug(
label='ai_text_main',
content=result or "",
partIndex=1,
modelName=None, # Text calls already log internally
continuation=False
)
except Exception:
pass
return result
def _determineCallType(self, documents: Optional[List[ChatDocument]], operation_type: str) -> str:
"""
Determine call type based on documents and operation type.
Criteria: no documents AND (operationType is "generate_plan" or "analyse_content") -> planning
Criteria: no documents AND operationType is "generate_plan" -> planning
All other cases -> text
"""
has_documents = documents is not None and len(documents) > 0
is_planning_operation = operation_type in [OperationType.GENERATE_PLAN, OperationType.ANALYSE_CONTENT]
is_planning_operation = operation_type == OperationType.GENERATE_PLAN
if not has_documents and is_planning_operation:
return "planning"
@ -857,24 +924,6 @@ class AiService:
logger.debug(f"AI model selected (planning): {getattr(response, 'modelName', 'unknown')}")
except Exception:
pass
# Write full planning response as JSON dump when possible (no duplicates)
try:
import json
content = response.content
cleaned = content.strip()
if cleaned.startswith('```json'):
cleaned = cleaned[7:]
if cleaned.endswith('```'):
cleaned = cleaned[:-3]
cleaned = cleaned.strip()
obj = json.loads(cleaned)
self._writeTraceLog("AI Planning Raw Response", obj)
except Exception:
# Fallback to plain text once
try:
self._writeTraceLog("AI Planning Raw Response", response.content)
except Exception:
pass
return response.content
async def _callAiText(
@ -1027,16 +1076,6 @@ class AiService:
pass
content_first = response.content or ""
merged_content, needs_more = _split_content_and_flag(content_first)
try:
self._writeAiResponseDebug(
label='ai_text',
content=content_first,
partIndex=1,
modelName=getattr(response, 'modelName', None),
continuation=needs_more
)
except Exception:
pass
# Iteratively request next parts if flagged
# Allow configurable max parts via options; default = 1000
@ -1064,16 +1103,6 @@ class AiService:
next_response = await self.aiObjects.call(next_request)
part_text = next_response.content or ""
part_clean, needs_more = _split_content_and_flag(part_text)
try:
self._writeAiResponseDebug(
label='ai_text',
content=part_text,
partIndex=part_index,
modelName=getattr(next_response, 'modelName', None),
continuation=needs_more
)
except Exception:
pass
if part_clean:
# Separate parts clearly
merged_content = (merged_content + "\n\n" + part_clean).strip()
@ -1247,14 +1276,14 @@ class AiService:
pass
def _writeAiResponseDebug(self, label: str, content: str, partIndex: int = 1, modelName: str = None, continuation: bool = None) -> None:
"""Persist raw AI response parts for debugging under test-chat/ai-responses."""
"""Persist raw AI response parts for debugging under test-chat/ai."""
try:
import os
from datetime import datetime, UTC
# Base dir: gateway/test-chat/ai-responses (go up 4 levels from this file)
# Base dir: gateway/test-chat/ai (go up 4 levels from this file)
# .../gateway/modules/services/serviceAi/mainServiceAi.py -> up to gateway root
gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
outDir = os.path.join(gatewayDir, 'test-chat', 'ai-responses')
outDir = os.path.join(gatewayDir, 'test-chat', 'ai')
os.makedirs(outDir, exist_ok=True)
ts = datetime.now(UTC).strftime('%Y%m%d-%H%M%S-%f')[:-3]
suffix = []
@ -1266,7 +1295,7 @@ class AiService:
safeModel = ''.join(c if c.isalnum() or c in ('-', '_') else '-' for c in modelName)
suffix.append(safeModel)
suffixStr = ('_' + '_'.join(suffix)) if suffix else ''
fname = f"{label}_{ts}{suffixStr}.txt"
fname = f"{ts}_{label}{suffixStr}.txt"
fpath = os.path.join(outDir, fname)
with open(fpath, 'w', encoding='utf-8') as f:
f.write(content or '')

View file

@ -92,11 +92,15 @@ def runExtraction(extractorRegistry: ExtractorRegistry, chunkerRegistry: Chunker
parts = non_chunk_parts + chunk_parts
logger.debug(f"runExtraction: Final parts after merging: {len(parts)} (chunks: {len(chunk_parts)})")
# DEBUG: dump parts and chunks to files under @testing_extraction/ TODO TO REMOVE
# DEBUG: dump parts and chunks to files TODO TO REMOVE
try:
base_dir = "./test-chat/extraction"
doc_dir = os.path.join(base_dir, f"extraction_{fileName}")
os.makedirs(doc_dir, exist_ok=True)
base_dir = "./test-chat/ai"
os.makedirs(base_dir, exist_ok=True)
# Generate timestamp for consistent naming
from datetime import datetime, UTC
ts = datetime.now(UTC).strftime('%Y%m%d-%H%M%S-%f')[:-3]
# Write a summary file
summary_lines: List[str] = [f"fileName: {fileName}", f"mimeType: {mimeType}", f"totalParts: {len(parts)}"]
text_index = 0
@ -109,12 +113,16 @@ def runExtraction(extractorRegistry: ExtractorRegistry, chunkerRegistry: Chunker
)
if is_texty and getattr(part, "data", None):
text_index += 1
fname = f"part_{idx:03d}_{'chunk' if is_chunk else 'full'}_{text_index:03d}.txt"
fpath = os.path.join(doc_dir, fname)
fname = f"{ts}_extract_{fileName}_part_{idx:03d}_{'chunk' if is_chunk else 'full'}_{text_index:03d}.txt"
fpath = os.path.join(base_dir, fname)
with open(fpath, "w", encoding="utf-8") as f:
f.write(f"# typeGroup: {part.typeGroup}\n# label: {part.label}\n# chunk: {is_chunk}\n# size: {size}\n\n")
f.write(str(part.data))
with open(os.path.join(doc_dir, "summary.txt"), "w", encoding="utf-8") as f:
# Write summary file
summary_fname = f"{ts}_extract_{fileName}_summary.txt"
summary_fpath = os.path.join(base_dir, summary_fname)
with open(summary_fpath, "w", encoding="utf-8") as f:
f.write("\n".join(summary_lines))
except Exception as _e:
logger.debug(f"Debug dump skipped: {_e}")

View file

@ -309,11 +309,11 @@ class GenerationService:
tuple: (rendered_content, mime_type)
"""
try:
# DEBUG: dump renderer input to @testing_extraction to diagnose JSON+HTML mixtures TODO REMOVE
# DEBUG: dump renderer input to diagnose JSON+HTML mixtures TODO REMOVE
try:
import os
ts = datetime.now(UTC).strftime("%Y%m%d-%H%M%S")
debug_root = "./test-chat/extraction"
debug_root = "./test-chat/ai"
debug_dir = os.path.join(debug_root, f"render_input_{ts}")
os.makedirs(debug_dir, exist_ok=True)
with open(os.path.join(debug_dir, "meta.txt"), "w", encoding="utf-8") as f:

View file

@ -1,8 +0,0 @@
```json
{
"detectedLanguage": "de",
"intent": "Erstelle ein Word-Dokument mit den ersten 1000 Primzahlen.",
"contextItems": [],
"CONTINUATION": false
}
```

View file

@ -1,78 +0,0 @@
FILENAME: first-1000-primes.docx
AI Generated Document
Title Page
AI Generated Document
---
Page 1
**List of the First 1000 Prime Numbers**
This document contains the first 1000 prime numbers, organized into sections of 100 numbers each. Each section is presented in a table format with 5 columns and column headers for clarity and readability. Page numbers are included at the bottom of each page.
---
Section 1: Primes 1 to 100
| Index | Prime Number | Index | Prime Number | Index | Prime Number | Index | Prime Number | Index | Prime Number |
|-------|--------------|-------|--------------|-------|--------------|-------|--------------|-------|--------------|
| 1 | 2 | 2 | 3 | 3 | 5 | 4 | 7 | 5 | 11 |
| 6 | 13 | 7 | 17 | 8 | 19 | 9 | 23 | 10 | 29 |
| 11 | 31 | 12 | 37 | 13 | 41 | 14 | 43 | 15 | 47 |
| 16 | 53 | 17 | 59 | 18 | 61 | 19 | 67 | 20 | 71 |
| 21 | 73 | 22 | 79 | 23 | 83 | 24 | 89 | 25 | 97 |
| 26 | 101 | 27 | 103 | 28 | 107 | 29 | 109 | 30 | 113 |
| 31 | 127 | 32 | 131 | 33 | 137 | 34 | 139 | 35 | 149 |
| 36 | 151 | 37 | 157 | 38 | 163 | 39 | 167 | 40 | 173 |
| 41 | 179 | 42 | 181 | 43 | 191 | 44 | 193 | 45 | 197 |
| 46 | 199 | 47 | 211 | 48 | 223 | 49 | 227 | 50 | 229 |
| 51 | 233 | 52 | 239 | 53 | 241 | 54 | 251 | 55 | 257 |
| 56 | 263 | 57 | 269 | 58 | 271 | 59 | 277 | 60 | 281 |
| 61 | 283 | 62 | 293 | 63 | 307 | 64 | 311 | 65 | 313 |
| 66 | 317 | 67 | 331 | 68 | 337 | 69 | 347 | 70 | 349 |
| 71 | 353 | 72 | 359 | 73 | 367 | 74 | 373 | 75 | 379 |
| 76 | 383 | 77 | 389 | 78 | 397 | 79 | 401 | 80 | 409 |
| 81 | 419 | 82 | 421 | 83 | 431 | 84 | 433 | 85 | 439 |
| 86 | 443 | 87 | 449 | 88 | 457 | 89 | 461 | 90 | 463 |
| 91 | 467 | 92 | 479 | 93 | 487 | 94 | 491 | 95 | 499 |
| 96 | 503 | 97 | 509 | 98 | 521 | 99 | 523 | 100 | 541 |
---
Section 2: Primes 101 to 200
| Index | Prime Number | Index | Prime Number | Index | Prime Number | Index | Prime Number | Index | Prime Number |
|-------|--------------|-------|--------------|-------|--------------|-------|--------------|-------|--------------|
| 101 | 547 | 102 | 557 | 103 | 563 | 104 | 569 | 105 | 571 |
| 106 | 577 | 107 | 587 | 108 | 593 | 109 | 599 | 110 | 601 |
| 111 | 607 | 112 | 613 | 113 | 617 | 114 | 619 | 115 | 631 |
| 116 | 641 | 117 | 643 | 118 | 647 | 119 | 653 | 120 | 659 |
| 121 | 661 | 122 | 673 | 123 | 677 | 124 | 683 | 125 | 691 |
| 126 | 701 | 127 | 709 | 128 | 719 | 129 | 727 | 130 | 733 |
| 131 | 739 | 132 | 743 | 133 | 751 | 134 | 757 | 135 | 761 |
| 136 | 769 | 137 | 773 | 138 | 787 | 139 | 797 | 140 | 809 |
| 141 | 811 | 142 | 821 | 143 | 823 | 144 | 827 | 145 | 829 |
| 146 | 839 | 147 | 853 | 148 | 857 | 149 | 859 | 150 | 863 |
| 151 | 877 | 152 | 881 | 153 | 883 | 154 | 887 | 155 | 907 |
| 156 | 911 | 157 | 919 | 158 | 929 | 159 | 937 | 160 | 941 |
| 161 | 947 | 162 | 953 | 163 | 967 | 164 | 971 | 165 | 977 |
| 166 | 983 | 167 | 991 | 168 | 997 | 169 | 1009 | 170 | 1013 |
| 171 | 1019 | 172 | 1021 | 173 | 1031 | 174 | 1033 | 175 | 1039 |
| 176 | 1049 | 177 | 1051 | 178 | 1061 | 179 | 1063 | 180 | 1069 |
| 181 | 1087 | 182 | 1091 | 183 | 1093 | 184 | 1097 | 185 | 1103 |
| 186 | 1109 | 187 | 1117 | 188 | 1123 | 189 | 1129 | 190 | 1151 |
| 191 | 1153 | 192 | 1163 | 193 | 1171 | 194 | 1181 | 195 | 1187 |
| 196 | 1193 | 197 | 1201 | 198 | 1213 | 199 | 1217 | 200 | 1223 |
---
Section 3: Primes 201 to 300
| Index | Prime Number | Index | Prime Number | Index | Prime Number | Index | Prime Number | Index | Prime Number |
|-------|--------------|-------|--------------|-------|--------------|-------|--------------|-------|--------------|
| 201 | 1229 | 202 | 1231 | 203 | 1237 | 204 | 1249 | 205 | 1259 |
| 206 | 1277 | 207

View file

@ -127,7 +127,9 @@ class MethodOutlook(MethodBase):
clean_query = clean_query.replace('"', '')
# Handle common search operators
if any(op in clean_query.lower() for op in ['from:', 'to:', 'subject:', 'received:', 'hasattachment:']):
# Recognize Graph operators including both singular and plural forms for hasAttachments
lowered = clean_query.lower()
if any(op in lowered for op in ['from:', 'to:', 'subject:', 'received:', 'hasattachment:', 'hasattachments:']):
# This is an advanced search query, return as-is
return clean_query
@ -170,7 +172,9 @@ class MethodOutlook(MethodBase):
return params
# Check if this is a complex search query with multiple operators
if any(op in clean_query.lower() for op in ['from:', 'to:', 'subject:', 'received:', 'hasattachment:']):
# Recognize Graph operators including both singular and plural forms for hasAttachments
lowered = clean_query.lower()
if any(op in lowered for op in ['from:', 'to:', 'subject:', 'received:', 'hasattachment:', 'hasattachments:']):
# This is an advanced search query, use $search
# Microsoft Graph API supports complex search syntax
params["$search"] = f'"{clean_query}"'
@ -222,7 +226,9 @@ class MethodOutlook(MethodBase):
return {}
# Handle search queries (from:, to:, subject:, etc.) - check this FIRST
if any(filter_text.startswith(prefix) for prefix in ['from:', 'to:', 'subject:', 'received:', 'hasattachment:']):
# Support both singular and plural forms for hasAttachments
lt = filter_text.lower()
if any(lt.startswith(prefix) for prefix in ['from:', 'to:', 'subject:', 'received:', 'hasattachment:', 'hasattachments:']):
return {"$search": f'"{filter_text}"'}
# Handle email address filters (only if it's NOT a search query)
@ -1037,165 +1043,6 @@ class MethodOutlook(MethodBase):
logger.error(f"Error checking Drafts folder: {str(e)}")
return ActionResult.isFailure(error=str(e))
@action
async def composeAndSendEmailDirect(self, parameters: Dict[str, Any]) -> ActionResult:
    """
    GENERAL:
    - Purpose: Create and send/prepare email using provided subject, body, and recipients.
    - Input requirements: connectionReference (required); to (required); subject (required); body (required); optional cc, bcc, attachmentDocumentList.
    - Output format: JSON confirmation with draft/send metadata.
    Parameters:
    - connectionReference (str, required): Microsoft connection label.
    - to (list, required): Recipient email addresses.
    - subject (str, required): Email subject.
    - body (str, required): Email body (plain text or HTML).
    - cc (list, optional): CC recipients.
    - bcc (list, optional): BCC recipients.
    - attachmentDocumentList (list, optional): Attachment document references.

    NOTE(review): despite the name, this implementation only CREATES a draft
    (POST to the Drafts folder) and never calls a send endpoint — confirm
    whether sending is intentionally deferred to a separate step.
    """
    try:
        # Pull raw inputs from the generic parameters dict.
        connectionReference = parameters.get("connectionReference")
        to = parameters.get("to")
        subject = parameters.get("subject")
        body = parameters.get("body")
        cc = parameters.get("cc", [])
        bcc = parameters.get("bcc", [])
        attachmentDocumentList = parameters.get("attachmentDocumentList", [])
        # Validate the four mandatory fields up front; fail fast with one message.
        if not connectionReference or not to or not subject or not body:
            return ActionResult.isFailure(error="connectionReference, to, subject, and body are required")
        # Convert single values to lists for all recipient parameters
        # (callers may pass a bare string instead of a list).
        if isinstance(to, str):
            to = [to]
        if isinstance(cc, str):
            cc = [cc]
        if isinstance(bcc, str):
            bcc = [bcc]
        if isinstance(attachmentDocumentList, str):
            attachmentDocumentList = [attachmentDocumentList]
        # Get Microsoft connection (resolved by label/reference).
        connection = self._getMicrosoftConnection(connectionReference)
        if not connection:
            return ActionResult.isFailure(error="No valid Microsoft connection found")
        # Check permissions before attempting any Graph call.
        permissions_ok = await self._checkPermissions(connection)
        if not permissions_ok:
            return ActionResult.isFailure(error="Connection lacks necessary permissions for Outlook operations")
        # Create and send the email message
        try:
            graph_url = "https://graph.microsoft.com/v1.0"
            headers = {
                "Authorization": f"Bearer {connection['accessToken']}",
                "Content-Type": "application/json"
            }
            # Clean and format body content
            cleaned_body = body.strip()
            # Check if body is already HTML.
            # NOTE(review): detection is heuristic (prefix '<html>'/'<body>' or
            # any '<br>') — bodies wrapped differently may be double-wrapped.
            if cleaned_body.startswith('<html>') or cleaned_body.startswith('<body>') or '<br>' in cleaned_body:
                html_body = cleaned_body
            else:
                # Convert plain text to proper HTML formatting
                html_body = cleaned_body.replace('\n', '<br>')
                html_body = f"<html><body>{html_body}</body></html>"
            # Build the email message payload in Microsoft Graph format.
            message = {
                "subject": subject,
                "body": {
                    "contentType": "HTML",
                    "content": html_body
                },
                "toRecipients": [{"emailAddress": {"address": email}} for email in to],
                "ccRecipients": [{"emailAddress": {"address": email}} for email in cc] if cc else [],
                "bccRecipients": [{"emailAddress": {"address": email}} for email in bcc] if bcc else []
            }
            # Add attachments if provided: resolve each document reference to
            # file bytes and embed as base64 fileAttachment entries.
            if attachmentDocumentList:
                message["attachments"] = []
                for attachment_ref in attachmentDocumentList:
                    # Get attachment document from service center
                    attachment_docs = self.services.workflow.getChatDocumentsFromDocumentList([attachment_ref])
                    if attachment_docs:
                        for doc in attachment_docs:
                            file_id = getattr(doc, 'fileId', None)
                            if file_id:
                                try:
                                    file_content = self.services.workflow.getFileData(file_id)
                                    if file_content:
                                        # Normalize to bytes before base64 encoding.
                                        if isinstance(file_content, bytes):
                                            content_bytes = file_content
                                        else:
                                            content_bytes = str(file_content).encode('utf-8')
                                        base64_content = base64.b64encode(content_bytes).decode('utf-8')
                                        attachment = {
                                            "@odata.type": "#microsoft.graph.fileAttachment",
                                            "name": doc.fileName,
                                            "contentType": doc.mimeType or "application/octet-stream",
                                            "contentBytes": base64_content
                                        }
                                        message["attachments"].append(attachment)
                                except Exception as e:
                                    # Best-effort: skip unreadable attachments, keep composing.
                                    logger.error(f"Error reading attachment file {doc.fileName}: {str(e)}")
            # Create the draft message: prefer the resolved Drafts folder,
            # fall back to the default messages endpoint.
            drafts_folder_id = self._getFolderId("Drafts", connection)
            if drafts_folder_id:
                api_url = f"{graph_url}/me/mailFolders/{drafts_folder_id}/messages"
            else:
                api_url = f"{graph_url}/me/messages"
                logger.warning("Could not find Drafts folder, creating draft in default location")
            response = requests.post(api_url, headers=headers, json=message)
            if response.status_code in [200, 201]:
                draft_data = response.json()
                draft_id = draft_data.get("id", "Unknown")
                # Success metadata returned to the caller as a JSON document.
                result_data = {
                    "status": "success",
                    "message": "Email draft created successfully",
                    "draftId": draft_id,
                    "folder": "Drafts (Entwürfe)",
                    "mailbox": connection.get('userEmail', 'Unknown'),
                    "subject": subject,
                    "recipients": to,
                    "cc": cc,
                    "bcc": bcc,
                    "attachments": len(attachmentDocumentList),
                    "timestamp": self.services.utils.getUtcTimestamp()
                }
                return ActionResult(
                    success=True,
                    documents=[ActionDocument(
                        documentName=f"email_draft_created_{self._format_timestamp_for_filename()}.json",
                        documentData=json.dumps(result_data, indent=2),
                        mimeType="application/json"
                    )]
                )
            else:
                logger.error(f"Failed to create draft. Status: {response.status_code}, Response: {response.text}")
                return ActionResult.isFailure(error=f"Failed to create email draft: {response.status_code} - {response.text}")
        except Exception as e:
            # Graph/network-level failure while composing or posting the draft.
            logger.error(f"Error creating email via Microsoft Graph API: {str(e)}")
            return ActionResult.isFailure(error=f"Failed to create email: {str(e)}")
    except Exception as e:
        # Catch-all boundary: any unexpected failure is reported as an ActionResult.
        logger.error(f"Error in composeAndSendEmailDirect: {str(e)}")
        return ActionResult.isFailure(error=str(e))
@action
async def composeAndSendEmailWithContext(self, parameters: Dict[str, Any]) -> ActionResult:
"""
@ -1207,7 +1054,7 @@ class MethodOutlook(MethodBase):
Parameters:
- connectionReference (str, required): Microsoft connection label.
- to (list, required): Recipient email addresses.
- context (str, required): Context for composing the email.
- context (str, required): Detailed context for composing the email.
- documentList (list, optional): Document references for context/attachments.
- cc (list, optional): CC recipients.
- bcc (list, optional): BCC recipients.
@ -1253,6 +1100,14 @@ class MethodOutlook(MethodBase):
chatDocuments = self.services.workflow.getChatDocumentsFromDocumentList(documentList)
# Create AI prompt for email composition
# Build document reference list for AI
doc_references = documentList
doc_list_text = ""
if doc_references:
doc_list_text = f"Available_Document_References: {', '.join(doc_references)}"
else:
doc_list_text = "Available_Document_References: (No documents available for attachment)"
ai_prompt = f"""
Compose a professional email based on the following context and requirements:
@ -1263,15 +1118,19 @@ RECIPIENT: {to}
EMAIL STYLE: {emailStyle}
MAX LENGTH: {maxLength} characters
{doc_list_text}
Please generate:
1. A clear, professional subject line
2. A well-structured email body that addresses the context appropriately
3. Use the {emailStyle} tone throughout
4. Decide which documents from Available_Document_References (if any) should be attached to the email
Return your response in the following JSON format:
{{
"subject": "Your generated subject line here",
"body": "Your generated email body here (can include HTML formatting like <br> for line breaks)"
"body": "Your generated email body here (can include HTML formatting like <br> for line breaks)",
"attachments": ["document_reference", "document_reference", ...]
}}
Make sure the email is:
@ -1279,6 +1138,7 @@ Make sure the email is:
- Clear and concise
- Well-structured with proper greeting and closing
- Relevant to the provided context
- Include only relevant documents as attachments (use EXACT document references from the Available_Document_References)
"""
# Call AI service to generate email content
@ -1291,7 +1151,7 @@ Make sure the email is:
priority="normal",
compressPrompt=False,
compressContext=True,
processDocumentsIndividually=True,
processDocumentsIndividually=False, # Process all documents together for email composition
processingMode="detailed",
resultFormat="json",
maxCost=0.50,
@ -1317,10 +1177,23 @@ Make sure the email is:
email_data = json.loads(json_content)
subject = email_data.get("subject", "")
body = email_data.get("body", "")
ai_attachments = email_data.get("attachments", [])
if not subject or not body:
return ActionResult.isFailure(error="AI did not generate valid subject and body")
# Use AI-selected attachments if provided, otherwise use all documents
if ai_attachments:
# Filter documentList to only include AI-selected attachments
selected_docs = [doc_ref for doc_ref in documentList if doc_ref in ai_attachments]
if selected_docs:
documentList = selected_docs
logger.info(f"AI selected {len(selected_docs)} documents for attachment: {selected_docs}")
else:
logger.warning("AI selected attachments not found in available documents, using all documents")
else:
logger.info("AI did not specify attachments, using all available documents")
except json.JSONDecodeError as e:
logger.error(f"Failed to parse AI response as JSON: {str(e)}")
logger.error(f"AI response content: {ai_response}")
@ -1418,6 +1291,7 @@ Make sure the email is:
"cc": cc,
"bcc": bcc,
"attachments": len(documentList),
"aiSelectedAttachments": ai_attachments if ai_attachments else "all documents",
"aiGenerated": True,
"context": context,
"emailStyle": emailStyle,

View file

@ -155,6 +155,12 @@ class MessageCreator:
messageText += f"{taskObjective}\n\n"
messageText += f"{errorDetails}\n\n"
# Build concise summary to persist for history context
doc_count = len(createdDocuments) if createdDocuments else 0
trimmed_msg = (messageText or "").strip().replace("\n", " ")
if len(trimmed_msg) > 160:
trimmed_msg = trimmed_msg[:157] + "..."
messageData = {
"workflowId": workflow.id,
"role": "assistant",
@ -171,7 +177,8 @@ class MessageCreator:
"roundNumber": currentRound,
"taskNumber": currentTask,
"actionNumber": currentAction,
"actionProgress": "success" if result.success else "fail"
"actionProgress": "success" if result.success else "fail",
"summary": f"{action.execMethod}.{action.execAction}: {doc_count} docs | msg='{trimmed_msg}'"
}
# Add debugging for error messages

View file

@ -204,6 +204,13 @@ class ReactMode(BaseMode):
selection = json.loads(response[jsonStart:jsonEnd])
if 'action' not in selection or not isinstance(selection['action'], str):
raise ValueError("Selection missing 'action' as string")
# Enforce spec: Stage 1 must NOT include 'parameters'
if 'parameters' in selection:
# Remove to avoid accidental carryover
try:
del selection['parameters']
except Exception:
selection['parameters'] = None
return selection
async def _actExecute(self, context: TaskContext, selection: Dict[str, Any], taskStep: TaskStep,
@ -217,13 +224,29 @@ class ReactMode(BaseMode):
methodName, actionName = compoundActionName.split('.', 1)
# Check if parameters are already provided in the selection
if 'parameters' in selection and selection['parameters']:
logger.info("Using parameters from action selection")
parameters = selection['parameters']
# Always request parameters in Stage 2 (spec: Stage 1 must not provide them)
logger.info("Requesting parameters in Stage 2 based on Stage 1 outputs")
# Create a permissive Stage 2 context to avoid TaskContext attribute restrictions
from types import SimpleNamespace
stage2Context = SimpleNamespace()
# Copy essential fields from original context for fallbacks (snake_case for placeholderFactory compatibility)
stage2Context.task_step = getattr(context, 'task_step', None)
stage2Context.workflow_id = getattr(context, 'workflow_id', None)
# Set Stage 1 data directly on the permissive context (snake_case for promptGenerationActionsReact compatibility)
if isinstance(selection, dict):
stage2Context.action_objective = selection.get('actionObjective', '')
stage2Context.parameters_context = selection.get('parametersContext', '')
stage2Context.learnings = selection.get('learnings', [])
else:
logger.info("No parameters in action selection, requesting from AI")
bundle = generateReactParametersPrompt(self.services, context, compoundActionName)
stage2Context.action_objective = ''
stage2Context.parameters_context = ''
stage2Context.learnings = []
# Build and send the Stage 2 parameters prompt (always)
bundle = generateReactParametersPrompt(self.services, stage2Context, compoundActionName)
promptTemplate = bundle.prompt
placeholders = bundle.placeholders
@ -254,20 +277,48 @@ class ReactMode(BaseMode):
try:
paramObj = json.loads(js)
parameters = paramObj.get('parameters', {}) if isinstance(paramObj, dict) else {}
# Log only the parsed JSON object to avoid duplicated raw text
try:
self._writeTraceLog("React Parameters Response", paramObj)
except Exception:
pass
except Exception as e:
logger.error(f"Failed to parse AI parameters response as JSON: {str(e)}")
logger.error(f"Response was: {paramsResp}")
parameters = {}
# Merge Stage 1 resource selections into Stage 2 parameters (only if action expects them)
try:
requiredDocs = selection.get('requiredInputDocuments')
if requiredDocs:
# Ensure list
if isinstance(requiredDocs, list):
# Only attach if target action defines 'documentList'
methodName, actionName = compoundActionName.split('.', 1)
from modules.workflows.processing.shared.methodDiscovery import getActionParameterList, methods as _methods
expectedParams = getActionParameterList(methodName, actionName, _methods)
if 'documentList' in expectedParams:
parameters['documentList'] = requiredDocs
requiredConn = selection.get('requiredConnection')
if requiredConn:
# Only attach if target action defines 'connectionReference'
methodName, actionName = compoundActionName.split('.', 1)
from modules.workflows.processing.shared.methodDiscovery import getActionParameterList, methods as _methods
expectedParams = getActionParameterList(methodName, actionName, _methods)
if 'connectionReference' in expectedParams:
parameters['connectionReference'] = requiredConn
except Exception:
pass
# Apply minimal defaults in-code (language)
if 'language' not in parameters and hasattr(self.services, 'user') and getattr(self.services.user, 'language', None):
parameters['language'] = self.services.user.language
# Write merged parameters to trace BEFORE continuing
try:
mergedParamObj = {
"schema": (paramObj.get('schema') if isinstance(paramObj, dict) else 'parameters_v1'),
"parameters": parameters
}
self._writeTraceLog("React Parameters Response", mergedParamObj)
except Exception:
pass
# Build a synthetic ActionItem for execution routing and labels
currentRound = getattr(self.workflow, 'currentRound', 0)
currentTask = getattr(self.workflow, 'currentTask', 0)
@ -295,7 +346,7 @@ class ReactMode(BaseMode):
for doc in actionResult.documents:
# Extract all available metadata without content
docMetadata = {
"name": getattr(doc, 'documentName', 'Unknown'),
"name": getattr(doc, 'fileName', None) or getattr(doc, 'documentName', 'Unknown'),
"mimeType": getattr(doc, 'mimeType', 'Unknown'),
"size": getattr(doc, 'size', 'Unknown'),
"created": getattr(doc, 'created', 'Unknown'),

View file

@ -11,7 +11,7 @@ NAMING CONVENTION:
MAPPING TABLE (keys function) with usage [taskplan | actionplan | react]:
{{KEY:USER_PROMPT}} -> extractUserPrompt() [taskplan, actionplan, react]
{{KEY:USER_LANGUAGE}} -> extractUserLanguage() [actionplan, react]
{{KEY:WORKFLOW_HISTORY}} -> extractWorkflowHistory() [taskplan, actionplan]
{{KEY:WORKFLOW_HISTORY}} -> extractWorkflowHistory() [taskplan, actionplan, react]
{{KEY:AVAILABLE_CONNECTIONS_INDEX}} -> extractAvailableConnectionsIndex() [actionplan, react]
{{KEY:AVAILABLE_CONNECTIONS_SUMMARY}} -> extractAvailableConnectionsSummary() []
{{KEY:AVAILABLE_DOCUMENTS_SUMMARY}} -> extractAvailableDocumentsSummary() [taskplan, actionplan, react]
@ -54,7 +54,9 @@ def extractUserPrompt(context: Any) -> str:
return 'No request specified'
def extractWorkflowHistory(service: Any, context: Any) -> str:
"""Extract workflow history from context. Maps to {{KEY:WORKFLOW_HISTORY}}"""
"""Extract workflow history from context. Maps to {{KEY:WORKFLOW_HISTORY}}
Reverse-chronological, enriched with message summaries and document labels.
"""
# Prefer explicit workflow on context; else fall back to services.workflow
workflow = None
try:
@ -99,30 +101,10 @@ def extractUserLanguage(service: Any) -> str:
"""Extract user language from service. Maps to {{KEY:USER_LANGUAGE}}"""
return service.user.language if service and service.user else 'en'
def getConnectionReferenceList(services) -> List[str]:
"""Get list of available connections"""
try:
# Get connections from the database
if hasattr(services, 'interfaceDbApp') and hasattr(services, 'user'):
userId = services.user.id
connections = services.interfaceDbApp.getUserConnections(userId)
if connections:
# Format connections as reference strings
connectionRefs = []
for conn in connections:
# Create reference string in format: conn_{authority}_{id}
ref = f"conn_{conn.authority.value}_{conn.id}"
connectionRefs.append(ref)
return connectionRefs
return []
except Exception as e:
logger.error(f"Error getting connection reference list: {str(e)}")
return []
def _computeMessageSummary(msg) -> str:
"""Create a concise summary for a ChatMessage with documents only.
Fields: documentCount, roundNumber, documentsLabel, document names, message (trimmed), success flag.
Fields: documentCount, roundNumber, documentsLabel, document names, message (full), success flag.
"""
try:
docs = getattr(msg, 'documents', []) or []
@ -131,26 +113,39 @@ def _computeMessageSummary(msg) -> str:
document_count = len(docs)
round_number = getattr(msg, 'roundNumber', None) or 0
label = getattr(msg, 'documentsLabel', None) or ""
# Collect up to 3 document names (supports dicts or objects)
# Collect ALL document names (supports ChatDocument objects and dicts)
doc_names = []
for d in docs[:3]:
for d in docs:
name = None
try:
if isinstance(d, dict):
name = d.get('documentName') or d.get('name') or d.get('filename')
# For dict objects, try multiple possible field names
name = d.get('fileName') or d.get('documentName') or d.get('name') or d.get('filename')
else:
name = getattr(d, 'documentName', None) or getattr(d, 'name', None) or getattr(d, 'filename', None)
# For ChatDocument objects, use fileName field
name = getattr(d, 'fileName', None) or getattr(d, 'documentName', None) or getattr(d, 'name', None) or getattr(d, 'filename', None)
except Exception:
name = None
doc_names.append(name or "(unnamed)")
names_part = ", ".join(doc_names) + (" +more" if document_count > 3 else "")
# Format document names in brackets
if doc_names:
names_part = f"({', '.join(doc_names)})"
else:
names_part = "(no documents)"
# Don't truncate the message - show full content
user_message = (getattr(msg, 'message', '') or '').strip().replace("\n", " ")
if len(user_message) > 120:
user_message = user_message[:117] + "..."
# Read success from ChatMessage.success field
success_flag = getattr(msg, 'success', None)
success_text = "success=True" if success_flag is True else ("success=False" if success_flag is False else "success=Unknown")
label_part = f" label='{label}'" if label else ""
return f"Round {round_number}: {document_count} docs - {names_part}{label_part} | {success_text} | msg='{user_message}'"
# Add learning/feedback if available
learning_part = ""
if hasattr(msg, 'summary') and msg.summary and 'learnings' in msg.summary.lower():
learning_part = " | learnings available"
return f"Round {round_number}: {document_count} docs {names_part}{label_part} | {success_text}{learning_part} | msg='{user_message}'"
except Exception:
return ""
@ -171,17 +166,35 @@ def getMessageSummary(msg) -> str:
return ""
def getPreviousRoundContext(services, workflow: Any) -> str:
"""Get previous round context listing only messages that produced documents, using summaries (full history)."""
"""Get enriched context:
- Reverse-chronological ordering
    - Current round first (newest → oldest), then older rounds
- Only messages with documents summarized
- Include available documents snapshot at end
"""
try:
if not workflow:
return "No previous round context available"
lines: List[str] = []
# Summarize ALL messages WITH documents only, in chronological order
# Reverse-chronological, current round first
try:
msgs = getattr(workflow, 'messages', []) or []
current_round = getattr(workflow, 'currentRound', None)
current_round_msgs: List[Any] = []
previous_round_msgs: List[Any] = []
for m in msgs:
if current_round is not None and getattr(m, 'roundNumber', None) == current_round:
current_round_msgs.append(m)
else:
previous_round_msgs.append(m)
for m in reversed(current_round_msgs):
s = getMessageSummary(m)
if s:
lines.append(f"- {s}")
for m in reversed(previous_round_msgs):
s = getMessageSummary(m)
if s:
lines.append(f"- {s}")
@ -222,7 +235,7 @@ def extractReviewContent(context: Any) -> str:
for doc in result.documents:
# Extract all available metadata without content
doc_metadata = {
"name": getattr(doc, 'documentName', 'Unknown'),
"name": getattr(doc, 'fileName', None) or getattr(doc, 'documentName', 'Unknown'),
"mimeType": getattr(doc, 'mimeType', 'Unknown'),
"size": getattr(doc, 'size', 'Unknown'),
"created": getattr(doc, 'created', 'Unknown'),
@ -358,13 +371,11 @@ def extractLatestRefinementFeedback(context: Any) -> str:
def extractAvailableDocumentsSummary(service: Any, context: Any) -> str:
"""Summary of available documents (count only)."""
try:
if hasattr(context, 'workflow') and context.workflow:
documents = service.workflow.getAvailableDocuments(context.workflow)
if documents and documents != "No documents available":
doc_count = documents.count("docList:") + documents.count("docItem:")
return f"{doc_count} documents available from previous tasks"
return "No documents available"
return "No documents available"
except Exception as e:
logger.error(f"Error getting document summary: {str(e)}")
return "No documents available"
@ -372,9 +383,7 @@ def extractAvailableDocumentsSummary(service: Any, context: Any) -> str:
def extractAvailableDocumentsIndex(service: Any, context: Any) -> str:
"""Index of available documents with detailed references for parameter generation."""
try:
if hasattr(context, 'workflow') and context.workflow:
return service.workflow.getAvailableDocuments(context.workflow)
return "No documents available"
except Exception as e:
logger.error(f"Error getting document index: {str(e)}")
return "No documents available"
@ -382,7 +391,7 @@ def extractAvailableDocumentsIndex(service: Any, context: Any) -> str:
def extractAvailableConnectionsSummary(service: Any) -> str:
"""Summary of available connections (count only)."""
try:
connections = getConnectionReferenceList(service)
connections = service.workflow.getConnectionReferenceList()
if connections:
return f"{len(connections)} connections available"
return "No connections available"
@ -393,7 +402,7 @@ def extractAvailableConnectionsSummary(service: Any) -> str:
def extractAvailableConnectionsIndex(service: Any) -> str:
"""Index of available connections with detailed references for parameter generation."""
try:
connections = getConnectionReferenceList(service)
connections = service.workflow.getConnectionReferenceList()
if connections:
return '\n'.join(f"- {conn}" for conn in connections)
return "No connections available"

View file

@ -25,9 +25,14 @@ def generateReactPlanSelectionPrompt(services, context: Any) -> PromptBundle:
PromptPlaceholder(label="USER_PROMPT", content=extractUserPrompt(context), summaryAllowed=False),
PromptPlaceholder(label="AVAILABLE_DOCUMENTS_SUMMARY", content=extractAvailableDocumentsSummary(services, context), summaryAllowed=True),
PromptPlaceholder(label="AVAILABLE_METHODS", content=extractAvailableMethods(services), summaryAllowed=False),
# Provide enriched history context for Stage 1 to craft parametersContext
PromptPlaceholder(label="WORKFLOW_HISTORY", content=extractWorkflowHistory(services, context), summaryAllowed=True),
# Provide deterministic indexes so the planner can choose exact labels
PromptPlaceholder(label="AVAILABLE_DOCUMENTS_INDEX", content=extractAvailableDocumentsIndex(services, context), summaryAllowed=True),
PromptPlaceholder(label="AVAILABLE_CONNECTIONS_INDEX", content=extractAvailableConnectionsIndex(services), summaryAllowed=False),
]
template = """Select one action to advance the task.
template = """Select exactly one action to advance the task.
OBJECTIVE:
{{KEY:USER_PROMPT}}
@ -38,28 +43,94 @@ def generateReactPlanSelectionPrompt(services, context: Any) -> PromptBundle:
AVAILABLE_METHODS:
{{KEY:AVAILABLE_METHODS}}
REPLY: Return only a JSON object with the selected action:
WORKFLOW_HISTORY (reverse-chronological, enriched):
{{KEY:WORKFLOW_HISTORY}}
AVAILABLE_DOCUMENTS_INDEX:
{{KEY:AVAILABLE_DOCUMENTS_INDEX}}
AVAILABLE_CONNECTIONS_INDEX:
{{KEY:AVAILABLE_CONNECTIONS_INDEX}}
REPLY: Return ONLY a JSON object with the following structure (no comments, no extra text):
{{
"action": "method.action_name"
"action": "method.action_name",
"actionObjective": "...",
"learnings": ["..."],
"requiredInputDocuments": ["docList:..."],
"requiredConnection": "connection:..." | null,
"parametersContext": "concise text that Stage 2 will use to set business parameters"
}}
EXAMPLE how to assign references from AVAILABLE_DOCUMENTS_INDEX and AVAILABLE_CONNECTIONS_INDEX:
"requiredInputDocuments": ["docList:msg_47a7a578-e8f2-4ba8-ac66-0dbff40605e0:round8_task1_action1_results","docItem:5d8b7aee-b546-4487-b6a8-835c86f7b186:AI_Generated_Document_20251006-104256.docx"],
"requiredConnection": "connection:msft:p.motsch@valueon.ch:1ae8b8e5-128b-49b8-b1cb-7c632669eeae",
RULES:
1. Use EXACT action names from AVAILABLE_METHODS
2. Return ONLY JSON - no other text
3. Do NOT use markdown code blocks
4. Do NOT add explanations
2. Do NOT output a "parameters" object
3. parametersContext must be short and sufficient for Stage 2
4. Return ONLY JSON - no markdown, no explanations
5. For requiredInputDocuments, use ONLY exact references from AVAILABLE_DOCUMENTS_INDEX (docList:... or docItem:...)
6. For requiredConnection, use ONLY an exact label from AVAILABLE_CONNECTIONS_INDEX
"""
return PromptBundle(prompt=template, placeholders=placeholders)
def generateReactParametersPrompt(services, context: Any, compoundActionName: str) -> PromptBundle:
"""Define placeholders first, then the template; return PromptBundle."""
"""Define placeholders first, then the template; return PromptBundle.
Minimal Stage 2 (no fallback): consumes actionObjective, selectedAction, parametersContext only.
Excludes documents/connections/history entirely.
"""
# derive method/action and parameter list
methodName, actionName = (compoundActionName.split('.', 1) if '.' in compoundActionName else (compoundActionName, ''))
actionParameterList = getActionParameterList(methodName, actionName, methods)
def _formatBusinessParameters(params) -> str:
excluded = {"documentList", "connectionReference"}
# Case 1: params is a list of dicts or objects with 'name'
if isinstance(params, (list, tuple)):
entries = []
for p in params:
try:
if isinstance(p, dict):
name = p.get("name")
if not name or name in excluded:
continue
ptype = p.get("type") or p.get("dataType") or ""
req = p.get("required")
reqTxt = "required" if (req is True or str(req).lower() == "true") else "optional"
desc = p.get("description") or p.get("desc") or ""
entry = f"- {name} ({ptype}, {reqTxt})" + (f": {desc}" if desc else "")
entries.append(entry)
else:
# Try attribute access
name = getattr(p, "name", None)
if not name or name in excluded:
continue
ptype = getattr(p, "type", "") or getattr(p, "dataType", "")
req = getattr(p, "required", False)
reqTxt = "required" if (req is True or str(req).lower() == "true") else "optional"
desc = getattr(p, "description", None) or getattr(p, "desc", None) or ""
entry = f"- {name} ({ptype}, {reqTxt})" + (f": {desc}" if desc else "")
entries.append(entry)
except Exception:
continue
return "\n".join(entries)
# Case 2: params is a string description: filter out lines mentioning excluded names
if isinstance(params, str):
lines = [ln for ln in params.splitlines() if not any(ex in ln for ex in excluded)]
return "\n".join(lines).strip()
# Fallback: plain string
try:
return str(params)
except Exception:
return ""
actionParametersText = _formatBusinessParameters(actionParameterList)
# determine action objective if available, else fall back to user prompt
actionObjective = None
if hasattr(context, 'action_objective') and context.action_objective:
actionObjective = context.action_objective
elif hasattr(context, 'task_step') and context.task_step and getattr(context.task_step, 'objective', None):
@ -67,31 +138,51 @@ def generateReactParametersPrompt(services, context: Any, compoundActionName: st
else:
actionObjective = extractUserPrompt(context)
# Minimal Stage 2 (no fallback)
parametersContext = getattr(context, 'parameters_context', None)
learningsText = ""
try:
# If Stage 1 learnings were attached to context, pass them textually
if hasattr(context, 'learnings') and context.learnings:
if isinstance(context.learnings, (list, tuple)):
learningsText = "\n".join(f"- {str(x)}" for x in context.learnings)
else:
learningsText = str(context.learnings)
except Exception:
learningsText = ""
placeholders: List[PromptPlaceholder] = [
PromptPlaceholder(label="ACTION_OBJECTIVE", content=actionObjective, summaryAllowed=False),
PromptPlaceholder(label="ACTION_PARAMETER_LIST", content=actionParameterList, summaryAllowed=False),
PromptPlaceholder(label="AVAILABLE_DOCUMENTS_INDEX", content=extractAvailableDocumentsIndex(services, context), summaryAllowed=True),
PromptPlaceholder(label="AVAILABLE_CONNECTIONS_INDEX", content=extractAvailableConnectionsIndex(services), summaryAllowed=False),
PromptPlaceholder(label="USER_PROMPT", content=extractUserPrompt(context), summaryAllowed=False),
PromptPlaceholder(label="USER_LANGUAGE", content=extractUserLanguage(services), summaryAllowed=False),
PromptPlaceholder(label="PREVIOUS_ACTION_RESULTS", content=extractPreviousActionResults(context), summaryAllowed=True),
PromptPlaceholder(label="LEARNINGS_AND_IMPROVEMENTS", content=extractLearningsAndImprovements(context), summaryAllowed=True),
PromptPlaceholder(label="LATEST_REFINEMENT_FEEDBACK", content=extractLatestRefinementFeedback(context), summaryAllowed=True),
PromptPlaceholder(label="WORKFLOW_HISTORY", content=extractWorkflowHistory(services, context), summaryAllowed=True),
PromptPlaceholder(label="SELECTED_ACTION", content=compoundActionName, summaryAllowed=False),
PromptPlaceholder(label="PARAMETERS_CONTEXT", content=(parametersContext or ""), summaryAllowed=True),
PromptPlaceholder(label="ACTION_PARAMETERS", content=actionParametersText, summaryAllowed=False),
PromptPlaceholder(label="LEARNINGS", content=learningsText, summaryAllowed=True),
]
template = """Generate parameters for this action.
template = """You are a parameter generator. Set the parameters for this specific action.
## Return ONLY a JSON RESPONSEOBJECT without comments.
ACTION_OBJECTIVE (the objective for this action to fulfill):
CONTEXT AND OBJECTIVE:
{{KEY:ACTION_OBJECTIVE}}
SELECTED_ACTION:
{{KEY:SELECTED_ACTION}}
JSON RESPONSEOBJECT:
CONTEXT FOR PARAMETER VALUES:
{{KEY:PARAMETERS_CONTEXT}}
LEARNINGS (from prior attempts, if any):
{{KEY:LEARNINGS}}
REQUIRED PARAMETERS FOR THIS ACTION (use these exact parameter names):
{{KEY:ACTION_PARAMETERS}}
INSTRUCTIONS:
- Use ONLY the parameter names listed above
- Fill in appropriate values based on the context and objective
- Do NOT invent new parameters
- Do NOT include: documentList, connectionReference, history, documents, connections
REPLY (ONLY JSON):
{{
"schema": "parameters_v1",
"parameters": {{
@ -99,74 +190,9 @@ def generateReactParametersPrompt(services, context: Any, compoundActionName: st
}}
}}
EXAMPLE of the result format to deliver:
{{
"schema": "parameters_v1",
"parameters": {{
"aiPrompt": "...",
"resultType": "docx",
"processingMode": "detailed"
}}
}}
## RULES:
1. Use ONLY parameter names from ACTION_PARAMETER_LIST
2. For connectionReference, use an EXACT label from AVAILABLE_CONNECTIONS_INDEX (do NOT invent labels)
3. Use exact document references from AVAILABLE_DOCUMENTS_INDEX for documentList parameters (do NOT invent names like "doc1"): pick specific docItem references; to include all from a list, use its docList reference
4. Learn from PREVIOUS_ACTION_RESULTS and LEARNINGS_AND_IMPROVEMENTS to avoid repeating mistakes
5. Consider LATEST_REFINEMENT_FEEDBACK when generating parameters
6. Use the ACTION_OBJECTIVE to understand the specific goal for this action
7. Generate parameters that align with the USER_LANGUAGE when applicable
## ACTION_PARAMETER_LIST:
{{KEY:ACTION_PARAMETER_LIST}}
## AVAILABLE_DOCUMENTS_INDEX:
(Use these references in parameter "documentList" if given; to include all docs from a list, pass its docList reference)
{{KEY:AVAILABLE_DOCUMENTS_INDEX}}
## AVAILABLE_CONNECTIONS_INDEX:
{{KEY:AVAILABLE_CONNECTIONS_INDEX}}
(Use an EXACT label here for parameter "connectionReference")
## Example how to assign references from AVAILABLE_DOCUMENTS_INDEX and AVAILABLE_CONNECTIONS_INDEX:
{{
"schema": "parameters_v1",
"parameters": {{
"documentList": ["docList:msg_47a7a578-e8f2-4ba8-ac66-0dbff40605e0:round8_task1_action1_results", "docItem:5d8b7aee-b546-4487-b6a8-835c86f7b186:AI_Generated_Document_20251006-104256.docx"],
"connectionReference": "conn_msft_1ae8b8e5-128b-49b8-b1cb-7c632669eeae",
"aiPrompt": "...",
"resultType": "xlsx",
"processingMode": "basic"
}}
}}
## CONTEXT
USER_REQUEST (final user prompt to deliver):
{{KEY:USER_PROMPT}}
USER_LANGUAGE:
{{KEY:USER_LANGUAGE}}
PREVIOUS_ACTION_RESULTS:
{{KEY:PREVIOUS_ACTION_RESULTS}}
LEARNINGS_AND_IMPROVEMENTS:
{{KEY:LEARNINGS_AND_IMPROVEMENTS}}
LATEST_REFINEMENT_FEEDBACK:
{{KEY:LATEST_REFINEMENT_FEEDBACK}}
WORKFLOW_HISTORY:
{{KEY:WORKFLOW_HISTORY}}
RULES:
- Return ONLY JSON (no markdown, no prose)
- Use only the parameters listed in REQUIRED PARAMETERS FOR THIS ACTION
"""
return PromptBundle(prompt=template, placeholders=placeholders)

View file

@ -524,7 +524,7 @@ class WorkflowManager:
# Add failed log entry
self.services.workflow.createLog({
"workflowId": workflow.id,
"message": f"Workflow failed: {workflow_result.error or 'Unknown error'}",
"message": "Workflow failed: Unknown error",
"type": "error",
"status": "failed",
"progress": 100