fixed react planning

This commit is contained in:
ValueOn AG 2025-10-03 22:40:41 +02:00
parent ebb15da91b
commit 1cbc669970
52 changed files with 1012 additions and 442 deletions

View file

@ -549,7 +549,7 @@ class ChatObjects:
created_documents.append(created_doc)
# Convert to ChatMessage model
return ChatMessage(
chat_message = ChatMessage(
id=createdMessage["id"],
workflowId=createdMessage["workflowId"],
parentMessageId=createdMessage.get("parentMessageId"),
@ -570,6 +570,11 @@ class ChatObjects:
actionMethod=createdMessage.get("actionMethod"),
actionName=createdMessage.get("actionName")
)
# Debug: Store message and documents for debugging TODO REMOVE
self._storeDebugMessageAndDocuments(chat_message)
return chat_message
except Exception as e:
logger.error(f"Error creating workflow message: {str(e)}")
@ -1045,6 +1050,120 @@ class ChatObjects:
return {"items": items}
def _storeDebugMessageAndDocuments(self, message: ChatMessage) -> None:
    """
    Store message and documents for debugging purposes in fileshare.

    Structure: gateway/test-chat/obj/m_round_task_action_timestamp/documentlist_label/documents

    Best-effort only: every failure is logged and swallowed so debug output
    can never break message creation.  TODO REMOVE before release.

    Args:
        message: ChatMessage object to store
    """
    try:
        import os
        import json
        from datetime import datetime

        # Create base debug directory
        debug_root = "./test-chat/obj"
        os.makedirs(debug_root, exist_ok=True)

        # Folder name: m<timestamp>_<round>_<task>_<action>; counters fall
        # back to "0" when unset so the name is always well-formed.
        timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
        round_str = str(message.roundNumber) if message.roundNumber is not None else "0"
        task_str = str(message.taskNumber) if message.taskNumber is not None else "0"
        action_str = str(message.actionNumber) if message.actionNumber is not None else "0"
        message_folder = f"m{timestamp}_{round_str}_{task_str}_{action_str}"
        message_path = os.path.join(debug_root, message_folder)
        os.makedirs(message_path, exist_ok=True)

        # Store message data - build the dict by hand instead of using
        # model_dump() for compatibility across pydantic versions.
        message_dict = {
            "id": message.id,
            "workflowId": message.workflowId,
            "parentMessageId": message.parentMessageId,
            "message": message.message,
            "role": message.role,
            "status": message.status,
            "sequenceNr": message.sequenceNr,
            "publishedAt": message.publishedAt,
            "roundNumber": message.roundNumber,
            "taskNumber": message.taskNumber,
            "actionNumber": message.actionNumber,
            "documentsLabel": message.documentsLabel,
            "actionId": message.actionId,
            "actionMethod": message.actionMethod,
            "actionName": message.actionName,
            "success": message.success,
            "documents": []
        }
        message_file = os.path.join(message_path, "message.json")
        with open(message_file, "w", encoding="utf-8") as f:
            # default=str stringifies non-JSON-native values (e.g. timestamps)
            json.dump(message_dict, f, indent=2, ensure_ascii=False, default=str)

        # Store raw message content as plain text for quick inspection
        if message.message:
            message_text_file = os.path.join(message_path, "message_text.txt")
            with open(message_text_file, "w", encoding="utf-8") as f:
                f.write(str(message.message))

        # Store document metadata, if any.
        # FIX: the label comes from the message (message.documentsLabel), not
        # from each document, so every document lands in the same label
        # folder - the previous per-document grouping dict always produced
        # exactly one group and has been removed.  On-disk layout unchanged.
        if message.documents:
            logger.info(f"Debug: Processing {len(message.documents)} documents")

            # Sanitize the label for filesystem use
            label = message.documentsLabel or 'default'
            safe_label = "".join(c for c in str(label) if c.isalnum() or c in (' ', '-', '_')).rstrip()
            safe_label = safe_label.replace(' ', '_')
            if not safe_label:
                safe_label = "default"
            label_folder = os.path.join(message_path, safe_label)
            os.makedirs(label_folder, exist_ok=True)
            logger.info(f"Debug: Created document folder: {label_folder}")

            # One metadata JSON per document, numbered for stable ordering
            for i, doc in enumerate(message.documents):
                doc_meta = {
                    "id": doc.id,
                    "messageId": doc.messageId,
                    "fileId": doc.fileId,
                    "fileName": doc.fileName,
                    "fileSize": doc.fileSize,
                    "mimeType": doc.mimeType,
                    "roundNumber": doc.roundNumber,
                    "taskNumber": doc.taskNumber,
                    "actionNumber": doc.actionNumber,
                    "actionId": doc.actionId
                }
                doc_meta_file = os.path.join(label_folder, f"document_{i+1:03d}_metadata.json")
                with open(doc_meta_file, "w", encoding="utf-8") as f:
                    json.dump(doc_meta, f, indent=2, ensure_ascii=False, default=str)
                logger.info(f"Debug: Stored document metadata for {doc.fileName}")

        logger.info(f"Debug: Stored message and documents in {message_path}")
    except Exception as e:
        # Debug helper must never raise into the caller
        logger.error(f"Debug: Failed to store message and documents: {e}")
        import traceback
        logger.error(f"Debug: Traceback: {traceback.format_exc()}")
def getInterface(currentUser: Optional[User] = None) -> 'ChatObjects':
"""

View file

@ -535,7 +535,7 @@ class AiService:
# Prepare debug directory TODO TO REMOVE
import os
from datetime import datetime
debug_root = "../local/testing_extraction"
debug_root = "./test-chat/extraction"
ts = datetime.now().strftime("%Y%m%d-%H%M%S")
debug_dir = os.path.join(debug_root, f"per_chunk_{ts}")
try:

View file

@ -94,7 +94,7 @@ def runExtraction(extractorRegistry: ExtractorRegistry, chunkerRegistry: Chunker
logger.debug(f"runExtraction: Final parts after merging: {len(parts)} (chunks: {len(chunk_parts)})")
# DEBUG: dump parts and chunks to files under @testing_extraction/ TODO TO REMOVE
try:
base_dir = "../local/testing_extraction"
base_dir = "./test-chat/extraction"
doc_dir = os.path.join(base_dir, f"extraction_{fileName}")
os.makedirs(doc_dir, exist_ok=True)
# Write a summary file

View file

@ -313,7 +313,7 @@ class GenerationService:
try:
import os
ts = datetime.now(UTC).strftime("%Y%m%d-%H%M%S")
debug_root = "../local/testing_extraction"
debug_root = "./test-chat/extraction"
debug_dir = os.path.join(debug_root, f"render_input_{ts}")
os.makedirs(debug_dir, exist_ok=True)
with open(os.path.join(debug_dir, "meta.txt"), "w", encoding="utf-8") as f:

View file

@ -60,7 +60,9 @@ OUTPUT POLICY:
- Include all necessary information
- Valid CSV that can be imported
Generate the complete CSV report:
CRITICAL: Use the actual data from the source documents to create the content. Do not generate placeholder text or templates. Extract and use the real data provided in the source documents to create meaningful content.
Generate the complete CSV report using the actual data from the source documents:
"""
async def render(self, extracted_content: str, title: str) -> Tuple[str, str]:

View file

@ -87,7 +87,9 @@ OUTPUT POLICY:
- Professional document format
- Include all necessary information
Generate the complete DOCX report content:
CRITICAL: Use the actual data from the source documents to create the content. Do not generate placeholder text or templates. Extract and use the real data provided in the source documents to create meaningful content.
Generate the complete DOCX report content using the actual data from the source documents:
"""
async def render(self, extracted_content: str, title: str) -> Tuple[str, str]:

View file

@ -97,7 +97,9 @@ OUTPUT POLICY:
- Professional spreadsheet format
- Include all necessary information
Generate the complete Excel report data:
CRITICAL: Use the actual data from the source documents to create the content. Do not generate placeholder text or templates. Extract and use the real data provided in the source documents to create meaningful content.
Generate the complete Excel report data using the actual data from the source documents:
"""
async def render(self, extracted_content: str, title: str) -> Tuple[str, str]:

View file

@ -57,7 +57,9 @@ OUTPUT POLICY:
- Include all necessary CSS inline
- Make it look professional and polished
Generate the complete HTML report:
CRITICAL: Use the actual data from the source documents to create the content. Do not generate placeholder text or templates. Extract and use the real data provided in the source documents to create meaningful content.
Generate the complete HTML report using the actual data from the source documents:
"""
async def render(self, extracted_content: str, title: str) -> Tuple[str, str]:

View file

@ -65,7 +65,9 @@ OUTPUT POLICY:
- Include all necessary information
- Valid JSON that can be parsed
Generate the complete JSON report:
CRITICAL: Use the actual data from the source documents to create the content. Do not generate placeholder text or templates. Extract and use the real data provided in the source documents to create meaningful content.
Generate the complete JSON report using the actual data from the source documents:
"""
async def render(self, extracted_content: str, title: str) -> Tuple[str, str]:

View file

@ -59,7 +59,9 @@ OUTPUT POLICY:
- Professional appearance with good structure
- Include all necessary information
Generate the complete Markdown report:
CRITICAL: Use the actual data from the source documents to create the content. Do not generate placeholder text or templates. Extract and use the real data provided in the source documents to create meaningful content.
Generate the complete Markdown report using the actual data from the source documents:
"""
async def render(self, extracted_content: str, title: str) -> Tuple[str, str]:

View file

@ -78,7 +78,9 @@ OUTPUT POLICY:
- Professional document format
- Include all necessary information
Generate the complete PDF report content:
CRITICAL: Use the actual data from the source documents to create the content. Do not generate placeholder text or templates. Extract and use the real data provided in the source documents to create meaningful content.
Generate the complete PDF report content using the actual data from the source documents:
"""
async def render(self, extracted_content: str, title: str) -> Tuple[str, str]:

View file

@ -88,7 +88,9 @@ OUTPUT POLICY:
- Preserve code structure when appropriate
- Include all necessary information
Generate the complete text report:
CRITICAL: Use the actual data from the source documents to create the content. Do not generate placeholder text or templates. Extract and use the real data provided in the source documents to create meaningful content.
Generate the complete text report using the actual data from the source documents:
"""
async def render(self, extracted_content: str, title: str) -> Tuple[str, str]:

View file

@ -534,4 +534,4 @@ class WorkflowService:
return self.interfaceDbChat.createLog(logData)
except Exception as e:
logger.error(f"Error creating log: {str(e)}")
raise
raise

View file

@ -30,12 +30,20 @@ class MethodAi(MethodBase):
@action
async def process(self, parameters: Dict[str, Any]) -> ActionResult:
"""
Perform a generic AI call with optional document references, producing plain text output
AI text processing and analysis - returns plain text only, NO document generation
USE FOR: Text analysis, data processing, content generation, research, Q&A, brainstorming, summarization, translation, code generation
DO NOT USE FOR: Creating formatted documents (Word, PDF, Excel), document generation, file creation
INPUT REQUIREMENTS: Requires aiPrompt parameter (the question or task for AI)
OUTPUT FORMAT: Plain text only (.txt, .json, .md, .csv, .xml) - NO binary files
DEPENDENCIES: None - can work standalone
WORKFLOW POSITION: Use for analysis, research, or text processing tasks
Parameters:
aiPrompt (str): The AI prompt for processing
documentList (list, optional): List of document references to include in context
expectedDocumentFormat (str, optional): Preferred output extension (string or dict). Note: This action only returns plain text content.
resultType (str, optional): Output format type - use 'txt', 'json', 'md', 'csv', or 'xml' (defaults to 'txt')
processingMode (str, optional): Processing mode - use 'basic', 'advanced', or 'detailed' (defaults to 'basic')
includeMetadata (bool, optional): Whether to include metadata (default: True)
operationType (str, optional): Operation type - use 'general', 'generate_plan', 'analyse_content', 'generate_content', 'web_research', 'image_analysis', or 'image_generation'
@ -49,7 +57,7 @@ class MethodAi(MethodBase):
documentList = parameters.get("documentList", [])
if isinstance(documentList, str):
documentList = [documentList]
expectedDocumentFormat = parameters.get("expectedDocumentFormat", "")
resultType = parameters.get("resultType", "txt")
processingMode = parameters.get("processingMode", "basic")
includeMetadata = parameters.get("includeMetadata", True)
operationType = parameters.get("operationType", "general")
@ -63,19 +71,24 @@ class MethodAi(MethodBase):
error="AI prompt is required"
)
# Determine output format first (needed for context building)
output_extension = ".txt" # Default
output_mime_type = "text/plain" # Default
# Validate and determine output format
valid_result_types = ["txt", "json", "md", "csv", "xml"]
if resultType not in valid_result_types:
return ActionResult.isFailure(
error=f"Invalid resultType '{resultType}'. Must be one of: {', '.join(valid_result_types)}"
)
if expectedDocumentFormat:
if isinstance(expectedDocumentFormat, dict):
output_extension = expectedDocumentFormat.get("extension", ".txt")
output_mime_type = expectedDocumentFormat.get("mimeType", "text/plain")
else:
# If it's a string, treat it as the extension
output_extension = expectedDocumentFormat
output_mime_type = "text/plain"
logger.info(f"Using expected format: {output_extension} ({output_mime_type})")
# Map resultType to file extension and MIME type
format_mapping = {
"txt": (".txt", "text/plain"),
"json": (".json", "application/json"),
"md": (".md", "text/markdown"),
"csv": (".csv", "text/csv"),
"xml": (".xml", "application/xml")
}
output_extension, output_mime_type = format_mapping[resultType]
logger.info(f"Using result type: {resultType} -> {output_extension} ({output_mime_type})")
# Get ChatDocuments for AI service - let AI service handle all document processing
chatDocuments = []
@ -96,32 +109,49 @@ class MethodAi(MethodBase):
# Note: customInstructions parameter was removed as it's not defined in the method signature
# Add format guidance to prompt
if expectedDocumentFormat:
enhanced_prompt += f"\n\nPlease try to deliver the result in {output_extension.upper()} format. If you cannot deliver in that specific format, please use an appropriate alternative format and include a comment explaining the format used."
if resultType != "txt":
enhanced_prompt += f"\n\nPlease deliver the result in {resultType.upper()} format. Ensure the output follows the proper {resultType.upper()} syntax and structure."
# Call AI service - it will handle all document processing internally
logger.info(f"Executing AI call with mode: {processingMode}, prompt length: {len(enhanced_prompt)}")
if chatDocuments:
logger.info(f"Including {len(chatDocuments)} documents for AI processing")
# Add JSON format instruction for structured response
json_instruction = """
# Add format-specific instruction for structured response
if resultType == "json":
format_instruction = """
Please return your response in the following JSON format:
{{
"documents": [
{{
"data": "your actual content here",
"mimeType": "appropriate/mime-type",
"comment": "optional comment about format or content"
"mimeType": "application/json",
"comment": "optional comment about content"
}}
]
}}
If you need to return multiple documents, add more objects to the documents array. The data field should contain the actual content, mimeType should be appropriate for the content format, and comment is optional.
The data field should contain valid JSON content.
"""
else:
format_instruction = f"""
Please return your response in the following JSON format:
{{
"documents": [
{{
"data": "your actual content here in {resultType.upper()} format",
"mimeType": "{output_mime_type}",
"comment": "optional comment about content"
}}
]
}}
The data field should contain the content in {resultType.upper()} format.
"""
call_prompt = enhanced_prompt + json_instruction
call_prompt = enhanced_prompt + format_instruction
output_format = output_extension.replace('.', '') or 'txt'
@ -150,7 +180,7 @@ If you need to return multiple documents, add more objects to the documents arra
try:
import os
from datetime import datetime
debug_root = "../local/testing_extraction"
debug_root = "./test-chat/extraction"
ts = datetime.now(UTC).strftime("%Y%m%d-%H%M%S")
debug_dir = os.path.join(debug_root, f"method_ai_{ts}")
os.makedirs(debug_dir, exist_ok=True)
@ -243,7 +273,7 @@ If you need to return multiple documents, add more objects to the documents arra
# Reuse the same debug_dir if created above; otherwise create a new one
import os
from datetime import datetime
debug_root = "../local/testing_extraction"
debug_root = "./test-chat/extraction"
ts = datetime.now(UTC).strftime("%Y%m%d-%H%M%S")
debug_dir = os.path.join(debug_root, f"method_ai_{ts}")
os.makedirs(debug_dir, exist_ok=True)
@ -274,7 +304,15 @@ If you need to return multiple documents, add more objects to the documents arra
@action
async def webResearch(self, parameters: Dict[str, Any]) -> ActionResult:
"""
Perform comprehensive web research using the full workflow.
Comprehensive web research and information gathering from the internet
USE FOR: Finding current information, researching topics, gathering external data, fact-checking, market research
DO NOT USE FOR: Processing local documents, creating formatted reports, email operations
INPUT REQUIREMENTS: Requires user_prompt parameter (the research question or topic)
OUTPUT FORMAT: JSON with research results, sources, and analysis
DEPENDENCIES: Requires internet connection and web search capabilities
WORKFLOW POSITION: Use when external information is needed, before document processing
Parameters:
user_prompt (str): The user input or question to investigate

View file

@ -31,7 +31,15 @@ class MethodDocument(MethodBase):
@action
async def extract(self, parameters: Dict[str, Any]) -> ActionResult:
"""
Extract content from any document using AI prompt.
Extract and analyze content from existing documents using AI
USE FOR: Analyzing documents, extracting specific information, summarizing content, finding patterns, data extraction
DO NOT USE FOR: Creating new documents, generating reports, web research, email operations
INPUT REQUIREMENTS: Requires documentList (existing documents) and prompt (what to extract)
OUTPUT FORMAT: Plain text extracted content (.txt files)
DEPENDENCIES: Requires existing documents in documentList parameter
WORKFLOW POSITION: Use after documents are available, before generating reports
Parameters:
documentList (list): Document list reference(s) - List of document references to extract content from
@ -183,9 +191,17 @@ class MethodDocument(MethodBase):
@action
async def generateReport(self, parameters: Dict[str, Any]) -> ActionResult:
async def generate(self, parameters: Dict[str, Any]) -> ActionResult:
"""
Generate report from multiple documents using AI.
Generate formatted documents and reports from source documents - creates actual files (Word, PDF, Excel, etc.)
USE FOR: Creating formatted documents, reports, presentations, spreadsheets, structured outputs, professional documents
DO NOT USE FOR: Simple text analysis, Q&A, web research, email operations
INPUT REQUIREMENTS: Requires documentList (source documents) and prompt (what kind of report to generate)
OUTPUT FORMAT: Formatted documents (.html, .pdf, .docx, .txt, .md, .json, .csv, .xlsx)
DEPENDENCIES: Requires existing documents in documentList parameter
WORKFLOW POSITION: Use after document analysis, as final output generation step
Parameters:
documentList (list): Document list reference(s) - List of document references to include in report
@ -384,155 +400,4 @@ class MethodDocument(MethodBase):
# Return minimal fallback content
return f"Error extracting content: {str(e)}"
async def _generateHtmlReport(self, chatDocuments: List[Any], title: str, includeMetadata: bool, prompt: str) -> str:
    """
    Generate a comprehensive HTML report using AI from all input documents.

    Args:
        chatDocuments: Project document objects; fileId/fileName/mimeType
            attributes are read where present.
        title: Report title used in the HTML <title> and heading.
        includeMetadata: When False, metadata is excluded from extraction.
        prompt: User prompt describing the desired report content.

    Returns:
        A complete, self-contained HTML document as a string.  When no
        document yields readable text, a minimal placeholder HTML page
        is returned instead.

    Raises:
        Exception: When the AI call returns empty content - AI generation
            is mandatory for this report.
    """
    try:
        # Filter out empty documents and collect their readable content
        validDocuments = []
        allContent = []
        for doc in chatDocuments:
            content = ""
            logger.info(f"Processing document: type={type(doc)}")
            # Use new extraction service for each document
            try:
                # BUGFIX: the original also passed operationType,
                # processDocumentsIndividually, chunkAllowed and
                # mergeStrategy here - none of those names exist in this
                # function's scope, so every call raised NameError.
                # Rely on the extraction service's own defaults instead.
                extraction_options = {
                    "prompt": prompt
                }
                # Only override metadata handling when explicitly disabled
                if not includeMetadata:
                    extraction_options["includeMetadata"] = False
                # Extract content using new service
                extracted_list = self.services.extraction.extractContent(
                    documents=[doc],
                    options=extraction_options
                )
                ec = extracted_list[0] if extracted_list else None
                if ec and hasattr(ec, 'parts'):
                    # Keep only human-readable part types
                    for part in ec.parts:
                        try:
                            if part.typeGroup in ("text", "table", "structure") and part.data:
                                content += part.data + " "
                        except Exception:
                            continue
                    if content.strip():
                        logger.info(f" Retrieved content from file: {len(content)} characters")
                    else:
                        logger.info(f" No readable text content found (binary file)")
                else:
                    logger.info(f" No content extracted (binary file)")
            except Exception as e:
                logger.info(f" Could not extract content (binary file): {str(e)}")
            # Skip empty documents
            if content and content.strip():
                validDocuments.append(doc)
                allContent.append(f"Document: {doc.fileName}\n{content}\n")
                logger.info(f" Added document to valid documents list")
            else:
                logger.info(f" Skipping document with no readable text content")
        if not validDocuments:
            # No readable content; return a minimal valid HTML document
            timestamp = int(self.services.utils.getUtcTimestamp())
            return f"<!DOCTYPE html><html><head><meta charset=\"UTF-8\"><title>{title}</title></head><body><h1>{title}</h1><p>Keine auswertbaren Inhalte gefunden.</p><p>Generated: {timestamp}</p></body></html>"
        # Create AI prompt for comprehensive report generation using user's prompt
        combinedContent = "\n\n".join(allContent)
        aiPrompt = f"""
{prompt}
Report Title: {title}
OUTPUT POLICY:
- Return ONLY a complete, raw HTML document.
- Start with: <!DOCTYPE html>
- Must include: <html>, <head> (with <meta charset="UTF-8"> and <title>), and <body>.
- The response must be valid, self-contained HTML suitable for saving as .html.
Structure:
- Title and short subtitle
- Executive summary
- Sections with clear headings
- Use tables for structured data when helpful
- Key findings and recommendations
- Generation date and number of documents
Quality and design requirements:
- Use clear, professional, and accessible styling in a <style> block
- Apply clean layout, spacing, and visual hierarchy for headings
- Keep HTML and CSS standards-compliant and lightweight
SOURCE DOCUMENT CONTENT:
---START---
{combinedContent}
---END---
"""
        # Call AI to generate the report
        logger.info(f"Generating AI report for {len(validDocuments)} documents")
        # Build ChatDocument list so the AI service receives raw file data;
        # best-effort: documents without retrievable data are skipped.
        documents = []
        try:
            for d in validDocuments:
                try:
                    data = self.services.workflow.getFileData(d.fileId) if hasattr(d, 'fileId') else None
                    if data:
                        documents.append(ChatDocument(fileData=data, fileName=d.fileName, mimeType=d.mimeType))
                except Exception:
                    continue
        except Exception:
            documents = None
        aiReport = await self.services.ai.callAi(
            prompt=aiPrompt,
            documents=documents or None,
            options=AiCallOptions(
                operationType=OperationType.GENERATE_CONTENT,  # Using GENERATE_CONTENT for report generation
                priority=Priority.QUALITY,
                compressPrompt=False,
                compressContext=True,
                processDocumentsIndividually=True,
                resultFormat="html",
                processingMode="detailed",
                maxCost=0.08,
                maxProcessingTime=90
            )
        )
        # If AI call fails, return error - AI is crucial for report generation
        if not aiReport or aiReport.strip() == "":
            logger.error("AI report generation failed - AI is crucial for this action")
            raise Exception("AI report generation failed - AI is required for report generation")
        # Clean up the AI response: strip surrounding code fences if present
        aiReport = aiReport.strip()
        if aiReport.startswith("```") and aiReport.endswith("```"):
            lines = aiReport.split('\n')
            if len(lines) >= 2:
                aiReport = '\n'.join(lines[1:-1]).strip()
        cleaned = aiReport.strip()
        # Return exactly what we have (no wrapping)
        return cleaned
    except Exception as e:
        logger.error(f"Error generating AI report: {str(e)}")
        # Re-raise the error - AI is crucial for report generation
        raise

View file

@ -292,7 +292,15 @@ class MethodOutlook(MethodBase):
@action
async def readEmails(self, parameters: Dict[str, Any]) -> ActionResult:
"""
Read emails from Outlook
Read emails from Microsoft Outlook mailbox
USE FOR: Reading emails from Outlook, checking mailbox contents, retrieving email data
DO NOT USE FOR: Sending emails, composing emails, web research, document generation
INPUT REQUIREMENTS: Requires connectionReference (Microsoft connection)
OUTPUT FORMAT: JSON with email data and metadata
DEPENDENCIES: Requires Microsoft connection, requires internet access
WORKFLOW POSITION: Use for email analysis, before composing responses
Parameters:
connectionReference (str): Reference to the Microsoft connection
@ -445,8 +453,13 @@ class MethodOutlook(MethodBase):
"""
Send email via Outlook using composed email content
This action takes a composed email document and sends it via Outlook.
The composed email must contain all necessary email details (recipients, subject, body, attachments).
USE FOR: Sending emails via Microsoft Outlook, executing composed email drafts
DO NOT USE FOR: Composing emails, reading emails, web research, document generation
INPUT REQUIREMENTS: Requires connectionReference (Microsoft connection) and composedEmail (from composeEmail action)
OUTPUT FORMAT: JSON with send status and draft information
DEPENDENCIES: REQUIRES composedEmail from composeEmail action, requires Microsoft connection
WORKFLOW POSITION: Use after composeEmail action, final step in email workflow
Parameters:
connectionReference (str): Reference to the Microsoft connection
@ -768,7 +781,15 @@ class MethodOutlook(MethodBase):
@action
async def searchEmails(self, parameters: Dict[str, Any]) -> ActionResult:
"""
Search emails in Outlook
Search emails in Microsoft Outlook mailbox
USE FOR: Finding specific emails, searching mailbox contents, filtering email data
DO NOT USE FOR: Sending emails, composing emails, web research, document generation
INPUT REQUIREMENTS: Requires connectionReference (Microsoft connection) and query (search terms)
OUTPUT FORMAT: JSON with search results and email data
DEPENDENCIES: Requires Microsoft connection, requires internet access
WORKFLOW POSITION: Use for finding specific emails, before reading or responding
Parameters:
connectionReference (str): Reference to the Microsoft connection
@ -1352,11 +1373,13 @@ class MethodOutlook(MethodBase):
"""
Compose email content using AI based on context and requirements
This action uses AI to generate professional email content including:
- Subject line
- Body content
- Recipient suggestions
- Attachment recommendations
USE FOR: Creating email drafts, composing professional emails, generating email content with document context
DO NOT USE FOR: Sending emails, reading emails, web research, document generation
INPUT REQUIREMENTS: Requires context parameter (email requirements/description)
OUTPUT FORMAT: JSON document with email structure (to, subject, body, cc, bcc, attachments)
DEPENDENCIES: None - can work standalone, optional documentList for context
WORKFLOW POSITION: Use before sendEmail action, can be used with document analysis results
Parameters:
context (str): Email context/requirements

View file

@ -443,7 +443,15 @@ class MethodSharepoint(MethodBase):
@action
async def findDocumentPath(self, parameters: Dict[str, Any]) -> ActionResult:
"""
Find documents/folders by searching their NAMES across SharePoint sites.
Find documents and folders by searching their names across SharePoint sites
USE FOR: Locating SharePoint documents, finding folders, searching for specific files, discovering content
DO NOT USE FOR: Reading document content, uploading files, email operations, web research
INPUT REQUIREMENTS: Requires connectionReference (Microsoft connection) and searchQuery (what to find)
OUTPUT FORMAT: JSON with found documents/folders and their paths
DEPENDENCIES: Requires Microsoft connection, requires internet access
WORKFLOW POSITION: Use first to locate documents, before readDocuments or uploadDocument
Parameters:
connectionReference (str): Microsoft connection reference
@ -814,7 +822,15 @@ class MethodSharepoint(MethodBase):
@action
async def readDocuments(self, parameters: Dict[str, Any]) -> ActionResult:
"""
Read documents from SharePoint across all accessible sites
Read documents from SharePoint and extract their content
USE FOR: Reading SharePoint document content, extracting text from files, processing documents
DO NOT USE FOR: Finding documents, uploading files, email operations, web research
INPUT REQUIREMENTS: Requires connectionReference (Microsoft connection) and documentList (from findDocumentPath)
OUTPUT FORMAT: Documents with extracted content and metadata
DEPENDENCIES: Requires Microsoft connection, requires documentList from findDocumentPath action
WORKFLOW POSITION: Use after findDocumentPath, before document analysis or generation
Parameters:
documentList (list): Reference(s) to the document list to read
@ -1090,7 +1106,15 @@ class MethodSharepoint(MethodBase):
@action
async def uploadDocument(self, parameters: Dict[str, Any]) -> ActionResult:
"""
Upload documents to SharePoint across accessible sites
Upload documents to SharePoint sites
USE FOR: Uploading files to SharePoint, storing documents, saving generated content
DO NOT USE FOR: Reading documents, finding documents, email operations, web research
INPUT REQUIREMENTS: Requires connectionReference (Microsoft connection), documentList (files to upload), and fileNames
OUTPUT FORMAT: JSON with upload status and file information
DEPENDENCIES: Requires Microsoft connection, requires documents to upload
WORKFLOW POSITION: Use after document generation, as final storage step
Parameters:
connectionReference (str): Reference to the Microsoft connection
@ -1444,6 +1468,14 @@ class MethodSharepoint(MethodBase):
"""
List documents in SharePoint folders across accessible sites
USE FOR: Browsing SharePoint folders, listing available documents, exploring content structure
DO NOT USE FOR: Reading document content, uploading files, email operations, web research
INPUT REQUIREMENTS: Requires connectionReference (Microsoft connection)
OUTPUT FORMAT: JSON with document list and folder structure
DEPENDENCIES: Requires Microsoft connection, requires internet access
WORKFLOW POSITION: Use for exploring SharePoint content, before findDocumentPath
Parameters:
connectionReference (str): Reference to the Microsoft connection
pathObject (str, optional): Path object to locate documents. This can ONLY be a reference to a result from sharepoint.findDocumentPath action

View file

@ -48,7 +48,7 @@ from modules.workflows.processing.promptFactoryPlaceholders import (
extractUserLanguage,
extractReviewContent
)
from modules.workflows.processing.promptFactory import methods
from modules.workflows.processing.promptFactory import methods, getEnhancedDocumentContext
from modules.workflows.processing.executionState import should_continue
from modules.datamodels.datamodelAi import AiCallOptions, OperationType, ProcessingMode, Priority
@ -305,17 +305,142 @@ class HandlingTasks:
message = self.services.interfaceDbChat.createMessage(message_data)
if message:
workflow.messages.append(message)
async def createReactActionMessage(self, workflow, selection, step, max_steps, task_index, message_type, result=None, observation=None):
    """Create a user-friendly chat message for a React workflow action.

    Emits one message per phase of an action:
      - message_type == "before": announces what the action is about to do.
      - message_type == "after": summarizes the action's outcome.

    Args:
        workflow: Workflow object; message is appended to workflow.messages.
        selection: Planner selection dict containing an 'action' dict with
            'method' and 'name' keys.
        step: 1-based React step number.
        max_steps: Maximum number of React steps for this task.
        task_index: Index of the current task (stored as taskNumber).
        message_type: "before" or "after"; anything else is logged and ignored.
        result: ActionResult of the executed action (only for "after").
        observation: Observation dict built from the result (only for "after").
    """
    try:
        action = selection.get('action', {})
        method = action.get('method', '')
        action_name = action.get('name', '')

        # Language for the AI-generated, user-facing text; default English.
        user_language = self.services.user.language if self.services and self.services.user else 'en'

        if message_type == "before":
            # Message BEFORE action execution: describe the intention.
            user_message = await self.generateActionIntentionMessage(method, action_name, user_language)
            message_content = f"🔄 **Step {step}/{max_steps}**\n\n{user_message}"
            status = "step"
            action_progress = "pending"
            documents_label = f"action_{step}_intention"
        elif message_type == "after":
            # Message AFTER action execution: describe the result.
            user_message = await self.generateActionResultMessage(method, action_name, result, observation, user_language)
            # Bug fix: both icon branches were identical empty strings,
            # making the success/failure conditional a no-op.
            success_icon = "✅" if result and result.success else "❌"
            message_content = f"{success_icon} **Step {step}/{max_steps} Complete**\n\n{user_message}"
            status = "step"
            action_progress = "success" if result and result.success else "fail"
            documents_label = observation.get('resultLabel') if observation else f"action_{step}_result"
        else:
            # Bug fix: this branch previously logged a stale "task plan"
            # message; report the actual problem (unknown message_type).
            logger.error(f"Unknown React action message type: {message_type}")
            return

        # Persist the message on the workflow.
        message_data = {
            "workflowId": workflow.id,
            "role": "assistant",
            "message": message_content,
            "status": status,
            "sequenceNr": len(workflow.messages) + 1,
            "publishedAt": self.services.utils.getUtcTimestamp(),
            "documentsLabel": documents_label,
            "documents": [],
            "roundNumber": workflow.currentRound,
            "taskNumber": task_index,
            "actionNumber": step,
            "actionProgress": action_progress
        }
        message = self.services.interfaceDbChat.createMessage(message_data)
        if message:
            workflow.messages.append(message)
    except Exception as e:
        logger.error(f"Error creating React action message: {str(e)}")
async def generateActionIntentionMessage(self, method: str, action_name: str, user_language: str) -> str:
    """Generate a user-friendly message explaining what an action will do.

    Asks the AI service for a short, non-technical description of the
    upcoming {method}.{action_name} action, in the user's language.

    Args:
        method: Method name of the selected action (e.g. "document").
        action_name: Action name within the method (e.g. "generate").
        user_language: Language code for the generated message.

    Returns:
        The AI-generated message, or a generic fallback string if the AI
        call returns nothing or raises.
    """
    try:
        # Seed the AI with canonical examples per method/action pair so the
        # output stays short and user-facing.
        prompt = f"""Generate a brief, user-friendly message explaining what the {method}.{action_name} action will do.
User language: {user_language}
Examples:
- For ai.process: "I'll analyze the content and provide insights"
- For document.extract: "I'll extract the key information from the documents"
- For document.generate: "I'll create a formatted report from the documents"
- For outlook.composeEmail: "I'll compose an email based on your requirements"
- For outlook.sendEmail: "I'll send the composed email"
- For sharepoint.findDocumentPath: "I'll search for the requested documents"
- For sharepoint.readDocuments: "I'll read the document contents"
Return only the user-friendly message, no technical details."""

        # Cheap, fast call: speed priority with tight cost/time budget,
        # since this is cosmetic chat text, not workflow logic.
        response = await self.services.ai.callAi(
            prompt=prompt,
            options=AiCallOptions(
                operationType=OperationType.GENERATE_CONTENT,
                priority=Priority.SPEED,
                compressPrompt=True,
                maxCost=0.01,
                maxProcessingTime=5
            )
        )
        return response.strip() if response else f"Executing {method}.{action_name} action..."
    except Exception as e:
        logger.error(f"Error generating action intention message: {str(e)}")
        # Degrade gracefully: a technical but accurate fallback message.
        return f"Executing {method}.{action_name} action..."
async def generateActionResultMessage(self, method: str, action_name: str, result, observation, user_language: str) -> str:
    """Generate a user-friendly message explaining action results.

    Summarizes the outcome of an executed {method}.{action_name} action via
    the AI service, in the user's language.

    Args:
        method: Method name of the executed action.
        action_name: Action name within the method.
        result: ActionResult with `success` and `documents` attributes
            (may be None).
        observation: Observation dict; `documentsCount` is used as a
            fallback document count (may be None).
        user_language: Language code for the generated message.

    Returns:
        The AI-generated result message, or a generic fallback string if
        the AI call returns nothing or raises.
    """
    try:
        # Build a short context line about produced documents; prefer the
        # concrete result, fall back to the observation's count.
        result_context = ""
        if result and result.documents:
            doc_count = len(result.documents)
            result_context = f"Generated {doc_count} document(s)"
        elif observation and observation.get('documentsCount', 0) > 0:
            doc_count = observation.get('documentsCount', 0)
            result_context = f"Generated {doc_count} document(s)"

        # Seed the AI with per-action examples plus the success flag and
        # document context so the summary matches the actual outcome.
        prompt = f"""Generate a brief, user-friendly message explaining the result of the {method}.{action_name} action.
User language: {user_language}
Success: {result.success if result else 'Unknown'}
Result context: {result_context}
Examples:
- For successful ai.process: "Analysis complete! I've processed the content and generated insights."
- For successful document.extract: "Extraction complete! I've extracted the key information from the documents."
- For successful document.generate: "Report generated! I've created a formatted document with the requested content."
- For successful outlook.composeEmail: "Email composed! I've prepared the email content for sending."
- For successful outlook.sendEmail: "Email sent! The message has been delivered successfully."
- For failed actions: "The action encountered an issue. Please check the details."
Return only the user-friendly message, no technical details."""

        # Cheap, fast call: speed priority with tight cost/time budget.
        response = await self.services.ai.callAi(
            prompt=prompt,
            options=AiCallOptions(
                operationType=OperationType.GENERATE_CONTENT,
                priority=Priority.SPEED,
                compressPrompt=True,
                maxCost=0.01,
                maxProcessingTime=5
            )
        )
        return response.strip() if response else f"{method}.{action_name} action completed"
    except Exception as e:
        logger.error(f"Error generating action result message: {str(e)}")
        # Degrade gracefully: a neutral completion message.
        return f"{method}.{action_name} action completed"
async def generateTaskActions(self, task_step, workflow, previous_results=None, enhanced_context=None) -> List[TaskAction]:
"""Generate actions for a given task step."""
@ -573,9 +698,15 @@ class HandlingTasks:
# Extract content for placeholders
user_prompt = extractUserPrompt(context)
available_documents = extractAvailableDocuments(context)
# Use enhanced document context instead of simple summary for React mode
available_documents = getEnhancedDocumentContext(self.services)
user_language = extractUserLanguage(self.services)
# Get available connections for React mode
from modules.workflows.processing.promptFactory import _getConnectionReferenceList
available_connections = _getConnectionReferenceList(self.services)
available_connections_str = '\n'.join(f"- {conn}" for conn in available_connections) if available_connections else "No connections available"
# Get action signature
method = action.get('method', '')
name = action.get('name', '')
@ -590,6 +721,7 @@ class HandlingTasks:
placeholders = {
"USER_PROMPT": user_prompt,
"AVAILABLE_DOCUMENTS": available_documents,
"AVAILABLE_CONNECTIONS": available_connections_str,
"USER_LANGUAGE": user_language,
"SELECTED_ACTION": selected_action,
"ACTION_SIGNATURE": action_signature
@ -641,28 +773,45 @@ class HandlingTasks:
return await self.executeSingleAction(task_action, workflow, task_step, current_task, step_index, 1)
def observe_build(self, action_result: ActionResult) -> Dict[str, Any]:
"""Observe: build compact observation object from ActionResult"""
"""Observe: build compact observation object from ActionResult with full document metadata"""
previews = []
notes = []
if action_result and action_result.documents:
# Use generation service to process documents
processed_docs = self.services.generation.processActionResultDocuments(action_result, None, None)
for doc_data in processed_docs[:5]:
name = doc_data.get('fileName', '')
mime = doc_data.get('mimeType', '')
content = doc_data.get('content', '')
snippet = content[:200] if content else ''
# Process all documents and show full metadata
for doc in action_result.documents:
# Extract all available metadata without content
doc_metadata = {
"name": getattr(doc, 'documentName', 'Unknown'),
"mimeType": getattr(doc, 'mimeType', 'Unknown'),
"size": getattr(doc, 'size', 'Unknown'),
"created": getattr(doc, 'created', 'Unknown'),
"modified": getattr(doc, 'modified', 'Unknown'),
"typeGroup": getattr(doc, 'typeGroup', 'Unknown'),
"documentId": getattr(doc, 'documentId', 'Unknown'),
"reference": getattr(doc, 'reference', 'Unknown')
}
# Remove 'Unknown' values to keep it clean
doc_metadata = {k: v for k, v in doc_metadata.items() if v != 'Unknown'}
# Extract comment if available from original document
original_doc = next((d for d in action_result.documents if getattr(d, 'documentName', '') == name), None)
if original_doc and hasattr(original_doc, 'documentData'):
data = getattr(original_doc, 'documentData', None)
# Add content size indicator instead of actual content
if hasattr(doc, 'documentData') and doc.documentData:
if isinstance(doc.documentData, dict) and 'content' in doc.documentData:
content_length = len(str(doc.documentData['content']))
doc_metadata['contentSize'] = f"{content_length} characters"
else:
content_length = len(str(doc.documentData))
doc_metadata['contentSize'] = f"{content_length} characters"
# Extract comment if available
if hasattr(doc, 'documentData') and doc.documentData:
data = getattr(doc, 'documentData', None)
if isinstance(data, dict):
comment = data.get("comment", "")
if comment:
notes.append(f"Document '{name}': {comment}")
notes.append(f"Document '{doc_metadata.get('name', 'Unknown')}': {comment}")
previews.append({"name": name, "mime": mime, "snippet": snippet})
previews.append(doc_metadata)
observation = {
"success": bool(action_result.success),
"resultLabel": action_result.resultLabel or "",
@ -778,6 +927,10 @@ class HandlingTasks:
t0 = time.time()
selection = await self.plan_select(context)
logger.info(f"React step {step}: Selected action: {selection}")
# Create user-friendly message BEFORE action execution
await self.createReactActionMessage(workflow, selection, step, state.max_steps, task_index, "before")
result = await self.act_execute(context, selection, task_step, workflow, step)
observation = self.observe_build(result)
# Attach deterministic label for clarity
@ -791,22 +944,9 @@ class HandlingTasks:
"type": "info"
})
last_review_dict = decision
# Simple messaging per iteration
msg = {
"workflowId": workflow.id,
"role": "assistant",
"message": f"🔁 Step {step}/{state.max_steps}: {selection.get('action',{}).get('method','')}.{selection.get('action',{}).get('name','')}{'' if result.success else ''}",
"status": "step",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": self.services.utils.getUtcTimestamp(),
"documentsLabel": observation.get('resultLabel'),
"documents": [],
"roundNumber": workflow.currentRound,
"taskNumber": task_index,
"actionNumber": step,
"actionProgress": "success" if result.success else "fail"
}
self.services.interfaceDbChat.createMessage(msg)
# Create user-friendly message AFTER action execution
await self.createReactActionMessage(workflow, selection, step, state.max_steps, task_index, "after", result, observation)
except Exception as e:
logger.error(f"React step {step} error: {e}")
break
@ -1456,7 +1596,7 @@ class HandlingTasks:
)
result_label = action.execResultLabel
# Trace action result (without document data)
# Trace action result with full document metadata
action_result_trace = {
"method": action.execMethod,
"action": action.execAction,
@ -1465,6 +1605,25 @@ class HandlingTasks:
"resultLabel": result_label,
"documentsCount": len(result.documents) if result.documents else 0
}
# Add full document metadata if documents exist
if result.documents:
action_result_trace["documents"] = []
for doc in result.documents:
doc_metadata = {
"name": getattr(doc, 'documentName', 'Unknown'),
"mimeType": getattr(doc, 'mimeType', 'Unknown'),
"size": getattr(doc, 'size', 'Unknown'),
"created": getattr(doc, 'created', 'Unknown'),
"modified": getattr(doc, 'modified', 'Unknown'),
"typeGroup": getattr(doc, 'typeGroup', 'Unknown'),
"documentId": getattr(doc, 'documentId', 'Unknown'),
"reference": getattr(doc, 'reference', 'Unknown')
}
# Remove 'Unknown' values to keep it clean
doc_metadata = {k: v for k, v in doc_metadata.items() if v != 'Unknown'}
action_result_trace["documents"].append(doc_metadata)
self.writeTraceLog("Action Result", action_result_trace)
# Process documents from the action result
@ -1572,12 +1731,15 @@ class HandlingTasks:
# Create a more meaningful message that includes task context
task_objective = task_step.objective if task_step else 'Unknown task'
# Use passed parameters first, fallback to workflow context
current_round = workflow_context.get('currentRound', 0)
current_task = task_index if task_index is not None else workflow_context.get('currentTask', 0)
# Extract round, task, and action numbers from result_label first, then fallback to workflow context
current_round = self._extractRoundNumberFromLabel(result_label) if result_label else workflow_context.get('currentRound', 0)
current_task = self._extractTaskNumberFromLabel(result_label) if result_label else (task_index if task_index is not None else workflow_context.get('currentTask', 0))
total_tasks = workflow_stats.get('totalTasks', 0)
current_action = action_index if action_index is not None else workflow_context.get('currentAction', 0)
current_action = self._extractActionNumberFromLabel(result_label) if result_label else (action_index if action_index is not None else workflow_context.get('currentAction', 0))
total_actions = total_actions if total_actions is not None else workflow_stats.get('totalActions', 0)
# Debug logging for round number extraction
logger.info(f"Action message round number extraction: result_label='{result_label}', extracted_round={current_round}, workflow_round={workflow_context.get('currentRound', 0)}")
# Build a user-friendly message based on success/failure
if result.success:
@ -1687,6 +1849,42 @@ class HandlingTasks:
logger.error(f"Error validating task plan: {str(e)}")
return False
def _extractRoundNumberFromLabel(self, label: str) -> int:
"""Extract round number from a document label like 'round1_task1_action1_diagram_analysis'"""
try:
if not label or not isinstance(label, str):
return 0
# Parse label format: round{round}_task{task}_action{action}_{context}
if label.startswith('round'):
round_part = label.split('_')[0] # Get 'round1' part
if round_part.startswith('round'):
round_number = round_part[5:] # Remove 'round' prefix
return int(round_number)
return 0
except Exception as e:
logger.warning(f"Could not extract round number from label '{label}': {str(e)}")
return 0
def _extractTaskNumberFromLabel(self, label: str) -> int:
"""Extract task number from a document label like 'round1_task1_action1_diagram_analysis'"""
try:
if not label or not isinstance(label, str):
return 0
# Parse label format: round{round}_task{task}_action{action}_{context}
if '_task' in label:
task_part = label.split('_task')[1]
if task_part and '_' in task_part:
task_number = task_part.split('_')[0]
return int(task_number)
return 0
except Exception as e:
logger.warning(f"Could not extract task number from label '{label}': {str(e)}")
return 0
def _extractActionNumberFromLabel(self, label: str) -> int:
"""Extract action number from a document label like 'round1_task1_action1_diagram_analysis'"""
try:
@ -1941,12 +2139,14 @@ class HandlingTasks:
timestamp = datetime.fromtimestamp(self.services.utils.getUtcTimestamp(), UTC).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
trace_entry = f"[{timestamp}] {contextText}\n"
# Add data if provided
# Add data if provided - show full content without truncation
if data is not None:
if isinstance(data, (dict, list)):
import json
trace_entry += f"Data: {json.dumps(data, indent=2, default=str)}\n"
# Use ensure_ascii=False to preserve Unicode characters and indent=2 for readability
trace_entry += f"Data: {json.dumps(data, indent=2, default=str, ensure_ascii=False)}\n"
else:
# For string data, show full content without truncation
trace_entry += f"Data: {str(data)}\n"
trace_entry += "-" * 80 + "\n\n"

View file

@ -786,7 +786,7 @@ EXAMPLES OF GOOD ACTIONS:
4. Comprehensive summary report with user message:
{{
"method": "document",
"action": "generateReport",
"action": "generate",
"parameters": {{
"documentList": ["docList:msg_456:candidate_analysis_results"],
"title": "Comprehensive Candidate Evaluation Report"
@ -812,7 +812,7 @@ EXAMPLES OF GOOD ACTIONS:
}},
{{
"method": "document",
"action": "generateReport",
"action": "generate",
"parameters": {{
"documentList": ["round{current_round}_task{current_task}_action1_extracted_data"],
"title": "Report"

View file

@ -17,120 +17,42 @@ from modules.workflows.processing.promptFactory import (
def createTaskPlanningPromptTemplate() -> str:
"""Create task planning prompt template with placeholders."""
return """You are a task planning AI that analyzes user requests and creates structured, self-contained task plans with user-friendly feedback messages.
return """You are a task planning AI that breaks down user requests into logical, executable task steps.
USER REQUEST: {{KEY:USER_PROMPT}}
USER REQUEST:
{{KEY:USER_PROMPT}}
AVAILABLE DOCUMENTS: {{KEY:AVAILABLE_DOCUMENTS}}
AVAILABLE DOCUMENTS:
{{KEY:AVAILABLE_DOCUMENTS}}
PREVIOUS WORKFLOW ROUNDS CONTEXT:
PREVIOUS WORKFLOW ROUNDS:
{{KEY:WORKFLOW_HISTORY}}
INSTRUCTIONS:
1. Analyze the user request, available documents, and previous workflow rounds context
2. If the user request appears to be a follow-up (like "try again", "versuche es nochmals", "retry", etc.),
use the PREVIOUS WORKFLOW ROUNDS CONTEXT to understand what the user wants to retry or continue
3. Group related topics and sequential steps into single, comprehensive tasks
4. Focus on business outcomes, not technical operations
5. Make each task self-contained: clearly state what to do and what outputs are expected
6. Ensure proper handover between tasks (later actions will use your task outputs)
7. Detect the language of the user request and include it in languageUserDetected
8. Generate user-friendly messages for each task in the user's request language
9. Return a JSON object with the exact structure shown below
TASK GROUPING PRINCIPLES:
- COMBINE RELATED TOPICS: Group related subjects, sequential steps, or workflow-structured activities into single tasks
- SEQUENTIAL WORKFLOWS: If the user says "first do this, then that, then that" create ONE task that handles the entire sequence
- SIMILAR CONTENT: If multiple items deal with the same subject matter combine into ONE comprehensive task
- ONLY SPLIT WHEN DIFFERENT: Create separate tasks ONLY when the user explicitly wants different, independent things
EXAMPLES OF GOOD TASK GROUPING:
COMBINE INTO ONE TASK:
- "Analyze the documents, extract key insights, and create a summary report" ONE task: "Analyze documents and create comprehensive summary report"
- "First check my emails, then respond to urgent ones, then organize my inbox" ONE task: "Process and organize email inbox with priority responses"
- "Review the budget, analyze spending patterns, and suggest cost-cutting measures" ONE task: "Comprehensive budget analysis with optimization recommendations"
- "Create a business strategy, develop marketing plan, and prepare presentation" ONE task: "Develop complete business strategy with marketing plan and presentation"
SPLIT INTO MULTIPLE TASKS:
- "Create a business strategy for Q4" AND "Check my emails for messages from my assistant" TWO separate tasks (different subjects)
- "Analyze customer feedback" AND "Prepare quarterly financial report" TWO separate tasks (different business areas)
- "Review project timeline" AND "Update employee handbook" TWO separate tasks (unrelated activities)
TASK PLANNING PRINCIPLES:
- Break down complex requests into logical, sequential steps
- Focus on business value and outcomes
- Keep tasks at a meaningful level of abstraction (not implementation details)
- Each task should produce results that can be used by subsequent tasks
- Ensure clear dependencies and handovers between tasks
- Provide clear, actionable user messages in the user's request language
- Group related activities to minimize task fragmentation
- Only create multiple tasks when dealing with truly different, independent objectives
- Make task objectives action-oriented and specific (include scope, data sources to consider, and output intent at high level)
- Write success_criteria as measurable acceptance criteria focusing on outputs (what artifacts or insights will exist and how they are validated)
FOLLOW-UP PROMPT HANDLING:
- If the user request is a follow-up (e.g., "try again", "versuche es nochmals", "retry", "continue", "proceed"),
analyze the PREVIOUS WORKFLOW ROUNDS CONTEXT to understand what failed or was incomplete
- Use the previous round's user requests and task outcomes to determine what the user wants to retry
- If previous rounds failed due to missing documents, and documents are now available,
create tasks that use the newly available documents to accomplish the original request
- Maintain the same business objective from previous rounds but adapt to current available resources
SPECIFIC SCENARIO HANDLING:
- If previous round failed with "documents missing" error and current round has documents available,
the user likely wants to retry the same operation with the newly provided documents
- Example: Previous round "speichere mir die 3 dokumente im sharepoint unter xxx" failed due to missing documents,
current round "versuche es nochmals" with documents should retry the SharePoint save operation
- Always check if the current request is a retry by looking for retry keywords and previous round context
TASK PLANNING RULES:
- COMBINE related activities into single tasks to avoid fragmentation
- Focus on business value and meaningful outcomes
- Keep tasks at appropriate abstraction level (not implementation details)
- Each task should produce usable results for subsequent tasks
- If retry request, analyze previous rounds to understand what failed
REQUIRED JSON STRUCTURE:
{{
"overview": "Brief description of the overall plan",
"languageUserDetected": "en", // Language code detected from user request (en, de, fr, it, es, etc.)
"userMessage": "User-friendly message explaining the task plan in user's request language",
"languageUserDetected": "en",
"userMessage": "User-friendly message explaining the task plan",
"tasks": [
{{
"id": "task_1",
"objective": "Clear business objective this task accomplishes (combining related activities)",
"dependencies": ["task_0"], // IDs of tasks that must complete first
"success_criteria": ["criteria1", "criteria2"],
"objective": "Clear business objective combining related activities",
"dependencies": ["task_0"],
"success_criteria": ["measurable criteria 1", "measurable criteria 2"],
"estimated_complexity": "low|medium|high",
"userMessage": "User-friendly message explaining what this task will accomplish in user's request language"
"userMessage": "What this task will accomplish"
}}
]
}}
EXAMPLES OF GOOD TASK OBJECTIVES (COMBINING RELATED ACTIVITIES):
- "Analyze documents and extract key insights for business communication"
- "Create professional business communication incorporating analyzed information"
- "Execute business communication using specified channels and document outcomes"
- "Develop comprehensive business strategy with implementation roadmap and success metrics"
EXAMPLES OF WELL-FORMED SUCCESS CRITERIA (OUTPUT-FOCUSED):
- "Deliver a prioritized list of 1020 candidates with justification"
- "Provide a structured JSON with fields: company, ticker, rationale, metrics"
- "Produce a presentation outline with 5 sections and bullet points per section"
- "Include data sources and date stamped references for traceability"
EXAMPLES OF GOOD SUCCESS CRITERIA:
- "Key insights extracted and ready for business use"
- "Professional communication created with clear business value"
- "Business communication successfully delivered and documented"
- "All outcomes properly documented and accessible"
EXAMPLES OF BAD TASK OBJECTIVES:
- "Read the PDF file" (too granular - should be "Analyze document content")
- "Convert data to CSV" (implementation detail - should be "Structure data for analysis")
- "Send email" (too specific - should be "Deliver business communication")
LANGUAGE DETECTION:
- Analyze the user request text to identify the language
- Use standard language codes: en (English), de (German), fr (French), it (Italian), es (Spanish), etc.
- If the language cannot be determined, use "en" as default
- Include the detected language in the languageUserDetected field
NOTE: Respond with ONLY the JSON object. Do not include any explanatory text."""
RESPONSE: Return ONLY the JSON object."""
def createActionDefinitionPromptTemplate() -> str:
@ -147,59 +69,34 @@ AVAILABLE METHODS: {{KEY:AVAILABLE_METHODS}}
USER LANGUAGE: {{KEY:USER_LANGUAGE}}
INSTRUCTIONS:
- Generate actions to accomplish this task step using available documents, connections, and previous results
- Use docItem for single documents and docList for groups of documents as shown in AVAILABLE DOCUMENTS
- If there are no documents available, do not create document extraction actions. Select methods strictly based on the task objective; choose web actions when external information is required. Otherwise, generate a status/information report requesting needed inputs.
- Always pass documentList as a LIST of references (docItem and/or docList) - this list CANNOT be empty for document extraction actions
- For referencing documents from previous actions, use the format "round{current_round}_task{current_task}_action{action_number}_{descriptive_label}"
- Each action must be self-contained and executable with the provided parameters
- For document extraction, ensure prompts are specific and detailed
- Include validation steps in extraction prompts where relevant
- If this is a retry, learn from previous failures and improve the approach
- Address specific issues mentioned in previous review feedback
- When specifying expectedDocumentFormats, ensure AI prompts explicitly request pure data without markdown formatting
- Generate user-friendly messages for each action in the user's language
ACTION SELECTION RULES:
- Use document.generateReport for creating formatted documents (Word, PDF, Excel, etc.)
- Use ai.process for text analysis, Q&A, research, brainstorming (plain text only)
- Use web.search for external information gathering
- Use document.extract for analyzing existing documents
- If no documents available, use web actions or create status reports
PARAMETER COMPLETENESS REQUIREMENTS:
- Every parameter must contain all information needed to execute without implicit context
- Use explicit, concrete values (units, languages, formats, limits, date ranges, IDs) when applicable
- For search-like parameters (if any method requires a query), derive the query from the task objective AND ALL success criteria dimensions. Include:
- Key entities and domain terms from the objective
- All distinct facets from success_criteria (e.g., valuation AND AI potential AND know-how needs)
- Geography/localization (e.g., Schweiz/Suisse/Switzerland; use multilingual synonyms when helpful)
- Time horizon or recency if relevant
- Boolean operators and synonyms to increase precision (use AND/OR, quotes, parentheses)
- Avoid single-topic or generic queries focused only on one facet (e.g., pure valuation metrics)
- When facets are truly distinct, create 13 focused actions with precise queries rather than one vague catch-all
- Document list parameters must reference only existing labels or prior action outputs; do not reference future outputs
DOCUMENT ROUTING GUIDANCE:
- Each action should produce documents with a clear resultLabel for routing
- Use consistent naming: "round{current_round}_task{current_task}_action{action_number}_{descriptive_label}"
- Ensure document flow: Action A produces documents that Action B can consume
- Document labels should be descriptive of content, not just "results" or "output"
- Consider what subsequent actions will need and structure outputs accordingly
PARAMETER REQUIREMENTS:
- documentList must be a LIST of references from AVAILABLE DOCUMENTS
- Use specific, detailed prompts for document actions
- Include all necessary parameters for execution
- Reference previous action outputs using: "round{current_round}_task{current_task}_action{action_number}_{label}"
REQUIRED JSON STRUCTURE:
{{
"actions": [
{{
"method": "method_name",
"action": "action_name",
"action": "action_name",
"parameters": {{}},
"resultLabel": "round{current_round}_task{current_task}_action{action_number}_{descriptive_label}",
"description": "Brief description of what this action accomplishes",
"userMessage": "User-friendly message explaining what this action will do in user's language"
"description": "What this action accomplishes",
"userMessage": "User-friendly message in {{KEY:USER_LANGUAGE}}"
}}
]
}}
IMPORTANT NOTES:
- Respond with ONLY the JSON object. Do not include any explanatory text.
- Before creating any document extraction action, verify that AVAILABLE DOCUMENTS contains actual document references.
- Always include a user-friendly userMessage for each action in the user's language.
- The examples above show German user messages as reference - adapt the language to match the USER LANGUAGE specified above."""
RESPONSE: Return ONLY the JSON object."""
def createActionSelectionPromptTemplate() -> str:
@ -228,18 +125,36 @@ def createActionParameterPromptTemplate() -> str:
return """Provide only the required parameters for this action.
SELECTED ACTION: {{KEY:SELECTED_ACTION}}
ACTION SIGNATURE: {{KEY:ACTION_SIGNATURE}}
OBJECTIVE: {{KEY:USER_PROMPT}}
AVAILABLE DOCUMENTS: {{KEY:AVAILABLE_DOCUMENTS}}
AVAILABLE CONNECTIONS: {{KEY:AVAILABLE_CONNECTIONS}}
USER LANGUAGE: {{KEY:USER_LANGUAGE}}
RULES:
- Return only the parameters object.
- Include user language if relevant.
- Reference documents only by exact labels available.
- Avoid unnecessary fields; host applies defaults.
- Use the ACTION SIGNATURE above to understand what parameters are required.
- Convert the objective into appropriate parameter values as needed.
DOCUMENT REFERENCE TYPES:
- docItem: Reference to a single document (e.g., "docItem:uuid:filename.pdf")
- docList: Reference to a group of documents (e.g., "docList:msg_123:AnalysisResults")
- round{{round_number}}_task{{task_number}}_action{{action_number}}_{{label}}: Reference to resulting document list from previous action
CONNECTION REFERENCE TYPES:
- Use exact connection references from AVAILABLE CONNECTIONS (e.g., "conn_microsoft_123", "conn_sharepoint_456")
CRITICAL RULES:
- ONLY use exact document labels listed in AVAILABLE DOCUMENTS above
- ONLY use exact connection references from AVAILABLE CONNECTIONS
- For documentList parameters: Use docList references when you need multiple documents
- For documentList parameters: Use docItem references when you need specific documents
- For connectionReference parameters: Use the exact connection reference from AVAILABLE CONNECTIONS
- Return only the parameters object as JSON
- Include user language if relevant
- Avoid unnecessary fields; host applies defaults
- Use the ACTION SIGNATURE above to understand what parameters are required
- Convert the objective into appropriate parameter values as needed
RESPONSE FORMAT (JSON only):
{{"parameters":{{}}}}"""
@ -343,45 +258,32 @@ def extractAvailableMethods(service) -> str:
# --- Rendered-diff fragment of extractAvailableMethods (def line not in view). ---
# NOTE(review): this dump interleaves removed and added diff lines, so both the
# old docstring-parameter-extraction path and the new description path appear.
method_instance = methods.get(method, {}).get('instance') if methods else None
for action, sig in actions:
# Parse the signature to extract parameters
if '(' in sig and ')' in sig:
# Extract parameters from signature
params_start = sig.find('(')
params_end = sig.find(')')
# NOTE(review): params_str is computed but never used anywhere in this view.
params_str = sig[params_start+1:params_end]
# Parse parameters directly from the docstring - much simpler and more reliable!
parameters = []
# Get the actual function's docstring
if method_instance and hasattr(method_instance, action):
func = getattr(method_instance, action)
if hasattr(func, '__doc__') and func.__doc__:
docstring = func.__doc__
# Parse Parameters section from docstring
lines = docstring.split('\n')
in_parameters = False
# NOTE(review): original_line and the enumerate index i are never used below.
for i, line in enumerate(lines):
original_line = line
line = line.strip()
if line.startswith('Parameters:'):
in_parameters = True
continue
# Any other recognized section header terminates the Parameters block.
elif line.startswith('Returns:') or line.startswith('Raises:') or line.startswith('Note:') or line.startswith('Example:') or line.startswith('Examples:'):
in_parameters = False
continue
elif in_parameters and line and not line.startswith('-') and not line.startswith('*'):
# This is a parameter line
if ':' in line:
param_name = line.split(':')[0].strip()
param_desc = line.split(':', 1)[1].strip()
parameters.append(f"{param_name}: {param_desc}")
# NOTE(review): this assignment is overwritten by the description assignment at
# the end of the loop — presumably the removed (old) diff path; confirm intent.
available_methods_json[method][action] = parameters
else:
available_methods_json[method][action] = []
# Get the main action description (not parameters) for Step 1 action selection
action_description = ""
# Get the actual function's docstring
if method_instance and hasattr(method_instance, action):
func = getattr(method_instance, action)
if hasattr(func, '__doc__') and func.__doc__:
docstring = func.__doc__
# Extract main description (everything before "Parameters:")
lines = docstring.split('\n')
description_lines = []
for line in lines:
line = line.strip()
if line.startswith('Parameters:'):
break
# Skip '@...' decorator-style lines when assembling the description.
if line and not line.startswith('@'):
description_lines.append(line)
action_description = ' '.join(description_lines).strip()
# If no description found, create a basic one
if not action_description:
action_description = f"Execute {method}.{action} action"
available_methods_json[method][action] = action_description
# Pretty-printed JSON; ensure_ascii=False keeps non-ASCII characters readable.
return json.dumps(available_methods_json, indent=2, ensure_ascii=False)
@ -392,7 +294,7 @@ def extractUserLanguage(service) -> str:
def extractReviewContent(context) -> str:
"""Extract review content from context with full document metadata.

Builds a human-readable summary of action results (document metadata only,
never document content) when `context.action_results` is populated; the
remainder of the function (later diff hunk) serializes `context.observation`
or falls back to a fixed message.

NOTE(review): the original stacked two docstrings here (old and new diff
lines); the second string literal was a dead expression statement — merged
into this single docstring.
"""
if hasattr(context, 'action_results') and context.action_results:
# Build result summary
result_summary = ""
@ -405,14 +307,39 @@ def extractReviewContent(context) -> str:
# Per-result document listing: expose metadata only, never document content.
if result.documents:
result_summary += f"  Documents: {len(result.documents)} document(s)\n"
for doc in result.documents:
# NOTE(review): the next three lines look like the pre-change diff path;
# the metadata dict that follows supersedes this simple name/mime line.
doc_name = getattr(doc, 'documentName', 'Unknown')
doc_mime = getattr(doc, 'mimeType', 'Unknown')
result_summary += f"    - {doc_name} ({doc_mime})\n"
# Extract all available metadata without content
doc_metadata = {
"name": getattr(doc, 'documentName', 'Unknown'),
"mimeType": getattr(doc, 'mimeType', 'Unknown'),
"size": getattr(doc, 'size', 'Unknown'),
"created": getattr(doc, 'created', 'Unknown'),
"modified": getattr(doc, 'modified', 'Unknown'),
"typeGroup": getattr(doc, 'typeGroup', 'Unknown'),
"documentId": getattr(doc, 'documentId', 'Unknown'),
"reference": getattr(doc, 'reference', 'Unknown')
}
# Remove 'Unknown' values to keep it clean
doc_metadata = {k: v for k, v in doc_metadata.items() if v != 'Unknown'}
result_summary += f"    - {json.dumps(doc_metadata, indent=6, ensure_ascii=False)}\n"
else:
# NOTE(review): f-string with no placeholders — a plain string would suffice.
result_summary += f"  Documents: None\n"
return result_summary
elif hasattr(context, 'observation') and context.observation:
# NOTE(review): rendered-diff artifact — this early return appears to be the
# removed (old) line; with it present everything below in this branch is
# unreachable. Confirm the final revision dropped it.
return json.dumps(context.observation, ensure_ascii=False)
# For observation data, show full content but handle documents specially
if isinstance(context.observation, dict):
# Create a copy to modify
# NOTE(review): dict.copy() is shallow — the preview dicts iterated below are
# the SAME objects held by context.observation, so rewriting 'snippet' also
# mutates the caller's observation. Consider copy.deepcopy if unintended.
obs_copy = context.observation.copy()
# If there are previews with documents, show only metadata
if 'previews' in obs_copy and isinstance(obs_copy['previews'], list):
for preview in obs_copy['previews']:
if isinstance(preview, dict) and 'snippet' in preview:
# Replace snippet with metadata indicator
preview['snippet'] = f"[Content: {len(preview.get('snippet', ''))} characters]"
return json.dumps(obs_copy, indent=2, ensure_ascii=False)
else:
# Non-dict observations are serialized as-is (compact, non-ASCII preserved).
return json.dumps(context.observation, ensure_ascii=False)
else:
return "No review content available"

View file

@ -0,0 +1,32 @@
To calculate the first 1000 prime numbers efficiently, we can use the Sieve of Eratosthenes algorithm. This algorithm is efficient for finding all prime numbers up to a specified integer. Here's how you can implement it and extract the first 1000 prime numbers:
```python
def sieve_of_eratosthenes(limit):
    """Return all prime numbers up to and including *limit*.

    Fixes an off-by-one in the original collection step: it used
    ``range(2, limit)``, which silently dropped ``limit`` itself when it was
    prime (e.g. ``sieve_of_eratosthenes(11)`` omitted 11 even though the
    sieve array covered it).

    Parameters:
        limit: Inclusive upper bound to sieve. Values below 2 yield [].

    Returns:
        Sorted list of all primes p with 2 <= p <= limit.
    """
    if limit < 2:
        return []  # no primes below 2; also keeps the array allocation sane
    is_prime = [True] * (limit + 1)
    p = 2
    while p * p <= limit:
        if is_prime[p]:
            # Start at p*p: smaller multiples were crossed off by smaller primes.
            for i in range(p * p, limit + 1, p):
                is_prime[i] = False
        p += 1
    # limit + 1 so the bound itself is included when prime (the original bug).
    return [p for p in range(2, limit + 1) if is_prime[p]]
def first_n_primes(n):
    """Return the first *n* prime numbers.

    Sieves up to a guessed upper bound and doubles that bound until at
    least n primes are available, then truncates to exactly n.
    """
    bound = 12500  # initial guess for the sieve's upper limit
    primes = sieve_of_eratosthenes(bound)
    while len(primes) < n:
        bound *= 2
        primes = sieve_of_eratosthenes(bound)
    return primes[:n]
first_1000_primes = first_n_primes(1000)
```
Now, let's format the output in the specified JSON format:
```json
{
"documents": [
{
"data": "2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 
2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347, 3359, 336

View file

@ -0,0 +1,32 @@
To calculate the first 1000 prime numbers efficiently, we can use the Sieve of Eratosthenes algorithm. This algorithm is efficient for finding all prime numbers up to a specified integer. Here's how you can implement it and extract the first 1000 prime numbers:
```python
def sieve_of_eratosthenes(limit):
    """Return all prime numbers up to and including *limit*.

    Fixes an off-by-one in the original collection step: it used
    ``range(2, limit)``, which silently dropped ``limit`` itself when it was
    prime (e.g. ``sieve_of_eratosthenes(11)`` omitted 11 even though the
    sieve array covered it).

    Parameters:
        limit: Inclusive upper bound to sieve. Values below 2 yield [].

    Returns:
        Sorted list of all primes p with 2 <= p <= limit.
    """
    if limit < 2:
        return []  # no primes below 2; also keeps the array allocation sane
    is_prime = [True] * (limit + 1)
    p = 2
    while p * p <= limit:
        if is_prime[p]:
            # Start at p*p: smaller multiples were crossed off by smaller primes.
            for i in range(p * p, limit + 1, p):
                is_prime[i] = False
        p += 1
    # limit + 1 so the bound itself is included when prime (the original bug).
    return [p for p in range(2, limit + 1) if is_prime[p]]
def first_n_primes(n):
    """Return the first *n* prime numbers.

    Sieves up to a guessed upper bound and doubles that bound until at
    least n primes are available, then truncates to exactly n.
    """
    bound = 12500  # initial guess for the sieve's upper limit
    primes = sieve_of_eratosthenes(bound)
    while len(primes) < n:
        bound *= 2
        primes = sieve_of_eratosthenes(bound)
    return primes[:n]
first_1000_primes = first_n_primes(1000)
```
Now, let's format the output in the specified JSON format:
```json
{
"documents": [
{
"data": "2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 
2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347, 3359, 336

View file

@ -0,0 +1,2 @@
documents: 1
doc[1]: name=ai_result_r0t0a0.txt, mimeType=text/plain

View file

@ -0,0 +1,3 @@
Prime Numbers:
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997

View file

@ -0,0 +1,11 @@
```json
{
"documents": [
{
"data": "Prime Numbers:\n\n2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997",
"mimeType": "text/plain",
"comment": "This document contains a list of prime numbers up to 1000."
}
]
}
```

View file

@ -0,0 +1,2 @@
documents: 1
doc[1]: name=ai_result_r0t0a0.txt, mimeType=text/plain

View file

@ -0,0 +1,3 @@
Prime Numbers:
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997

View file

@ -0,0 +1,11 @@
```json
{
"documents": [
{
"data": "Prime Numbers:\n\n2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997",
"mimeType": "text/plain",
"comment": "A list of prime numbers up to 1000."
}
]
}
```

View file

@ -0,0 +1,2 @@
documents: 1
doc[1]: name=ai_result_r0t0a0.txt, mimeType=text/plain

View file

@ -0,0 +1,19 @@
{
"id": "msg_b5c1b3f5-6ba3-4927-ade9-902afb683490",
"workflowId": "4886a461-687b-4980-853d-91251f9424ac",
"parentMessageId": null,
"message": "Gib mir die ersten 1000 Primzahlen in einem word dokument aus",
"role": "user",
"status": "first",
"sequenceNr": 1,
"publishedAt": 1759522071.7880292,
"roundNumber": 1,
"taskNumber": 0,
"actionNumber": 0,
"documentsLabel": "round1_task0_action0_context",
"actionId": null,
"actionMethod": null,
"actionName": null,
"success": null,
"documents": []
}

View file

@ -0,0 +1 @@
Gib mir die ersten 1000 Primzahlen in einem word dokument aus

View file

@ -0,0 +1,19 @@
{
"id": "msg_4079e23b-a1bc-4f25-9304-66e7a00d3143",
"workflowId": "4886a461-687b-4980-853d-91251f9424ac",
"parentMessageId": null,
"message": "🚀 **Task 1/2**\n\n💬 Berechne die ersten 1000 Primzahlen mit einem effizienten Algorithmus",
"role": "assistant",
"status": "step",
"sequenceNr": 3,
"publishedAt": 1759522077.4175804,
"roundNumber": 1,
"taskNumber": 1,
"actionNumber": 0,
"documentsLabel": "task_1_start",
"actionId": null,
"actionMethod": null,
"actionName": null,
"success": null,
"documents": []
}

View file

@ -0,0 +1,3 @@
🚀 **Task 1/2**
💬 Berechne die ersten 1000 Primzahlen mit einem effizienten Algorithmus

View file

@ -0,0 +1,19 @@
{
"id": "msg_b476c20d-37e0-4758-b9bc-dfac28d7aca0",
"workflowId": "4886a461-687b-4980-853d-91251f9424ac",
"parentMessageId": null,
"message": "**Action 1/1 (ai.process)**\n\n✅ Calculate first 1000 prime numbers using efficient algorithm\n\n",
"role": "assistant",
"status": "step",
"sequenceNr": 4,
"publishedAt": 1759522121.3918097,
"roundNumber": 1,
"taskNumber": 1,
"actionNumber": 1,
"documentsLabel": "round1_task1_action1_results",
"actionId": "action_aaacb272-b7b6-494b-9fea-db17d710bc8a",
"actionMethod": "ai",
"actionName": "process",
"success": null,
"documents": []
}

View file

@ -0,0 +1,4 @@
**Action 1/1 (ai.process)**
✅ Calculate first 1000 prime numbers using efficient algorithm

View file

@ -0,0 +1,12 @@
{
"id": "d19c5aba-156b-4aab-b8d2-e5701d5f01b8",
"messageId": "msg_b476c20d-37e0-4758-b9bc-dfac28d7aca0",
"fileId": "e36734d2-da8d-4423-b7a3-b44fa2c93f30",
"fileName": "ai_result_r0t0a0_39.txt",
"fileSize": 3638,
"mimeType": "text/plain",
"roundNumber": 1,
"taskNumber": 1,
"actionNumber": 1,
"actionId": "action_aaacb272-b7b6-494b-9fea-db17d710bc8a"
}

View file

@ -0,0 +1,19 @@
{
"id": "msg_d50a37c8-cea6-40b9-b317-6b4eb339c619",
"workflowId": "4886a461-687b-4980-853d-91251f9424ac",
"parentMessageId": null,
"message": "🔁 Step 1/5: ai.process → ✅",
"role": "assistant",
"status": "step",
"sequenceNr": 5,
"publishedAt": 1759522122.9830983,
"roundNumber": 1,
"taskNumber": 1,
"actionNumber": 1,
"documentsLabel": "round1_task1_action1_results",
"actionId": null,
"actionMethod": null,
"actionName": null,
"success": null,
"documents": []
}

View file

@ -0,0 +1 @@
🔁 Step 1/5: ai.process → ✅

View file

@ -0,0 +1,19 @@
{
"id": "msg_d14a05bd-90ca-4d3b-a6b3-03244e8e4e18",
"workflowId": "4886a461-687b-4980-853d-91251f9424ac",
"parentMessageId": null,
"message": "🚀 **Task 2/2**\n\n💬 Erstelle ein Word-Dokument und formatiere die Primzahlen übersichtlich",
"role": "assistant",
"status": "step",
"sequenceNr": 6,
"publishedAt": 1759522123.165301,
"roundNumber": 1,
"taskNumber": 2,
"actionNumber": 0,
"documentsLabel": "task_2_start",
"actionId": null,
"actionMethod": null,
"actionName": null,
"success": null,
"documents": []
}

View file

@ -0,0 +1,3 @@
🚀 **Task 2/2**
💬 Erstelle ein Word-Dokument und formatiere die Primzahlen übersichtlich

View file

@ -0,0 +1,19 @@
{
"id": "msg_08fa0763-a33c-4ad2-81f8-e943354dc4e5",
"workflowId": "4886a461-687b-4980-853d-91251f9424ac",
"parentMessageId": null,
"message": "**Action 1/1 (ai.process)**\n\n✅ Create and format Word document with prime numbers\n\n",
"role": "assistant",
"status": "step",
"sequenceNr": 7,
"publishedAt": 1759522133.1566048,
"roundNumber": 1,
"taskNumber": 2,
"actionNumber": 1,
"documentsLabel": "round1_task2_action1_results",
"actionId": "action_ea265db5-c27e-43bc-8667-182369622318",
"actionMethod": "ai",
"actionName": "process",
"success": null,
"documents": []
}

View file

@ -0,0 +1,4 @@
**Action 1/1 (ai.process)**
✅ Create and format Word document with prime numbers

View file

@ -0,0 +1,12 @@
{
"id": "bf4e69a8-fcd6-42a4-862f-de2dbc907cb4",
"messageId": "msg_08fa0763-a33c-4ad2-81f8-e943354dc4e5",
"fileId": "a3901b8a-8a59-4162-94f7-3151f039b014",
"fileName": "ai_result_r0t0a0_40.txt",
"fileSize": 825,
"mimeType": "text/plain",
"roundNumber": 1,
"taskNumber": 2,
"actionNumber": 1,
"actionId": "action_ea265db5-c27e-43bc-8667-182369622318"
}

View file

@ -0,0 +1,19 @@
{
"id": "msg_225c63a8-49b7-4c66-93cf-3944e2219b5b",
"workflowId": "4886a461-687b-4980-853d-91251f9424ac",
"parentMessageId": null,
"message": "🔁 Step 1/5: ai.process → ✅",
"role": "assistant",
"status": "step",
"sequenceNr": 8,
"publishedAt": 1759522134.8963306,
"roundNumber": 1,
"taskNumber": 2,
"actionNumber": 1,
"documentsLabel": "round1_task2_action1_results",
"actionId": null,
"actionMethod": null,
"actionName": null,
"success": null,
"documents": []
}

View file

@ -0,0 +1 @@
🔁 Step 1/5: ai.process → ✅

View file

@ -0,0 +1,19 @@
{
"id": "msg_cf96022a-b7d8-48c9-9a97-208d1cd68f5f",
"workflowId": "4886a461-687b-4980-853d-91251f9424ac",
"parentMessageId": null,
"message": "**Action 2/1 (ai.process)**\n\n✅ Create and format Word document with prime numbers\n\n",
"role": "assistant",
"status": "step",
"sequenceNr": 9,
"publishedAt": 1759522144.608428,
"roundNumber": 1,
"taskNumber": 2,
"actionNumber": 2,
"documentsLabel": "round1_task2_action2_results",
"actionId": "action_dc3e1666-c85f-46c1-a1a5-babbb4ac6688",
"actionMethod": "ai",
"actionName": "process",
"success": null,
"documents": []
}

View file

@ -0,0 +1,4 @@
**Action 2/1 (ai.process)**
✅ Create and format Word document with prime numbers

View file

@ -0,0 +1,12 @@
{
"id": "e2816428-e83e-4116-985a-6fe52622605d",
"messageId": "msg_cf96022a-b7d8-48c9-9a97-208d1cd68f5f",
"fileId": "9ec61115-e379-4cde-992e-b064eccb16a5",
"fileName": "ai_result_r0t0a0_41.txt",
"fileSize": 825,
"mimeType": "text/plain",
"roundNumber": 1,
"taskNumber": 2,
"actionNumber": 2,
"actionId": "action_dc3e1666-c85f-46c1-a1a5-babbb4ac6688"
}

View file

@ -0,0 +1,19 @@
{
"id": "msg_b9f347a2-a282-4d06-a1d5-cc06e45e3a63",
"workflowId": "4886a461-687b-4980-853d-91251f9424ac",
"parentMessageId": null,
"message": "🔁 Step 2/5: ai.process → ✅",
"role": "assistant",
"status": "step",
"sequenceNr": 10,
"publishedAt": 1759522146.7869656,
"roundNumber": 1,
"taskNumber": 2,
"actionNumber": 2,
"documentsLabel": "round1_task2_action2_results",
"actionId": null,
"actionMethod": null,
"actionName": null,
"success": null,
"documents": []
}

View file

@ -0,0 +1 @@
🔁 Step 2/5: ai.process → ✅

View file

@ -0,0 +1,19 @@
{
"id": "msg_5a285839-6b7c-48a0-811f-c35439afa498",
"workflowId": "4886a461-687b-4980-853d-91251f9424ac",
"parentMessageId": null,
"message": "Workflow completed.\n\nProcessed 1 user inputs and generated 6 responses.\nWorkflow status: running",
"role": "assistant",
"status": "last",
"sequenceNr": 11,
"publishedAt": 1759522146.9623349,
"roundNumber": 1,
"taskNumber": 0,
"actionNumber": 0,
"documentsLabel": "workflow_feedback",
"actionId": null,
"actionMethod": null,
"actionName": null,
"success": null,
"documents": []
}

View file

@ -0,0 +1,4 @@
Workflow completed.
Processed 1 user inputs and generated 6 responses.
Workflow status: running