finished email nodes

This commit is contained in:
idittrich-valueon 2026-03-22 18:20:31 +01:00
parent 04305c5f95
commit f808d63c25
47 changed files with 3077 additions and 286 deletions

4
app.py
View file

@ -21,6 +21,8 @@ from datetime import datetime
from modules.shared.configuration import APP_CONFIG from modules.shared.configuration import APP_CONFIG
from modules.shared.eventManagement import eventManager from modules.shared.eventManagement import eventManager
from modules.workflows.automation import subAutomationSchedule from modules.workflows.automation import subAutomationSchedule
from modules.features.automation2.emailPoller import start as startAutomation2EmailPoller
from modules.features.automation2.emailPoller import stop as stopAutomation2EmailPoller
from modules.interfaces.interfaceDbApp import getRootInterface from modules.interfaces.interfaceDbApp import getRootInterface
from modules.system.registry import loadFeatureMainModules from modules.system.registry import loadFeatureMainModules
@ -354,6 +356,7 @@ async def lifespan(app: FastAPI):
# --- Init Managers --- # --- Init Managers ---
subAutomationSchedule.start(eventUser) # Automation scheduler subAutomationSchedule.start(eventUser) # Automation scheduler
# Automation2 email poller: started on-demand when a run pauses for email.checkEmail
eventManager.start() eventManager.start()
# Register audit log cleanup scheduler # Register audit log cleanup scheduler
@ -382,6 +385,7 @@ async def lifespan(app: FastAPI):
yield yield
# --- Stop Managers --- # --- Stop Managers ---
stopAutomation2EmailPoller(eventUser) # Automation2 email poller (no-op if not running)
eventManager.stop() eventManager.stop()
subAutomationSchedule.stop(eventUser) # Automation scheduler subAutomationSchedule.stop(eventUser) # Automation scheduler

View file

@ -0,0 +1,268 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
Background email poller for automation2.
Checks paused runs waiting for email (email.checkEmail node) and resumes when a new matching email arrives.
"""
import asyncio
import json
import logging
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, Optional
logger = logging.getLogger(__name__)
# Job ID for scheduler
POLLER_JOB_ID = "automation2_email_poller"
POLL_INTERVAL_MINUTES = 2
async def _pollEmailWaits(eventUser) -> None:
    """
    Poll for new emails for runs waiting on email.checkEmail.

    Uses eventUser for DB access; loads the owner user for each run to call
    readEmails. Stops the poller when no runs are waiting.
    """
    try:
        # Lazy imports: these modules pull in heavy feature code and would
        # create import cycles if imported at module load time.
        from modules.features.automation2.interfaceFeatureAutomation2 import getAutomation2Interface
        from modules.features.automation2.mainAutomation2 import getAutomation2Services
        from modules.workflows.automation2.executionEngine import executeGraph
        from modules.workflows.processing.shared.methodDiscovery import discoverMethods
        from modules.interfaces.interfaceDbApp import getRootInterface

        root = getRootInterface()
        if not root:
            logger.warning("Email poller: root interface not available")
            return

        # Use eventUser - getRunsWaitingForEmail queries by status only
        a2 = getAutomation2Interface(eventUser, mandateId="", featureInstanceId="")
        runs = a2.getRunsWaitingForEmail()
        if not runs:
            # No workflows waiting for email - stop the poller
            stop(eventUser)
            return
        logger.info("Automation2 email poller: checking %d run(s) waiting for email", len(runs))

        for run in runs:
            run_id = run.get("id")
            workflow_id = run.get("workflowId")
            context = run.get("context") or {}
            wait_config = context.get("waitConfig") or {}
            # FIX: read _nodeId via the None-guarded wait_config; the previous
            # context.get("waitConfig", {}).get(...) raised AttributeError when
            # the stored waitConfig was explicitly None.
            node_id = run.get("currentNodeId") or wait_config.get("_nodeId")
            owner_id = context.get("ownerId")
            mandate_id = context.get("mandateId")
            instance_id = context.get("instanceId")
            last_checked = context.get("lastCheckedAt")
            if not owner_id or not mandate_id or not instance_id or not workflow_id or not node_id:
                logger.warning("Email wait run %s missing ownerId/mandateId/instanceId/workflowId/nodeId - skipping", run_id)
                continue

            # First poll: use pausedAt (or now - 5 min) as baseline so we don't
            # miss emails that arrived between pause and first poll.
            if last_checked is None:
                paused_at = context.get("pausedAt")
                if paused_at:
                    baseline = paused_at
                else:
                    # Fallback: look back 5 minutes for runs created before pausedAt existed
                    baseline = (datetime.now(timezone.utc) - timedelta(minutes=5)).strftime("%Y-%m-%dT%H:%M:%SZ")
                last_checked = baseline

            # Load owner user (root interface has broad access)
            owner = root.getUser(owner_id) if hasattr(root, "getUser") else None
            if not owner:
                logger.warning("Email wait run %s: owner user %s not found", run_id, owner_id)
                continue

            # Get workflow (need scoped interface for mandate/instance)
            a2_scoped = getAutomation2Interface(eventUser, mandateId=mandate_id, featureInstanceId=instance_id)
            wf = a2_scoped.getWorkflow(workflow_id)
            if not wf or not wf.get("graph"):
                logger.warning("Email wait run %s: workflow %s not found or has no graph", run_id, workflow_id)
                continue

            # Only process runs paused at email.checkEmail; email.searchEmail
            # never waits - it searches everything immediately.
            nodes = (wf.get("graph") or {}).get("nodes") or []
            paused_node = next((n for n in nodes if n.get("id") == node_id), None)
            if paused_node and paused_node.get("type") == "email.searchEmail":
                logger.warning("Email wait run %s: paused at email.searchEmail (should not wait) skipping", run_id)
                continue

            services = getAutomation2Services(owner, mandateId=mandate_id, featureInstanceId=instance_id)
            discoverMethods(services)

            # Build filter with receivedDateTime so only emails received at or
            # after the baseline (i.e. new emails) are fetched server-side.
            base_filter = wait_config.get("filter") or ""
            dt_filter = f"receivedDateTime ge {last_checked}"
            combined_filter = f"({base_filter}) and {dt_filter}" if base_filter else dt_filter
            logger.debug("Email wait run %s: fetch filter (new emails only) %s", run_id, combined_filter)

            from modules.workflows.processing.core.actionExecutor import ActionExecutor
            executor = ActionExecutor(services)
            params = {
                "connectionReference": wait_config.get("connectionReference"),
                "folder": wait_config.get("folder", "Inbox"),
                "limit": min(int(wait_config.get("limit", 10)), 50),  # cap fetch size
                "filter": combined_filter,
            }
            try:
                result = await executor.executeAction("outlook", "readEmails", params)
            except Exception as e:
                logger.warning("Email wait run %s: readEmails failed: %s", run_id, e)
                continue

            # readEmails always returns 1 document (JSON wrapper); check actual email count
            email_count = 0
            if result and result.documents:
                doc = result.documents[0]
                meta = getattr(doc, "validationMetadata", None)
                if not meta and isinstance(doc, dict):
                    meta = doc.get("validationMetadata")
                if meta and isinstance(meta, dict):
                    email_count = int(meta.get("emailCount", 0))
                else:
                    try:
                        data = json.loads(getattr(doc, "documentData", "") or "{}")
                        email_count = len(data.get("emails", {}).get("emails", []))
                    except Exception:
                        pass

            if not result or not result.success or email_count == 0:
                # No new emails - persist lastCheckedAt so next poll uses this as baseline
                now_iso = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
                ctx = dict(context)
                ctx["lastCheckedAt"] = now_iso
                a2_scoped.updateRun(run_id, context=ctx)
                continue

            # Only pass NEW emails (receivedDateTime >= last_checked) - filter
            # again client-side as a safeguard on top of the server-side filter.
            doc = result.documents[0]
            raw_data = json.loads(getattr(doc, "documentData", "") or "{}")
            emails_data = raw_data.get("emails", {})
            all_emails = emails_data.get("emails", [])
            new_emails = [
                e for e in all_emails
                if (e.get("receivedDateTime") or "") >= last_checked
            ]
            if not new_emails:
                now_iso = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
                ctx = dict(context)
                ctx["lastCheckedAt"] = now_iso
                a2_scoped.updateRun(run_id, context=ctx)
                continue

            # Rebuild document with only new emails for downstream nodes
            result_data = dict(raw_data)
            result_data["emails"] = dict(emails_data)
            result_data["emails"]["emails"] = new_emails
            result_data["emails"]["count"] = len(new_emails)
            from modules.datamodels.datamodelChat import ActionDocument
            # FIX: getattr with a default (plus the same dict fallback used for
            # counting above); the previous getattr(doc, "validationMetadata")
            # without a default raised AttributeError when the attribute was absent.
            doc_meta = getattr(doc, "validationMetadata", None)
            if doc_meta is None and isinstance(doc, dict):
                doc_meta = doc.get("validationMetadata")
            filtered_doc = ActionDocument(
                documentName=getattr(doc, "documentName", "outlook_emails.json"),
                documentData=json.dumps(result_data, indent=2),
                mimeType=getattr(doc, "mimeType", "application/json"),
                validationMetadata={**(doc_meta or {}), "emailCount": len(new_emails)},
            )

            # Build node output in same format as ActionNodeExecutor for readEmails
            node_output = {
                "success": result.success,
                "error": result.error,
                "documents": [filtered_doc.model_dump() if hasattr(filtered_doc, "model_dump") else filtered_doc],
                "data": result.model_dump() if hasattr(result, "model_dump") else {"success": result.success, "error": result.error},
            }

            # Update lastCheckedAt before resume
            now_iso = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
            ctx = dict(context)
            ctx["lastCheckedAt"] = now_iso
            a2_scoped.updateRun(run_id, status="running", context=ctx)

            node_outputs = dict(run.get("nodeOutputs") or {})
            node_outputs[node_id] = node_output
            logger.info("Email wait run %s: found new email, resuming from node %s", run_id, node_id)
            resume_result = await executeGraph(
                graph=wf["graph"],
                services=services,
                workflowId=workflow_id,
                instanceId=instance_id,
                userId=owner_id,
                mandateId=mandate_id,
                automation2_interface=a2_scoped,
                initialNodeOutputs=node_outputs,
                startAfterNodeId=node_id,
                runId=run_id,
            )
            if resume_result.get("success"):
                logger.info("Email wait run %s: completed successfully", run_id)
            elif resume_result.get("paused"):
                logger.info("Email wait run %s: paused again (e.g. human task)", run_id)
            else:
                logger.warning("Email wait run %s: failed: %s", run_id, resume_result.get("error"))
    except Exception as e:
        logger.exception("Email poller failed: %s", e)
def _runPollSync(ev_user):
    """Synchronous scheduler entry point that drives the async poll.

    Safe to call from either the main event loop or a scheduler worker
    thread: when a loop is already running the poll is scheduled as a task
    (non-blocking); otherwise a fresh loop runs it to completion.
    """
    try:
        try:
            running_loop = asyncio.get_running_loop()
        except RuntimeError:
            # No running loop (worker thread) - run the coroutine here.
            asyncio.run(_pollEmailWaits(ev_user))
        else:
            # Already inside an event loop - schedule and return immediately.
            running_loop.create_task(_pollEmailWaits(ev_user))
    except Exception as e:
        logger.exception("Automation2 email poller job failed: %s", e)
def ensureRunning(eventUser) -> bool:
    """Make sure the poller is active.

    Invoked when a run pauses on email.checkEmail; simply delegates to
    start() and reports its result.
    """
    return start(eventUser)
def start(eventUser) -> bool:
    """Register the email poller interval job with the event manager.

    Returns True when the job was registered (and an immediate first poll
    was kicked off), False when eventUser is missing or registration raised.
    """
    if not eventUser:
        logger.warning("Automation2 email poller: no eventUser, not registering")
        return False
    try:
        from modules.shared.eventManagement import eventManager

        # Use sync wrapper - APScheduler may run jobs in thread pool where async doesn't work
        def job_func():
            _runPollSync(eventUser)

        eventManager.registerInterval(
            POLLER_JOB_ID,
            job_func,
            seconds=0,
            minutes=POLL_INTERVAL_MINUTES,
            hours=0,
        )
        logger.info("Automation2 email poller started (interval=%s min)", POLL_INTERVAL_MINUTES)
        # Run once immediately so we don't wait 2 minutes for the first check
        _runPollSync(eventUser)
        return True
    except Exception as e:
        logger.error("Failed to register automation2 email poller: %s", e)
        return False
def stop(eventUser) -> bool:
    """Remove the email poller job.

    Best-effort: always returns True so shutdown paths never fail just
    because the job was not registered.
    """
    try:
        from modules.shared.eventManagement import eventManager
        eventManager.remove(POLLER_JOB_ID)
        logger.info("Automation2 email poller removed")
    except Exception as e:
        logger.warning("Error removing automation2 email poller: %s", e)
    return True

View file

@ -194,6 +194,24 @@ class Automation2Objects:
) )
return [dict(r) for r in records] if records else [] return [dict(r) for r in records] if records else []
def getRunsWaitingForEmail(self) -> List[Dict[str, Any]]:
    """Get all paused runs waiting for a new email (for background poller)."""
    if not self.db._ensureTableExists(Automation2WorkflowRun):
        return []
    records = self.db.getRecordset(
        Automation2WorkflowRun,
        recordFilter={"status": "paused"},
    )
    if not records:
        return []
    # The DB filter only narrows by status; the email-wait marker lives in
    # the run context, so filter that here.
    rows = (dict(record) for record in records)
    return [
        row for row in rows
        if (row.get("context") or {}).get("waitReason") == "email"
    ]
# ------------------------------------------------------------------------- # -------------------------------------------------------------------------
# Human Tasks # Human Tasks
# ------------------------------------------------------------------------- # -------------------------------------------------------------------------

View file

@ -29,9 +29,14 @@ UI_OBJECTS = [
"label": {"en": "Editor", "de": "Editor", "fr": "Éditeur"}, "label": {"en": "Editor", "de": "Editor", "fr": "Éditeur"},
"meta": {"area": "editor"} "meta": {"area": "editor"}
}, },
{
"objectKey": "ui.feature.automation2.workflows",
"label": {"en": "Workflows", "de": "Workflows", "fr": "Workflows"},
"meta": {"area": "workflows"}
},
{ {
"objectKey": "ui.feature.automation2.workflows-tasks", "objectKey": "ui.feature.automation2.workflows-tasks",
"label": {"en": "Workflows & Tasks", "de": "Workflows & Tasks", "fr": "Workflows et tâches"}, "label": {"en": "Tasks", "de": "Tasks", "fr": "Tâches"},
"meta": {"area": "tasks"} "meta": {"area": "tasks"}
}, },
] ]
@ -64,6 +69,7 @@ TEMPLATE_ROLES = [
}, },
"accessRules": [ "accessRules": [
{"context": "UI", "item": "ui.feature.automation2.editor", "view": True}, {"context": "UI", "item": "ui.feature.automation2.editor", "view": True},
{"context": "UI", "item": "ui.feature.automation2.workflows", "view": True},
{"context": "UI", "item": "ui.feature.automation2.workflows-tasks", "view": True}, {"context": "UI", "item": "ui.feature.automation2.workflows-tasks", "view": True},
{"context": "RESOURCE", "item": "resource.feature.automation2.dashboard", "view": True}, {"context": "RESOURCE", "item": "resource.feature.automation2.dashboard", "view": True},
{"context": "RESOURCE", "item": "resource.feature.automation2.node-types", "view": True}, {"context": "RESOURCE", "item": "resource.feature.automation2.node-types", "view": True},
@ -94,7 +100,11 @@ def getAutomation2Services(
_workflow = workflow _workflow = workflow
if _workflow is None: if _workflow is None:
_workflow = type("_Placeholder", (), {"featureCode": FEATURE_CODE, "id": None, "workflowMode": None})() _workflow = type(
"_Placeholder",
(),
{"featureCode": FEATURE_CODE, "id": None, "workflowMode": None, "messages": []},
)()
ctx = ServiceCenterContext( ctx = ServiceCenterContext(
user=user, user=user,
@ -149,6 +159,16 @@ class _Automation2ServiceHub:
sharepoint = None sharepoint = None
async def onStart(eventUser) -> None:
    """Feature startup hook.

    Intentionally a no-op: the email poller is started on demand when a
    run pauses for email.checkEmail.
    """
    return None
async def onStop(eventUser) -> None:
    """Feature shutdown - remove email poller if running."""
    # Imported lazily; stop() is a no-op when the poller was never started.
    from modules.features.automation2.emailPoller import stop as _stopPoller
    _stopPoller(eventUser)
def getFeatureDefinition() -> Dict[str, Any]: def getFeatureDefinition() -> Dict[str, Any]:
"""Return the feature definition for registration.""" """Return the feature definition for registration."""
return { return {

View file

@ -5,5 +5,16 @@ from .triggers import TRIGGER_NODES
from .flow import FLOW_NODES from .flow import FLOW_NODES
from .data import DATA_NODES from .data import DATA_NODES
from .input import INPUT_NODES from .input import INPUT_NODES
from .ai import AI_NODES
from .email import EMAIL_NODES
from .sharepoint import SHAREPOINT_NODES
STATIC_NODE_TYPES = TRIGGER_NODES + FLOW_NODES + DATA_NODES + INPUT_NODES STATIC_NODE_TYPES = (
TRIGGER_NODES
+ FLOW_NODES
+ DATA_NODES
+ INPUT_NODES
+ AI_NODES
+ EMAIL_NODES
+ SHAREPOINT_NODES
)

View file

@ -0,0 +1,113 @@
# Copyright (c) 2025 Patrick Motsch
# AI node definitions - map to methodAi actions.
# Each entry is one editor node: "parameters" drives the node config UI,
# "_method"/"_action" select the backend action, and "_paramMap" maps a
# node parameter name to the action's expected argument name.
AI_NODES = [
    # ai.prompt -> methodAi "process": free-form prompt execution.
    {
        "id": "ai.prompt",
        "category": "ai",
        "label": {"en": "Prompt", "de": "Prompt", "fr": "Invite"},
        "description": {"en": "Enter a prompt and AI does something", "de": "Prompt eingeben und KI führt aus", "fr": "Entrer une invite et l'IA exécute"},
        "parameters": [
            {"name": "prompt", "type": "string", "required": True, "description": {"en": "AI prompt", "de": "KI-Prompt", "fr": "Invite IA"}},
            {"name": "resultType", "type": "string", "required": False, "description": {"en": "Output format (txt, json, md, etc.)", "de": "Ausgabeformat", "fr": "Format de sortie"}, "default": "txt"},
        ],
        "inputs": 1,
        "outputs": 1,
        "meta": {"icon": "mdi-robot", "color": "#9C27B0"},
        "_method": "ai",
        "_action": "process",
        "_paramMap": {"prompt": "aiPrompt"},
    },
    # ai.webResearch -> "webResearch": query forwarded as the action prompt.
    {
        "id": "ai.webResearch",
        "category": "ai",
        "label": {"en": "Web Research", "de": "Web-Recherche", "fr": "Recherche web"},
        "description": {"en": "Research on the web", "de": "Recherche im Web", "fr": "Recherche sur le web"},
        "parameters": [
            {"name": "query", "type": "string", "required": True, "description": {"en": "Research query", "de": "Recherche-Anfrage", "fr": "Requête de recherche"}},
        ],
        "inputs": 1,
        "outputs": 1,
        "meta": {"icon": "mdi-magnify", "color": "#9C27B0"},
        "_method": "ai",
        "_action": "webResearch",
        "_paramMap": {"query": "prompt"},
    },
    # ai.summarizeDocument -> "summarizeDocument": no mapped params; the
    # document itself is taken from the incoming node connection.
    {
        "id": "ai.summarizeDocument",
        "category": "ai",
        "label": {"en": "Summarize Document", "de": "Dokument zusammenfassen", "fr": "Résumer document"},
        "description": {"en": "Summarize document content", "de": "Dokumentinhalt zusammenfassen", "fr": "Résumer le contenu du document"},
        "parameters": [
            {"name": "summaryLength", "type": "string", "required": False, "description": {"en": "Short, medium, or long", "de": "Kurz, mittel oder lang", "fr": "Court, moyen ou long"}, "default": "medium"},
        ],
        "inputs": 1,
        "outputs": 1,
        "meta": {"icon": "mdi-file-document-outline", "color": "#9C27B0"},
        "_method": "ai",
        "_action": "summarizeDocument",
        "_paramMap": {},
    },
    # ai.translateDocument -> "translateDocument".
    {
        "id": "ai.translateDocument",
        "category": "ai",
        "label": {"en": "Translate Document", "de": "Dokument übersetzen", "fr": "Traduire document"},
        "description": {"en": "Translate document to target language", "de": "Dokument in Zielsprache übersetzen", "fr": "Traduire le document"},
        "parameters": [
            {"name": "targetLanguage", "type": "string", "required": True, "description": {"en": "Target language (e.g. en, de, fr)", "de": "Zielsprache", "fr": "Langue cible"}},
        ],
        "inputs": 1,
        "outputs": 1,
        "meta": {"icon": "mdi-translate", "color": "#9C27B0"},
        "_method": "ai",
        "_action": "translateDocument",
        "_paramMap": {"targetLanguage": "targetLanguage"},
    },
    # ai.convertDocument -> "convertDocument".
    {
        "id": "ai.convertDocument",
        "category": "ai",
        "label": {"en": "Convert Document", "de": "Dokument konvertieren", "fr": "Convertir document"},
        "description": {"en": "Convert document to another format", "de": "Dokument in anderes Format konvertieren", "fr": "Convertir le document"},
        "parameters": [
            {"name": "targetFormat", "type": "string", "required": True, "description": {"en": "Target format (pdf, docx, txt, etc.)", "de": "Zielformat", "fr": "Format cible"}},
        ],
        "inputs": 1,
        "outputs": 1,
        "meta": {"icon": "mdi-file-convert", "color": "#9C27B0"},
        "_method": "ai",
        "_action": "convertDocument",
        "_paramMap": {"targetFormat": "targetFormat"},
    },
    # ai.generateDocument -> "generateDocument".
    {
        "id": "ai.generateDocument",
        "category": "ai",
        "label": {"en": "Generate Document", "de": "Dokument generieren", "fr": "Générer document"},
        "description": {"en": "Generate document from prompt", "de": "Dokument aus Prompt generieren", "fr": "Générer un document"},
        "parameters": [
            {"name": "prompt", "type": "string", "required": True, "description": {"en": "Generation prompt", "de": "Generierungs-Prompt", "fr": "Invite de génération"}},
            {"name": "format", "type": "string", "required": False, "description": {"en": "Output format", "de": "Ausgabeformat", "fr": "Format de sortie"}, "default": "docx"},
        ],
        "inputs": 1,
        "outputs": 1,
        "meta": {"icon": "mdi-file-plus", "color": "#9C27B0"},
        "_method": "ai",
        "_action": "generateDocument",
        "_paramMap": {"prompt": "prompt", "format": "format"},
    },
    # ai.generateCode -> "generateCode".
    {
        "id": "ai.generateCode",
        "category": "ai",
        "label": {"en": "Generate Code", "de": "Code generieren", "fr": "Générer code"},
        "description": {"en": "Generate code from description", "de": "Code aus Beschreibung generieren", "fr": "Générer du code"},
        "parameters": [
            {"name": "prompt", "type": "string", "required": True, "description": {"en": "Code generation prompt", "de": "Code-Generierungs-Prompt", "fr": "Invite de génération de code"}},
            {"name": "language", "type": "string", "required": False, "description": {"en": "Programming language", "de": "Programmiersprache", "fr": "Langage de programmation"}, "default": "python"},
        ],
        "inputs": 1,
        "outputs": 1,
        "meta": {"icon": "mdi-code-tags", "color": "#9C27B0"},
        "_method": "ai",
        "_action": "generateCode",
        "_paramMap": {"prompt": "prompt", "language": "language"},
    },
]

View file

@ -0,0 +1,70 @@
# Copyright (c) 2025 Patrick Motsch
# Email node definitions - map to methodOutlook actions.
# Use connectionId from user connections (like AI workspace sources).
# "_paramMap" maps node parameter names to the outlook action's argument
# names; parameters without a mapping (fromAddress, subjectContains, ...)
# are presumably folded into the filter elsewhere - TODO confirm in executor.
EMAIL_NODES = [
    # email.checkEmail -> outlook "readEmails". NOTE: this is the node the
    # background email poller resumes paused runs for.
    {
        "id": "email.checkEmail",
        "category": "email",
        "label": {"en": "Check Email", "de": "E-Mail prüfen", "fr": "Vérifier email"},
        "description": {"en": "Check for new emails (general or from specific account)", "de": "Neue E-Mails prüfen", "fr": "Vérifier les nouveaux emails"},
        "parameters": [
            {"name": "connectionId", "type": "string", "required": True, "description": {"en": "Email account connection", "de": "E-Mail-Konto Verbindung", "fr": "Connexion compte email"}},
            {"name": "folder", "type": "string", "required": False, "description": {"en": "Folder (e.g. Inbox)", "de": "Ordner (z.B. Posteingang)", "fr": "Dossier (ex. Boîte de réception)"}, "default": "Inbox"},
            {"name": "limit", "type": "number", "required": False, "description": {"en": "Max emails to fetch", "de": "Max E-Mails", "fr": "Max emails"}, "default": 100},
            {"name": "fromAddress", "type": "string", "required": False, "description": {"en": "Only emails from this address", "de": "Nur E-Mails von dieser Adresse", "fr": "Seulement les e-mails de cette adresse"}, "default": ""},
            {"name": "subjectContains", "type": "string", "required": False, "description": {"en": "Subject must contain this text", "de": "Betreff muss diesen Text enthalten", "fr": "Le sujet doit contenir ce texte"}, "default": ""},
            {"name": "hasAttachment", "type": "boolean", "required": False, "description": {"en": "Only emails with attachments", "de": "Nur E-Mails mit Anhängen", "fr": "Seulement les e-mails avec pièces jointes"}, "default": False},
            {"name": "filter", "type": "string", "required": False, "description": {"en": "Advanced: raw filter (overrides above if set)", "de": "Erweitert: Filter-Text (überschreibt obige)", "fr": "Avancé: filtre brut"}, "default": ""},
        ],
        "inputs": 1,
        "outputs": 1,
        "meta": {"icon": "mdi-email-check", "color": "#1976D2"},
        "_method": "outlook",
        "_action": "readEmails",
        "_paramMap": {"connectionId": "connectionReference", "folder": "folder", "limit": "limit", "filter": "filter"},
    },
    # email.searchEmail -> outlook "searchEmails": immediate search, never waits.
    {
        "id": "email.searchEmail",
        "category": "email",
        "label": {"en": "Search Email", "de": "E-Mail suchen", "fr": "Rechercher email"},
        "description": {"en": "Search or find emails", "de": "E-Mails suchen oder finden", "fr": "Rechercher des emails"},
        "parameters": [
            {"name": "connectionId", "type": "string", "required": True, "description": {"en": "Email account connection", "de": "E-Mail-Konto Verbindung", "fr": "Connexion compte email"}},
            {"name": "query", "type": "string", "required": False, "description": {"en": "General search term (searches subject, body, from)", "de": "Suchbegriff (durchsucht Betreff, Inhalt, Absender)", "fr": "Terme de recherche (sujet, corps, expéditeur)"}, "default": ""},
            {"name": "folder", "type": "string", "required": False, "description": {"en": "Folder to search", "de": "Ordner zum Suchen", "fr": "Dossier à rechercher"}, "default": "Inbox"},
            {"name": "limit", "type": "number", "required": False, "description": {"en": "Max emails to return", "de": "Max E-Mails", "fr": "Max emails"}, "default": 100},
            {"name": "fromAddress", "type": "string", "required": False, "description": {"en": "Only emails from this address", "de": "Nur E-Mails von dieser Adresse", "fr": "Seulement les e-mails de cette adresse"}, "default": ""},
            {"name": "toAddress", "type": "string", "required": False, "description": {"en": "Only emails to this recipient", "de": "Nur E-Mails an diesen Empfänger", "fr": "Seulement les e-mails à ce destinataire"}, "default": ""},
            {"name": "subjectContains", "type": "string", "required": False, "description": {"en": "Subject must contain this text", "de": "Betreff muss diesen Text enthalten", "fr": "Le sujet doit contenir ce texte"}, "default": ""},
            {"name": "bodyContains", "type": "string", "required": False, "description": {"en": "Body/content must contain this text", "de": "Inhalt muss diesen Text enthalten", "fr": "Le corps doit contenir ce texte"}, "default": ""},
            {"name": "hasAttachment", "type": "boolean", "required": False, "description": {"en": "Only emails with attachments", "de": "Nur E-Mails mit Anhängen", "fr": "Seulement les e-mails avec pièces jointes"}, "default": False},
            {"name": "filter", "type": "string", "required": False, "description": {"en": "Advanced: raw KQL (overrides above if set)", "de": "Erweitert: KQL-Filter (überschreibt obige)", "fr": "Avancé: filtre KQL brut"}, "default": ""},
        ],
        "inputs": 1,
        "outputs": 1,
        "meta": {"icon": "mdi-email-search", "color": "#1976D2"},
        "_method": "outlook",
        "_action": "searchEmails",
        "_paramMap": {"connectionId": "connectionReference", "query": "query", "folder": "folder", "limit": "limit", "filter": "filter"},
    },
    # email.draftEmail -> outlook "composeAndDraftEmailWithContext";
    # subject/body are passed via "_contextFrom" rather than _paramMap.
    {
        "id": "email.draftEmail",
        "category": "email",
        "label": {"en": "Draft Email", "de": "E-Mail entwerfen", "fr": "Brouillon email"},
        "description": {"en": "Create a draft email", "de": "E-Mail-Entwurf erstellen", "fr": "Créer un brouillon d'email"},
        "parameters": [
            {"name": "connectionId", "type": "string", "required": True, "description": {"en": "Email account connection", "de": "E-Mail-Konto Verbindung", "fr": "Connexion compte email"}},
            {"name": "subject", "type": "string", "required": True, "description": {"en": "Email subject", "de": "E-Mail-Betreff", "fr": "Sujet"}},
            {"name": "body", "type": "string", "required": True, "description": {"en": "Email body", "de": "E-Mail-Text", "fr": "Corps de l'email"}},
            {"name": "to", "type": "string", "required": False, "description": {"en": "Recipient(s)", "de": "Empfänger", "fr": "Destinataire(s)"}, "default": ""},
        ],
        "inputs": 1,
        "outputs": 1,
        "meta": {"icon": "mdi-email-edit", "color": "#1976D2"},
        "_method": "outlook",
        "_action": "composeAndDraftEmailWithContext",
        "_paramMap": {"connectionId": "connectionReference", "to": "to"},
        "_contextFrom": ["subject", "body"],
    },
]

View file

@ -0,0 +1,111 @@
# Copyright (c) 2025 Patrick Motsch
# SharePoint node definitions - map to methodSharepoint actions.
# Use connectionId and path from connection selector (like workflow folder view).
# "_paramMap" maps node parameter names to the sharepoint action's argument names.
SHAREPOINT_NODES = [
    # sharepoint.findFile -> "findDocumentPath": search by query or path.
    {
        "id": "sharepoint.findFile",
        "category": "sharepoint",
        "label": {"en": "Find File", "de": "Datei finden", "fr": "Trouver fichier"},
        "description": {"en": "Find file by path or search", "de": "Datei nach Pfad oder Suche finden", "fr": "Trouver fichier par chemin ou recherche"},
        "parameters": [
            {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
            {"name": "searchQuery", "type": "string", "required": True, "description": {"en": "Search query or path", "de": "Suchanfrage oder Pfad", "fr": "Requête ou chemin"}},
            {"name": "site", "type": "string", "required": False, "description": {"en": "Optional site hint", "de": "Optionaler Site-Hinweis", "fr": "Indication de site"}, "default": ""},
            {"name": "maxResults", "type": "number", "required": False, "description": {"en": "Max results", "de": "Max Ergebnisse", "fr": "Max résultats"}, "default": 1000},
        ],
        "inputs": 1,
        "outputs": 1,
        "meta": {"icon": "mdi-file-search", "color": "#0078D4"},
        "_method": "sharepoint",
        "_action": "findDocumentPath",
        "_paramMap": {"connectionId": "connectionReference", "searchQuery": "searchQuery", "site": "site", "maxResults": "maxResults"},
    },
    # sharepoint.readFile -> "readDocuments": path is forwarded as pathQuery.
    {
        "id": "sharepoint.readFile",
        "category": "sharepoint",
        "label": {"en": "Read File", "de": "Datei lesen", "fr": "Lire fichier"},
        "description": {"en": "Extract content from file", "de": "Inhalt aus Datei extrahieren", "fr": "Extraire le contenu du fichier"},
        "parameters": [
            {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
            {"name": "path", "type": "string", "required": True, "description": {"en": "File path or documentList from find file", "de": "Dateipfad oder documentList von Find", "fr": "Chemin ou documentList"}},
        ],
        "inputs": 1,
        "outputs": 1,
        "meta": {"icon": "mdi-file-document", "color": "#0078D4"},
        "_method": "sharepoint",
        "_action": "readDocuments",
        "_paramMap": {"connectionId": "connectionReference", "path": "pathQuery"},
    },
    # sharepoint.uploadFile -> "uploadFile": requires explicit siteId.
    {
        "id": "sharepoint.uploadFile",
        "category": "sharepoint",
        "label": {"en": "Upload File", "de": "Datei hochladen", "fr": "Téléverser fichier"},
        "description": {"en": "Upload file to SharePoint", "de": "Datei zu SharePoint hochladen", "fr": "Téléverser fichier vers SharePoint"},
        "parameters": [
            {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
            {"name": "folderPath", "type": "string", "required": True, "description": {"en": "Target folder path", "de": "Zielordner-Pfad", "fr": "Chemin du dossier cible"}},
            {"name": "siteId", "type": "string", "required": True, "description": {"en": "SharePoint site ID", "de": "SharePoint Site-ID", "fr": "ID du site SharePoint"}},
            {"name": "fileName", "type": "string", "required": True, "description": {"en": "File name", "de": "Dateiname", "fr": "Nom du fichier"}},
        ],
        "inputs": 1,
        "outputs": 1,
        "meta": {"icon": "mdi-upload", "color": "#0078D4"},
        "_method": "sharepoint",
        "_action": "uploadFile",
        "_paramMap": {"connectionId": "connectionReference", "folderPath": "folderPath", "siteId": "siteId", "fileName": "fileName"},
    },
    # sharepoint.listFiles -> "listDocuments": path defaults to root ("/").
    {
        "id": "sharepoint.listFiles",
        "category": "sharepoint",
        "label": {"en": "List Files", "de": "Dateien auflisten", "fr": "Lister fichiers"},
        "description": {"en": "List files in folder or SharePoint", "de": "Dateien in Ordner oder SharePoint auflisten", "fr": "Lister les fichiers dans un dossier"},
        "parameters": [
            {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
            {"name": "path", "type": "string", "required": False, "description": {"en": "Folder path (e.g. /sites/SiteName/Shared Documents)", "de": "Ordnerpfad", "fr": "Chemin du dossier"}, "default": "/"},
        ],
        "inputs": 1,
        "outputs": 1,
        "meta": {"icon": "mdi-folder-open", "color": "#0078D4"},
        "_method": "sharepoint",
        "_action": "listDocuments",
        "_paramMap": {"connectionId": "connectionReference", "path": "pathQuery"},
    },
    # sharepoint.downloadFile -> "downloadFileByPath".
    {
        "id": "sharepoint.downloadFile",
        "category": "sharepoint",
        "label": {"en": "Download File", "de": "Datei herunterladen", "fr": "Télécharger fichier"},
        "description": {"en": "Download file from path", "de": "Datei vom Pfad herunterladen", "fr": "Télécharger le fichier"},
        "parameters": [
            {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
            {"name": "siteId", "type": "string", "required": True, "description": {"en": "SharePoint site ID", "de": "SharePoint Site-ID", "fr": "ID du site"}},
            {"name": "filePath", "type": "string", "required": True, "description": {"en": "File path", "de": "Dateipfad", "fr": "Chemin du fichier"}},
        ],
        "inputs": 1,
        "outputs": 1,
        "meta": {"icon": "mdi-download", "color": "#0078D4"},
        "_method": "sharepoint",
        "_action": "downloadFileByPath",
        "_paramMap": {"connectionId": "connectionReference", "siteId": "siteId", "filePath": "filePath"},
    },
    # sharepoint.copyFile -> "copyFile": source/destination given as
    # separate folder + file-name pairs.
    {
        "id": "sharepoint.copyFile",
        "category": "sharepoint",
        "label": {"en": "Copy File", "de": "Datei kopieren", "fr": "Copier fichier"},
        "description": {"en": "Copy file to destination", "de": "Datei an Ziel kopieren", "fr": "Copier le fichier"},
        "parameters": [
            {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
            {"name": "siteId", "type": "string", "required": True, "description": {"en": "SharePoint site ID", "de": "SharePoint Site-ID", "fr": "ID du site"}},
            {"name": "sourceFolder", "type": "string", "required": True, "description": {"en": "Source folder path", "de": "Quellordner-Pfad", "fr": "Chemin dossier source"}},
            {"name": "sourceFile", "type": "string", "required": True, "description": {"en": "Source file name", "de": "Quelldatei-Name", "fr": "Nom fichier source"}},
            {"name": "destFolder", "type": "string", "required": True, "description": {"en": "Destination folder path", "de": "Zielordner-Pfad", "fr": "Chemin dossier cible"}},
            {"name": "destFile", "type": "string", "required": True, "description": {"en": "Destination file name", "de": "Zieldatei-Name", "fr": "Nom fichier cible"}},
        ],
        "inputs": 1,
        "outputs": 1,
        "meta": {"icon": "mdi-content-copy", "color": "#0078D4"},
        "_method": "sharepoint",
        "_action": "copyFile",
        "_paramMap": {"connectionId": "connectionReference", "siteId": "siteId", "sourceFolder": "sourceFolder", "sourceFile": "sourceFile", "destFolder": "destFolder", "destFile": "destFile"},
    },
]

View file

@ -1,138 +1,37 @@
# Copyright (c) 2025 Patrick Motsch # Copyright (c) 2025 Patrick Motsch
# All rights reserved. # All rights reserved.
""" """
Node Type Registry for automation2 - merges static definitions with dynamic I/O nodes from methodDiscovery. Node Type Registry for automation2 - static node definitions (ai, email, sharepoint, trigger, flow, data, input).
Nodes are defined first; IO/method actions are used at execution time.
""" """
import logging import logging
from typing import Dict, List, Any, Optional from typing import Dict, List, Any
from modules.features.automation2.nodeDefinitions import STATIC_NODE_TYPES from modules.features.automation2.nodeDefinitions import STATIC_NODE_TYPES
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
# Short method names that map to I/O node category display
METHOD_LABELS = {
"outlook": {"en": "Outlook", "de": "Outlook", "fr": "Outlook"},
"sharepoint": {"en": "SharePoint", "de": "SharePoint", "fr": "SharePoint"},
"context": {"en": "Context", "de": "Kontext", "fr": "Contexte"},
"ai": {"en": "AI", "de": "KI", "fr": "IA"},
"trustee": {"en": "Trustee", "de": "Trustee", "fr": "Trustee"},
"jira": {"en": "Jira", "de": "Jira", "fr": "Jira"},
"chatbot": {"en": "Chatbot", "de": "Chatbot", "fr": "Chatbot"},
}
def _actionNameToLabel(actionName: str) -> str:
"""Convert camelCase actionName to readable label."""
import re
parts = re.sub(r"([A-Z])", r" \1", actionName).strip().split()
return " ".join(p.capitalize() for p in parts) if parts else actionName
def _buildIoNodeFromAction(
shortMethod: str,
actionName: str,
actionDef: Dict[str, Any],
language: str = "en",
) -> Dict[str, Any]:
"""Build a single I/O node definition from a method action."""
lang = language if language in ("en", "de", "fr") else "en"
methodLabel = METHOD_LABELS.get(shortMethod, {}).get(lang, shortMethod)
actionLabel = _actionNameToLabel(actionName)
nodeId = f"io.{shortMethod}.{actionName}"
nodeLabel = {l: f"{METHOD_LABELS.get(shortMethod, {}).get(l, shortMethod)} - {_actionNameToLabel(actionName)}" for l in ("en", "de", "fr")}
parameters = []
paramDefs = actionDef.get("parameters", {})
for paramName, paramInfo in paramDefs.items():
if isinstance(paramInfo, dict):
p = {
"name": paramName,
"type": paramInfo.get("type", "str"),
"required": paramInfo.get("required", False),
"description": paramInfo.get("description", ""),
}
if paramInfo.get("default") is not None:
p["default"] = paramInfo["default"]
parameters.append(p)
else:
parameters.append({
"name": paramName,
"type": "str",
"required": False,
"description": str(paramInfo),
})
return {
"id": nodeId,
"category": "io",
"label": nodeLabel,
"description": actionDef.get("description") or nodeLabel,
"parameters": parameters,
"inputs": 1,
"outputs": 1,
"executor": "io",
"meta": {"icon": "mdi-connection", "color": "#00BCD4", "method": shortMethod, "action": actionName},
}
def getIoNodesFromMethods(methods: Dict[str, Any], language: str = "en") -> List[Dict[str, Any]]:
"""
Build I/O node types from methodDiscovery.methods.
methods: { methodName: { instance, actions: { actionName: { description, parameters, method } } } }
Returns list of node definitions for io.{shortMethod}.{actionName}.
"""
ioNodes = []
processed = set()
for methodName, methodInfo in methods.items():
if not methodName.startswith("Method"):
continue
shortMethod = methodName.replace("Method", "").lower()
if shortMethod in processed:
continue
processed.add(shortMethod)
methodInstance = methodInfo.get("instance")
if not methodInstance:
continue
actions = methodInstance.actions
for actionName, actionDef in actions.items():
if not isinstance(actionDef, dict):
continue
try:
node = _buildIoNodeFromAction(shortMethod, actionName, actionDef, language)
ioNodes.append(node)
except Exception as e:
logger.warning(f"Failed to build I/O node io.{shortMethod}.{actionName}: {e}")
continue
return ioNodes
def getNodeTypes( def getNodeTypes(
services: Any, services: Any = None,
language: str = "en", language: str = "en",
) -> List[Dict[str, Any]]: ) -> List[Dict[str, Any]]:
""" """
Return merged node types: static (trigger, flow, data) + dynamic I/O nodes from methodDiscovery. Return static node types. No dynamic I/O derivation from methodDiscovery.
services: Hub from getAutomation2Services (needed for discoverMethods + RBAC-filtered actions). services: Optional (kept for API compatibility, not used).
""" """
from modules.workflows.processing.shared.methodDiscovery import discoverMethods, methods return list(STATIC_NODE_TYPES)
discoverMethods(services)
static = list(STATIC_NODE_TYPES)
ioNodes = getIoNodesFromMethods(methods, language)
return static + ioNodes
def _localizeNode(node: Dict[str, Any], language: str) -> Dict[str, Any]: def _localizeNode(node: Dict[str, Any], language: str) -> Dict[str, Any]:
"""Apply language to label/description/parameters.""" """Apply language to label/description/parameters."""
lang = language if language in ("en", "de", "fr") else "en" lang = language if language in ("en", "de", "fr") else "en"
out = dict(node) out = dict(node)
# Strip internal keys for API response
for key in list(out.keys()):
if key.startswith("_"):
del out[key]
if isinstance(node.get("label"), dict): if isinstance(node.get("label"), dict):
out["label"] = node["label"].get(lang, node["label"].get("en", str(node["label"]))) out["label"] = node["label"].get(lang, node["label"].get("en", str(node["label"])))
if isinstance(node.get("description"), dict): if isinstance(node.get("description"), dict):
@ -161,6 +60,22 @@ def getNodeTypesForApi(
{"id": "input", "label": {"en": "Input/Human", "de": "Eingabe/Mensch", "fr": "Entrée/Humain"}}, {"id": "input", "label": {"en": "Input/Human", "de": "Eingabe/Mensch", "fr": "Entrée/Humain"}},
{"id": "flow", "label": {"en": "Flow", "de": "Ablauf", "fr": "Flux"}}, {"id": "flow", "label": {"en": "Flow", "de": "Ablauf", "fr": "Flux"}},
{"id": "data", "label": {"en": "Data", "de": "Daten", "fr": "Données"}}, {"id": "data", "label": {"en": "Data", "de": "Daten", "fr": "Données"}},
{"id": "io", "label": {"en": "I/O", "de": "E/A", "fr": "E/S"}}, {"id": "ai", "label": {"en": "AI", "de": "KI", "fr": "IA"}},
{"id": "email", "label": {"en": "Email", "de": "E-Mail", "fr": "Email"}},
{"id": "sharepoint", "label": {"en": "SharePoint", "de": "SharePoint", "fr": "SharePoint"}},
] ]
return {"nodeTypes": localized, "categories": categories} return {"nodeTypes": localized, "categories": categories}
def getNodeTypeToMethodAction() -> Dict[str, tuple]:
"""
Mapping from node type id to (method, action) for execution.
Used by ActionNodeExecutor.
"""
mapping = {}
for node in STATIC_NODE_TYPES:
method = node.get("_method")
action = node.get("_action")
if method and action:
mapping[node["id"]] = (method, action)
return mapping

View file

@ -1,11 +1,12 @@
# Copyright (c) 2025 Patrick Motsch # Copyright (c) 2025 Patrick Motsch
# All rights reserved. # All rights reserved.
""" """
Automation2 routes - node-types, execute, workflows, runs, tasks. Automation2 routes - node-types, execute, workflows, runs, tasks, connections, browse.
""" """
import logging import logging
from fastapi import APIRouter, Depends, Path, Query, Body, Request, HTTPException from fastapi import APIRouter, Depends, Path, Query, Body, Request, HTTPException
from fastapi.responses import JSONResponse
from modules.auth import limiter, getRequestContext, RequestContext from modules.auth import limiter, getRequestContext, RequestContext
from modules.features.automation2.mainAutomation2 import getAutomation2Services from modules.features.automation2.mainAutomation2 import getAutomation2Services
@ -101,6 +102,10 @@ async def post_execute(
mandateId=mandateId, mandateId=mandateId,
featureInstanceId=instanceId, featureInstanceId=instanceId,
) )
# Ensure workflow methods (outlook, ai, sharepoint, etc.) are discovered for ActionExecutor
from modules.workflows.processing.shared.methodDiscovery import discoverMethods
discoverMethods(services)
graph = body.get("graph") or body graph = body.get("graph") or body
workflowId = body.get("workflowId") workflowId = body.get("workflowId")
if workflowId: if workflowId:
@ -109,6 +114,12 @@ async def post_execute(
if wf and wf.get("graph"): if wf and wf.get("graph"):
graph = wf["graph"] graph = wf["graph"]
logger.info("automation2 execute: loaded graph from workflow %s", workflowId) logger.info("automation2 execute: loaded graph from workflow %s", workflowId)
# Use transient workflowId when none provided (e.g. execute from editor without save)
# Required for email.checkEmail pause/resume - run must be created
if not workflowId:
import uuid
workflowId = f"transient-{uuid.uuid4().hex[:12]}"
logger.info("automation2 execute: using transient workflowId=%s", workflowId)
nodes_count = len(graph.get("nodes") or []) nodes_count = len(graph.get("nodes") or [])
connections_count = len(graph.get("connections") or []) connections_count = len(graph.get("connections") or [])
logger.info( logger.info(
@ -118,7 +129,7 @@ async def post_execute(
workflowId, workflowId,
mandateId, mandateId,
) )
a2_interface = getAutomation2Interface(context.user, mandateId, instanceId) if workflowId else None a2_interface = getAutomation2Interface(context.user, mandateId, instanceId)
result = await executeGraph( result = await executeGraph(
graph=graph, graph=graph,
services=services, services=services,
@ -139,11 +150,190 @@ async def post_execute(
return result return result
# -------------------------------------------------------------------------
# Connections and Browse (for Email/SharePoint node config - like workspace)
# -------------------------------------------------------------------------
def _buildResolverDbInterface(chatService):
"""Build a DB adapter that ConnectorResolver can use to load UserConnections."""
class _ResolverDbAdapter:
def __init__(self, appInterface):
self._app = appInterface
def getUserConnection(self, connectionId: str):
if hasattr(self._app, "getUserConnectionById"):
return self._app.getUserConnectionById(connectionId)
return None
appIf = getattr(chatService, "interfaceDbApp", None)
if appIf:
return _ResolverDbAdapter(appIf)
return getattr(chatService, "interfaceDbComponent", None)
@router.get("/{instanceId}/connections")
@limiter.limit("300/minute")
def list_automation2_connections(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Return the user's active connections (UserConnections) for Email/SharePoint node config."""
mandateId = _validateInstanceAccess(instanceId, context)
from modules.serviceCenter import getService
from modules.serviceCenter.context import ServiceCenterContext
ctx = ServiceCenterContext(
user=context.user,
mandate_id=str(context.mandateId) if context.mandateId else mandateId,
feature_instance_id=instanceId,
)
chatService = getService("chat", ctx)
connections = chatService.getUserConnections()
items = []
for c in connections or []:
conn = c if isinstance(c, dict) else (c.model_dump() if hasattr(c, "model_dump") else {})
authority = conn.get("authority")
if hasattr(authority, "value"):
authority = authority.value
status = conn.get("status")
if hasattr(status, "value"):
status = status.value
items.append({
"id": conn.get("id"),
"authority": authority,
"externalUsername": conn.get("externalUsername"),
"externalEmail": conn.get("externalEmail"),
"status": status,
})
return {"connections": items}
@router.get("/{instanceId}/connections/{connectionId}/services")
@limiter.limit("120/minute")
async def list_connection_services(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
connectionId: str = Path(..., description="Connection ID"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Return the available services for a specific UserConnection."""
mandateId = _validateInstanceAccess(instanceId, context)
try:
from modules.connectors.connectorResolver import ConnectorResolver
from modules.serviceCenter import getService as getSvc
from modules.serviceCenter.context import ServiceCenterContext
ctx = ServiceCenterContext(
user=context.user,
mandate_id=str(context.mandateId) if context.mandateId else mandateId,
feature_instance_id=instanceId,
)
chatService = getSvc("chat", ctx)
securityService = getSvc("security", ctx)
dbInterface = _buildResolverDbInterface(chatService)
resolver = ConnectorResolver(securityService, dbInterface)
provider = await resolver.resolve(connectionId)
services = provider.getAvailableServices()
_serviceLabels = {
"sharepoint": "SharePoint",
"outlook": "Outlook",
"teams": "Teams",
"onedrive": "OneDrive",
"drive": "Google Drive",
"gmail": "Gmail",
"files": "Files (FTP)",
}
_serviceIcons = {
"sharepoint": "sharepoint",
"outlook": "mail",
"teams": "chat",
"onedrive": "cloud",
"drive": "cloud",
"gmail": "mail",
"files": "folder",
}
items = [
{"service": s, "label": _serviceLabels.get(s, s), "icon": _serviceIcons.get(s, "folder")}
for s in services
]
return {"services": items}
except Exception as e:
logger.error(f"Error listing services for connection {connectionId}: {e}")
return JSONResponse({"services": [], "error": str(e)}, status_code=400)
@router.get("/{instanceId}/connections/{connectionId}/browse")
@limiter.limit("300/minute")
async def browse_connection_service(
request: Request,
instanceId: str = Path(..., description="Feature instance ID"),
connectionId: str = Path(..., description="Connection ID"),
service: str = Query(..., description="Service name (e.g. sharepoint, onedrive, outlook)"),
path: str = Query("/", description="Path within the service to browse"),
context: RequestContext = Depends(getRequestContext),
) -> dict:
"""Browse folders/items within a connection's service at a given path."""
mandateId = _validateInstanceAccess(instanceId, context)
try:
from modules.connectors.connectorResolver import ConnectorResolver
from modules.serviceCenter import getService as getSvc
from modules.serviceCenter.context import ServiceCenterContext
ctx = ServiceCenterContext(
user=context.user,
mandate_id=str(context.mandateId) if context.mandateId else mandateId,
feature_instance_id=instanceId,
)
chatService = getSvc("chat", ctx)
securityService = getSvc("security", ctx)
dbInterface = _buildResolverDbInterface(chatService)
resolver = ConnectorResolver(securityService, dbInterface)
adapter = await resolver.resolveService(connectionId, service)
entries = await adapter.browse(path, filter=None)
items = []
for entry in (entries or []):
items.append({
"name": entry.name,
"path": entry.path,
"isFolder": entry.isFolder,
"size": entry.size,
"mimeType": entry.mimeType,
"metadata": entry.metadata if hasattr(entry, "metadata") else {},
})
return {"items": items, "path": path, "service": service}
except Exception as e:
logger.error(f"Error browsing {service} for connection {connectionId} at '{path}': {e}")
return JSONResponse({"items": [], "error": str(e)}, status_code=400)
# ------------------------------------------------------------------------- # -------------------------------------------------------------------------
# Workflow CRUD # Workflow CRUD
# ------------------------------------------------------------------------- # -------------------------------------------------------------------------
def _get_node_label_from_graph(graph: dict, nodeId: str) -> str:
"""Extract human-readable label for a node from graph."""
if not graph or not nodeId:
return nodeId or ""
nodes = graph.get("nodes") or []
for n in nodes:
if n.get("id") == nodeId:
params = n.get("parameters") or {}
config = params.get("config") or {}
if isinstance(config, dict):
label = config.get("title") or config.get("label")
else:
label = None
return (
n.get("title")
or label
or params.get("title")
or params.get("label")
or n.get("type", "")
or nodeId
)
return nodeId or ""
@router.get("/{instanceId}/workflows") @router.get("/{instanceId}/workflows")
@limiter.limit("60/minute") @limiter.limit("60/minute")
def get_workflows( def get_workflows(
@ -151,11 +341,41 @@ def get_workflows(
instanceId: str = Path(..., description="Feature instance ID"), instanceId: str = Path(..., description="Feature instance ID"),
context: RequestContext = Depends(getRequestContext), context: RequestContext = Depends(getRequestContext),
) -> dict: ) -> dict:
"""List all workflows for this feature instance.""" """List all workflows for this feature instance.
Enriches each workflow with runCount, isRunning, stuckAtNodeId, stuckAtNodeLabel,
createdAt, lastStartedAt.
"""
mandateId = _validateInstanceAccess(instanceId, context) mandateId = _validateInstanceAccess(instanceId, context)
a2 = getAutomation2Interface(context.user, mandateId, instanceId) a2 = getAutomation2Interface(context.user, mandateId, instanceId)
items = a2.getWorkflows() items = a2.getWorkflows()
return {"workflows": items} enriched = []
for wf in items:
wf_id = wf.get("id")
runs = a2.getRunsByWorkflow(wf_id) if wf_id else []
run_count = len(runs)
active_run = None
last_started_at = None
for r in runs:
ts = r.get("_createdAt")
if ts and (last_started_at is None or ts > last_started_at):
last_started_at = ts
if r.get("status") in ("running", "paused"):
active_run = r
stuck_at_node_id = active_run.get("currentNodeId") if active_run else None
stuck_at_node_label = ""
if stuck_at_node_id and wf.get("graph"):
stuck_at_node_label = _get_node_label_from_graph(wf["graph"], stuck_at_node_id)
enriched.append({
**wf,
"runCount": run_count,
"isRunning": active_run is not None,
"runStatus": active_run.get("status") if active_run else None,
"stuckAtNodeId": stuck_at_node_id,
"stuckAtNodeLabel": stuck_at_node_label or stuck_at_node_id or "",
"createdAt": wf.get("_createdAt"),
"lastStartedAt": last_started_at,
})
return {"workflows": enriched}
@router.get("/{instanceId}/workflows/{workflowId}") @router.get("/{instanceId}/workflows/{workflowId}")
@ -312,12 +532,23 @@ def get_tasks(
status: str = Query(None, description="Filter: pending, completed, rejected"), status: str = Query(None, description="Filter: pending, completed, rejected"),
context: RequestContext = Depends(getRequestContext), context: RequestContext = Depends(getRequestContext),
) -> dict: ) -> dict:
"""Get tasks - by default those assigned to current user, or all if no assignee filter.""" """Get tasks - by default those assigned to current user, or all if no assignee filter.
Enriches each task with workflowLabel and createdAt (_createdAt).
"""
mandateId = _validateInstanceAccess(instanceId, context) mandateId = _validateInstanceAccess(instanceId, context)
a2 = getAutomation2Interface(context.user, mandateId, instanceId) a2 = getAutomation2Interface(context.user, mandateId, instanceId)
assigneeId = str(context.user.id) if context.user else None assigneeId = str(context.user.id) if context.user else None
items = a2.getTasks(workflowId=workflowId, status=status, assigneeId=assigneeId) items = a2.getTasks(workflowId=workflowId, status=status, assigneeId=assigneeId)
return {"tasks": items} workflows = {w["id"]: w for w in a2.getWorkflows()}
enriched = []
for t in items:
wf = workflows.get(t.get("workflowId") or "")
enriched.append({
**t,
"workflowLabel": wf.get("label", t.get("workflowId", "")) if wf else t.get("workflowId", ""),
"createdAt": t.get("_createdAt"),
})
return {"tasks": enriched}
@router.post("/{instanceId}/tasks/{taskId}/complete") @router.post("/{instanceId}/tasks/{taskId}/complete")

View file

@ -135,8 +135,8 @@ class ProgressLogger:
message = f"{op['service']}" message = f"{op['service']}"
workflow = self.services.workflow workflow = self.services.workflow
if not workflow: if not workflow or not getattr(workflow, "id", None):
logger.warning(f"Cannot log progress: no workflow available") # No workflow or no workflow.id (e.g. automation2 placeholder) - skip progress logging
return None return None
# Validate parentOperationId exists in activeOperations or finishedOperations # Validate parentOperationId exists in activeOperations or finishedOperations

View file

@ -2,6 +2,7 @@
# Main execution engine for automation2 graphs. # Main execution engine for automation2 graphs.
import logging import logging
from datetime import datetime, timezone
from typing import Dict, Any, List, Set, Optional from typing import Dict, Any, List, Set, Optional
from modules.workflows.automation2.graphUtils import ( from modules.workflows.automation2.graphUtils import (
@ -16,30 +17,19 @@ from modules.workflows.automation2.executors import (
TriggerExecutor, TriggerExecutor,
FlowExecutor, FlowExecutor,
DataExecutor, DataExecutor,
IOExecutor, ActionNodeExecutor,
InputExecutor, InputExecutor,
PauseForHumanTaskError, PauseForHumanTaskError,
PauseForEmailWaitError,
) )
from modules.features.automation2.nodeDefinitions import STATIC_NODE_TYPES from modules.features.automation2.nodeDefinitions import STATIC_NODE_TYPES
from modules.workflows.processing.shared.methodDiscovery import discoverMethods, methods
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def _getNodeTypeIds(services: Any) -> Set[str]: def _getNodeTypeIds(services: Any = None) -> Set[str]:
"""Collect all known node type IDs from static + methodDiscovery.""" """Collect all known node type IDs from static definitions."""
discoverMethods(services) return {n["id"] for n in STATIC_NODE_TYPES}
ids = {n["id"] for n in STATIC_NODE_TYPES}
for methodName, methodInfo in methods.items():
if not methodName.startswith("Method"):
continue
shortMethod = methodName.replace("Method", "").lower()
instance = methodInfo.get("instance")
if not instance:
continue
for actionName in instance.actions:
ids.add(f"io.{shortMethod}.{actionName}")
return ids
def _getExecutor( def _getExecutor(
@ -54,8 +44,8 @@ def _getExecutor(
return FlowExecutor() return FlowExecutor()
if nodeType.startswith("data."): if nodeType.startswith("data."):
return DataExecutor() return DataExecutor()
if nodeType.startswith("io."): if nodeType.startswith("ai.") or nodeType.startswith("email.") or nodeType.startswith("sharepoint."):
return IOExecutor(services) return ActionNodeExecutor(services)
if nodeType.startswith("input.") and automation2_interface: if nodeType.startswith("input.") and automation2_interface:
return InputExecutor(automation2_interface) return InputExecutor(automation2_interface)
return None return None
@ -111,10 +101,21 @@ async def executeGraph(
nodeOutputs: Dict[str, Any] = dict(initialNodeOutputs or {}) nodeOutputs: Dict[str, Any] = dict(initialNodeOutputs or {})
is_resume = startAfterNodeId is not None is_resume = startAfterNodeId is not None
if not runId and automation2_interface and workflowId and not is_resume: if not runId and automation2_interface and workflowId and not is_resume:
run_context = {
"connectionMap": connectionMap,
"inputSources": inputSources,
"orderedNodeIds": ordered_ids,
}
if userId:
run_context["ownerId"] = userId
if mandateId:
run_context["mandateId"] = mandateId
if instanceId:
run_context["instanceId"] = instanceId
run = automation2_interface.createRun( run = automation2_interface.createRun(
workflowId=workflowId, workflowId=workflowId,
nodeOutputs=nodeOutputs, nodeOutputs=nodeOutputs,
context={"connectionMap": connectionMap, "inputSources": inputSources, "orderedNodeIds": ordered_ids}, context=run_context,
) )
runId = run.get("id") if run else None runId = run.get("id") if run else None
logger.info("executeGraph created run %s", runId) logger.info("executeGraph created run %s", runId)
@ -175,6 +176,46 @@ async def executeGraph(
"nodeId": e.nodeId, "nodeId": e.nodeId,
"nodeOutputs": dict(nodeOutputs), "nodeOutputs": dict(nodeOutputs),
} }
except PauseForEmailWaitError as e:
logger.info("executeGraph paused for email wait (run %s, node %s)", e.runId, e.nodeId)
# Start email poller on-demand (only runs while workflows wait for email)
try:
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.features.automation2.emailPoller import ensureRunning
root = getRootInterface()
event_user = root.getUserByUsername("event") if root else None
if event_user:
ensureRunning(event_user)
except Exception as poll_err:
logger.warning("Could not start email poller: %s", poll_err)
paused_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
run_ctx = {
"connectionMap": context.get("connectionMap"),
"inputSources": context.get("inputSources"),
"orderedNodeIds": [n.get("id") for n in context.get("_orderedNodes", []) if n.get("id")],
"waitReason": "email",
"waitConfig": e.waitConfig,
"pausedAt": paused_at,
"lastCheckedAt": None,
"ownerId": context.get("userId"),
"mandateId": context.get("mandateId"),
"instanceId": context.get("instanceId"),
}
automation2_interface.updateRun(
e.runId,
status="paused",
nodeOutputs=dict(nodeOutputs),
currentNodeId=e.nodeId,
context=run_ctx,
)
return {
"success": False,
"paused": True,
"waitReason": "email",
"runId": e.runId,
"nodeId": e.nodeId,
"nodeOutputs": dict(nodeOutputs),
}
except Exception as e: except Exception as e:
logger.exception("executeGraph node %s (%s) FAILED: %s", nodeId, nodeType, e) logger.exception("executeGraph node %s (%s) FAILED: %s", nodeId, nodeType, e)
nodeOutputs[nodeId] = {"error": str(e), "success": False} nodeOutputs[nodeId] = {"error": str(e), "success": False}

View file

@ -4,14 +4,15 @@
from .triggerExecutor import TriggerExecutor from .triggerExecutor import TriggerExecutor
from .flowExecutor import FlowExecutor from .flowExecutor import FlowExecutor
from .dataExecutor import DataExecutor from .dataExecutor import DataExecutor
from .ioExecutor import IOExecutor from .actionNodeExecutor import ActionNodeExecutor
from .inputExecutor import InputExecutor, PauseForHumanTaskError from .inputExecutor import InputExecutor, PauseForHumanTaskError, PauseForEmailWaitError
__all__ = [ __all__ = [
"TriggerExecutor", "TriggerExecutor",
"FlowExecutor", "FlowExecutor",
"DataExecutor", "DataExecutor",
"IOExecutor", "ActionNodeExecutor",
"InputExecutor", "InputExecutor",
"PauseForHumanTaskError", "PauseForHumanTaskError",
"PauseForEmailWaitError",
] ]

View file

@ -0,0 +1,457 @@
# Copyright (c) 2025 Patrick Motsch
# Action node executor - maps ai.*, email.*, sharepoint.* to method actions via ActionExecutor.
import logging
from typing import Dict, Any, List, Optional
logger = logging.getLogger(__name__)
def _getNodeDefinition(nodeType: str) -> Optional[Dict[str, Any]]:
    """Look up the static node definition for a node type id.

    The returned dict carries the internal routing keys (_method, _action,
    _paramMap) used to dispatch the node to an ActionExecutor action.
    Returns None when the type id is unknown.
    """
    from modules.features.automation2.nodeDefinitions import STATIC_NODE_TYPES
    return next((entry for entry in STATIC_NODE_TYPES if entry.get("id") == nodeType), None)
def _resolveConnectionIdToReference(chatService, connectionId: str) -> Optional[str]:
"""
Resolve connectionId (UserConnection.id) to connectionReference format.
connectionReference format: connection:{authority}:{externalUsername}
"""
if not connectionId or not chatService:
return None
try:
connections = chatService.getUserConnections()
for c in connections or []:
conn = c if isinstance(c, dict) else (c.model_dump() if hasattr(c, "model_dump") else {})
if str(conn.get("id")) == str(connectionId):
authority = conn.get("authority")
if hasattr(authority, "value"):
authority = authority.value
username = conn.get("externalUsername", "")
return f"connection:{authority}:{username}"
return None
except Exception as e:
logger.warning(f"Could not resolve connectionId {connectionId} to reference: {e}")
return None
def _extractEmailContentFromUpstream(inp: Any) -> Optional[Dict[str, Any]]:
"""
Extract {subject, body, to} from upstream node output (e.g. AI node returning JSON).
Expects JSON like {"subject": "...", "body": "...", "to": "..."} in documentData.
"""
if not inp:
return None
import json
docs = inp.get("documents", inp.get("documentList", [])) if isinstance(inp, dict) else []
if not docs:
return None
doc = docs[0] if isinstance(docs, list) else docs
raw = getattr(doc, "documentData", None) if hasattr(doc, "documentData") else (doc.get("documentData") if isinstance(doc, dict) else None)
if not raw:
return None
try:
data = json.loads(raw) if isinstance(raw, str) else raw
if isinstance(data, dict) and data.get("subject") and data.get("body"):
return {
"subject": str(data.get("subject", "")),
"body": str(data.get("body", "")),
"to": data.get("to"),
}
except (json.JSONDecodeError, TypeError):
pass
return None
def _getIncomingEmailFromUpstream(
    nodeId: str,
    inputSources: Dict[str, Dict[int, tuple]],
    nodeOutputs: Dict[str, Any],
    orderedNodes: List[Dict],
) -> Optional[tuple]:
    """Find the email node feeding a draftEmail node and format its output.

    Walks at most two hops upstream from *nodeId*: either the direct input is
    an email.checkEmail/email.searchEmail node, or it is an AI node whose own
    input is one. Returns _formatEmailOutputAsContext's (context, documentList,
    reply_to) tuple, or None when no email source is reachable.
    """
    emailTypes = ("email.checkEmail", "email.searchEmail")

    def _nodeById(candidateId):
        # Linear scan of the ordered node list for the node with this id.
        for candidate in (orderedNodes or []):
            if candidate.get("id") == candidateId:
                return candidate
        return None

    source = inputSources.get(nodeId, {}).get(0)
    if not source:
        return None
    sourceId = source[0]
    sourceType = (_nodeById(sourceId) or {}).get("type", "")
    # Direct connection to an email node.
    if sourceType in emailTypes:
        return _formatEmailOutputAsContext(nodeOutputs.get(sourceId))
    # Connected via an AI node: walk one more step to the email source.
    if sourceType.startswith("ai."):
        upstream = inputSources.get(sourceId, {}).get(0)
        if not upstream:
            return None
        emailNodeId = upstream[0]
        if (_nodeById(emailNodeId) or {}).get("type") in emailTypes:
            return _formatEmailOutputAsContext(nodeOutputs.get(emailNodeId))
    return None
def _formatEmailOutputAsContext(out: Any) -> Optional[tuple]:
"""Format email node output as (context, documentList, reply_to) for composeAndDraftEmail.
reply_to = sender address of first email (recipient for the reply).
"""
if not out:
return None
docs = out.get("documents", out.get("documentList", [])) if isinstance(out, dict) else []
if not docs:
return None
doc = docs[0] if isinstance(docs, list) else docs
raw = getattr(doc, "documentData", None) if hasattr(doc, "documentData") else (doc.get("documentData") if isinstance(doc, dict) else None)
if not raw:
return None
import json
try:
data = json.loads(raw) if isinstance(raw, str) else raw
except (json.JSONDecodeError, TypeError):
return None
if not isinstance(data, dict):
return None
# readEmails: data.emails.emails | searchEmails: data.searchResults.results
emails_data = data.get("emails") or {}
emails_list = emails_data.get("emails", []) if isinstance(emails_data, dict) else []
if not emails_list:
search_results = data.get("searchResults") or {}
emails_list = search_results.get("results", []) if isinstance(search_results, dict) else []
if not emails_list:
return None
reply_to = None
parts = ["Reply to the following email(s):", ""]
for i, em in enumerate(emails_list[:5]): # max 5
if not isinstance(em, dict):
continue
fr = em.get("from", em.get("sender", {}))
addr = fr.get("emailAddress", {}) if isinstance(fr, dict) else {}
from_str = addr.get("address", "") or addr.get("name", "")
if from_str and not reply_to:
reply_to = addr.get("address", "") or from_str
subj = em.get("subject", "")
body = em.get("bodyPreview", "") or (em.get("body") or {}).get("content", "") if isinstance(em.get("body"), dict) else ""
if body and len(str(body)) > 1500:
body = str(body)[:1500] + "..."
parts.append(f"From: {from_str}")
parts.append(f"Subject: {subj}")
parts.append(f"Content:\n{body}")
parts.append("")
if reply_to:
parts.insert(2, f"Recipient (reply to this address): {reply_to}")
parts.insert(3, "")
context = "\n".join(parts).strip()
return (context, docs, reply_to)
def _buildSearchQuery(
query: str = None,
fromAddress: str = None,
toAddress: str = None,
subjectContains: str = None,
bodyContains: str = None,
hasAttachment: bool = None,
filter: str = None,
) -> str:
"""
Build Microsoft Graph $search query from discrete params.
Uses KQL: from:, to:, subject:, body:, hasattachments: (supported by Graph API).
"""
if filter and str(filter).strip():
return str(filter).strip()
parts = []
if query and str(query).strip():
parts.append(str(query).strip())
if fromAddress and str(fromAddress).strip():
safe = str(fromAddress).strip().replace('"', '')
parts.append(f'from:{safe}')
if toAddress and str(toAddress).strip():
safe = str(toAddress).strip().replace('"', '')
parts.append(f'to:{safe}')
if subjectContains and str(subjectContains).strip():
safe = str(subjectContains).strip().replace('"', '')
parts.append(f'subject:{safe}')
if bodyContains and str(bodyContains).strip():
safe = str(bodyContains).strip().replace('"', '')
parts.append(f'body:{safe}')
if hasAttachment is True:
parts.append("hasattachments:true")
return " ".join(parts) if parts else "*"
def _buildEmailFilter(fromAddress: str = None, subjectContains: str = None, hasAttachment: bool = None) -> str:
"""
Build Microsoft Graph API $filter string from discrete email filter params.
Used for email.checkEmail (and trigger.newEmail).
"""
parts = []
if fromAddress and str(fromAddress).strip():
safe = str(fromAddress).strip().replace("'", "''")
parts.append(f"from/emailAddress/address eq '{safe}'")
if subjectContains and str(subjectContains).strip():
safe = str(subjectContains).strip().replace("'", "''")
parts.append(f"contains(subject,'{safe}')")
if hasAttachment is True:
parts.append("hasAttachments eq true")
return " and ".join(parts) if parts else ""
def _buildActionParams(
node: Dict[str, Any],
nodeDef: Dict[str, Any],
resolvedParams: Dict[str, Any],
chatService,
) -> Dict[str, Any]:
"""
Build params for ActionExecutor from node parameters using _paramMap.
Resolves connectionId -> connectionReference.
Handles _contextFrom for composite params (e.g. email.draftEmail subject+body -> context).
"""
params = dict(resolvedParams)
paramMap = nodeDef.get("_paramMap") or {}
contextFrom = nodeDef.get("_contextFrom") or []
# email.checkEmail: build filter from discrete params (fromAddress, subjectContains, hasAttachment)
nodeType = node.get("type", "")
if nodeType == "email.checkEmail":
built = _buildEmailFilter(
fromAddress=params.get("fromAddress"),
subjectContains=params.get("subjectContains"),
hasAttachment=params.get("hasAttachment"),
)
raw_filter = (params.get("filter") or "").strip()
params["filter"] = built if built else (raw_filter if raw_filter else None)
params.pop("fromAddress", None)
params.pop("subjectContains", None)
params.pop("hasAttachment", None)
# email.searchEmail: build query from discrete params (fromAddress, toAddress, subjectContains, bodyContains, hasAttachment)
if nodeType == "email.searchEmail":
built = _buildSearchQuery(
query=params.get("query"),
fromAddress=params.get("fromAddress"),
toAddress=params.get("toAddress"),
subjectContains=params.get("subjectContains"),
bodyContains=params.get("bodyContains"),
hasAttachment=params.get("hasAttachment"),
filter=params.get("filter"),
)
params["query"] = built
params.pop("fromAddress", None)
params.pop("toAddress", None)
params.pop("subjectContains", None)
params.pop("bodyContains", None)
params.pop("hasAttachment", None)
params.pop("filter", None)
# Resolve connectionId to connectionReference
if "connectionId" in params:
connId = params.get("connectionId")
if connId and chatService:
ref = _resolveConnectionIdToReference(chatService, connId)
if ref:
params["connectionReference"] = ref
else:
logger.warning(f"Could not resolve connectionId {connId} to connectionReference")
params.pop("connectionId", None)
# Build context from multiple params (e.g. subject + body for draft email)
if contextFrom:
parts = []
for key in contextFrom:
val = params.get(key)
if val:
if key == "subject":
parts.append(f"Subject: {val}")
elif key == "body":
parts.append(f"Body:\n{val}")
else:
parts.append(str(val))
if parts:
params["context"] = "\n\n".join(parts)
for k in contextFrom:
params.pop(k, None)
# Apply paramMap: node param name -> action param name
result = {}
mappedNodeKeys = {nodeKey for nodeKey, actionKey in paramMap.items() if actionKey and nodeKey in params}
for nodeKey, actionKey in paramMap.items():
if nodeKey in params and actionKey:
result[actionKey] = params[nodeKey]
# Pass through params not used as source for mapping
for k, v in params.items():
if k not in mappedNodeKeys and k not in result:
result[k] = v
return result
class ActionNodeExecutor:
    """Execute ai.*, email.*, sharepoint.* nodes by mapping to method actions."""

    def __init__(self, services: Any):
        # Service container: .chat is used for connection resolution and the
        # whole container is handed to ActionExecutor.
        self.services = services

    async def execute(
        self,
        node: Dict[str, Any],
        context: Dict[str, Any],
    ) -> Any:
        """Execute one action node and return its normalized output dict.

        Resolves the node type to a (methodName, actionName) pair via the
        node registry, builds action parameters from the node's
        reference-resolved parameters plus upstream outputs, then delegates
        to ActionExecutor.

        Returns None when the node type has no registered mapping; otherwise
        a dict with success/error/documents/data.

        Raises PauseForEmailWaitError for email.checkEmail when the run can
        be paused (runId, workflowId and connectionReference available).
        """
        from modules.features.automation2.nodeRegistry import getNodeTypeToMethodAction
        from modules.workflows.automation2.graphUtils import resolveParameterReferences
        from modules.workflows.processing.core.actionExecutor import ActionExecutor

        nodeType = node.get("type", "")
        nodeId = node.get("id", "")
        logger.info("ActionNodeExecutor node %s type=%s", nodeId, nodeType)
        mapping = getNodeTypeToMethodAction()
        methodAction = mapping.get(nodeType)
        if not methodAction:
            # Node type not handled by this executor; caller treats None as "no output".
            logger.debug("ActionNodeExecutor node %s not in mapping -> None", nodeId)
            return None
        methodName, actionName = methodAction
        logger.info("ActionNodeExecutor node %s method=%s action=%s", nodeId, methodName, actionName)
        nodeDef = _getNodeDefinition(nodeType)
        params = dict(node.get("parameters") or {})
        # Substitute parameter references with the outputs of already-executed nodes.
        resolvedParams = resolveParameterReferences(params, context.get("nodeOutputs", {}))
        # Merge input from connected nodes (documentList, etc.)
        # inputSources maps input-port index -> (sourceNodeId, sourcePort) for this node.
        inputSources = context.get("inputSources", {}).get(nodeId, {})
        if 0 in inputSources:
            srcId, _ = inputSources[0]
            inp = context.get("nodeOutputs", {}).get(srcId)
            if isinstance(inp, dict):
                # Prefer "documents", fall back to "documentList"; default empty list.
                resolvedParams.setdefault("documentList", inp.get("documents", inp.get("documentList", [])))
            elif inp is not None:
                resolvedParams.setdefault("input", inp)
        # ai.prompt with email upstream: inject actual email content into prompt so AI has context
        # (getChatDocumentsFromDocumentList fails in automation2 -- the workflow has no chat messages)
        if nodeType.startswith("ai."):
            orderedNodes = context.get("_orderedNodes") or []
            if 0 in inputSources:
                srcId, _ = inputSources[0]
                srcNode = next((n for n in orderedNodes if n.get("id") == srcId), None)
                srcType = (srcNode or {}).get("type", "")
                if srcType in ("email.checkEmail", "email.searchEmail"):
                    incoming = _getIncomingEmailFromUpstream(
                        nodeId,
                        context.get("inputSources", {}),
                        context.get("nodeOutputs", {}),
                        orderedNodes,
                    )
                    if incoming:
                        ctx, _doc_list, _reply_to = incoming
                        if ctx and ctx.strip():
                            # Prepend the rendered incoming email to the user's prompt.
                            base_prompt = (resolvedParams.get("aiPrompt") or "").strip()
                            resolvedParams["aiPrompt"] = (
                                f"Eingehende E-Mail:\n{ctx}\n\nAufgabe: {base_prompt}"
                                if base_prompt
                                else f"Eingehende E-Mail:\n{ctx}"
                            )
                            logger.debug("ai.prompt: injected email context from upstream %s", srcType)
        chatService = getattr(self.services, "chat", None)
        actionParams = _buildActionParams(node, nodeDef or {}, resolvedParams, chatService)
        # email.checkEmail: pause and wait for new email (background poller will resume)
        if nodeType == "email.checkEmail":
            runId = context.get("_runId")
            workflowId = context.get("workflowId")
            connRef = actionParams.get("connectionReference")
            if runId and workflowId and connRef:
                from modules.workflows.automation2.executors import PauseForEmailWaitError
                # waitConfig tells the poller what mailbox/folder/filter to watch.
                waitConfig = {
                    "connectionReference": connRef,
                    "folder": actionParams.get("folder", "Inbox"),
                    # Cap the poll batch at 50 messages; default is 10.
                    "limit": min(int(actionParams.get("limit") or 10), 50),
                    "filter": actionParams.get("filter"),
                }
                raise PauseForEmailWaitError(runId=runId, nodeId=nodeId, waitConfig=waitConfig)
            # Fallback: no pause (calls readEmails directly) needs runId, workflowId, connectionReference
            if not runId or not workflowId:
                logger.warning(
                    "email.checkEmail not pausing (runId=%s workflowId=%s) run must be saved/executed as workflow",
                    runId,
                    workflowId,
                )
            elif not connRef:
                logger.warning(
                    "email.checkEmail not pausing connectionReference missing (check connectionId/config)",
                )
        # email.draftEmail: use AI output as emailContent if available; else pass incoming email as context
        if nodeType == "email.draftEmail":
            inputSources = context.get("inputSources", {})
            nodeOutputs = context.get("nodeOutputs", {})
            orderedNodes = context.get("_orderedNodes") or []
            if 0 in inputSources.get(nodeId, {}):
                srcId, _ = inputSources[nodeId][0]
                srcNode = next((n for n in orderedNodes if n.get("id") == srcId), None)
                srcType = (srcNode or {}).get("type", "")
                if srcType.startswith("ai."):
                    inp = nodeOutputs.get(srcId)
                    email_content = _extractEmailContentFromUpstream(inp)
                    if email_content:
                        actionParams["emailContent"] = email_content
                        actionParams.setdefault("context", "(from connected AI node)")
                    else:
                        # AI failed or wrong format: pass incoming email from upstream as context
                        incoming = _getIncomingEmailFromUpstream(nodeId, inputSources, nodeOutputs, orderedNodes)
                        if incoming:
                            ctx, doc_list, reply_to = incoming
                            actionParams["context"] = ctx
                            if doc_list and not actionParams.get("documentList"):
                                actionParams["documentList"] = doc_list
                            if reply_to and not actionParams.get("to"):
                                actionParams["to"] = [reply_to]
                else:
                    # Direct connection to email.checkEmail/searchEmail: use incoming email as context
                    if not actionParams.get("context"):
                        incoming = _getIncomingEmailFromUpstream(nodeId, inputSources, nodeOutputs, orderedNodes)
                        if incoming:
                            ctx, doc_list, reply_to = incoming
                            actionParams["context"] = ctx
                            if doc_list and not actionParams.get("documentList"):
                                actionParams["documentList"] = doc_list
                            if reply_to and not actionParams.get("to"):
                                actionParams["to"] = [reply_to]
        # sharepoint.uploadFile: content from documentList (upstream) if not in params
        if nodeType == "sharepoint.uploadFile" and "content" not in actionParams:
            docList = actionParams.get("documentList") or resolvedParams.get("documentList")
            if docList:
                # Upload the first document when a list is supplied.
                actionParams["content"] = docList[0] if isinstance(docList, list) and docList else docList
        executor = ActionExecutor(self.services)
        logger.info("ActionNodeExecutor node %s calling executeAction(%s, %s)", nodeId, methodName, actionName)
        result = await executor.executeAction(methodName, actionName, actionParams)
        # Normalize the ActionResult into a plain dict for nodeOutputs.
        out = {
            "success": result.success,
            "error": result.error,
            "documents": [d.model_dump() if hasattr(d, "model_dump") else d for d in (result.documents or [])],
            "data": result.model_dump() if hasattr(result, "model_dump") else {"success": result.success, "error": result.error},
        }
        logger.info(
            "ActionNodeExecutor node %s result: success=%s error=%s doc_count=%d",
            nodeId,
            result.success,
            result.error,
            len(out.get("documents", [])),
        )
        return out

View file

@ -17,6 +17,16 @@ class PauseForHumanTaskError(Exception):
super().__init__(f"Pause for human task {taskId} (run {runId}, node {nodeId})") super().__init__(f"Pause for human task {taskId} (run {runId}, node {nodeId})")
class PauseForEmailWaitError(Exception):
    """Signal that execution must pause until a new matching email arrives.

    The background email poller detects runs paused with this error and
    resumes them once a message matching `waitConfig` shows up.
    """

    def __init__(self, runId: str, nodeId: str, waitConfig: Dict[str, Any]):
        super().__init__(f"Pause for email wait (run {runId}, node {nodeId})")
        # Remember which run/node is paused and what the poller must watch for.
        self.runId = runId
        self.nodeId = nodeId
        self.waitConfig = waitConfig
class InputExecutor: class InputExecutor:
""" """
Execute input/human nodes. Creates a HumanTask, pauses the run, and raises Execute input/human nodes. Creates a HumanTask, pauses the run, and raises

View file

@ -20,11 +20,44 @@ async def composeAndDraftEmailWithContext(self, parameters: Dict[str, Any]) -> A
bcc = parameters.get("bcc") or [] bcc = parameters.get("bcc") or []
emailStyle = parameters.get("emailStyle") or "business" emailStyle = parameters.get("emailStyle") or "business"
maxLength = parameters.get("maxLength") or 1000 maxLength = parameters.get("maxLength") or 1000
# Only connectionReference and context are required - to is optional for drafts # Direct content from upstream (e.g. AI node): skip internal AI, use subject/body/to directly
if not connectionReference or not context: email_content = parameters.get("emailContent")
return ActionResult.isFailure(error="connectionReference and context are required") if isinstance(email_content, dict):
direct_subject = email_content.get("subject")
direct_body = email_content.get("body")
direct_to = email_content.get("to")
if direct_subject and direct_body:
subject = str(direct_subject).strip()
body = str(direct_body).strip()
to = [direct_to] if isinstance(direct_to, str) else (direct_to or [])
if isinstance(to, str):
to = [to]
ai_attachments = []
# Jump to create-email section (see below)
else:
direct_subject = parameters.get("subject")
direct_body = parameters.get("body")
if direct_subject and direct_body:
subject = str(direct_subject).strip()
body = str(direct_body).strip()
if isinstance(to, str):
to = [to]
ai_attachments = []
else:
subject = None
body = None
ai_attachments = None
use_direct_content = bool(subject and body)
if not use_direct_content:
# Original path: require connectionReference and context
if not connectionReference or not context:
return ActionResult.isFailure(error="connectionReference and context are required")
elif not connectionReference:
return ActionResult.isFailure(error="connectionReference is required")
# Convert single values to lists for all recipient parameters # Convert single values to lists for all recipient parameters
if isinstance(to, str): if isinstance(to, str):
to = [to] to = [to]
@ -45,10 +78,10 @@ async def composeAndDraftEmailWithContext(self, parameters: Dict[str, Any]) -> A
if not permissions_ok: if not permissions_ok:
return ActionResult.isFailure(error="Connection lacks necessary permissions for Outlook operations") return ActionResult.isFailure(error="Connection lacks necessary permissions for Outlook operations")
# Prepare documents for AI processing # Prepare documents for AI processing (only when using AI path)
from modules.datamodels.datamodelDocref import DocumentReferenceList from modules.datamodels.datamodelDocref import DocumentReferenceList
chatDocuments = [] chatDocuments = []
if documentList: if not use_direct_content and documentList:
# Convert to DocumentReferenceList if needed # Convert to DocumentReferenceList if needed
if isinstance(documentList, DocumentReferenceList): if isinstance(documentList, DocumentReferenceList):
docRefList = documentList docRefList = documentList
@ -60,33 +93,34 @@ async def composeAndDraftEmailWithContext(self, parameters: Dict[str, Any]) -> A
docRefList = DocumentReferenceList(references=[]) docRefList = DocumentReferenceList(references=[])
chatDocuments = self.services.chat.getChatDocumentsFromDocumentList(docRefList) chatDocuments = self.services.chat.getChatDocumentsFromDocumentList(docRefList)
# Create AI prompt for email composition if not use_direct_content:
# Build document reference list for AI with expanded list contents when possible # Create AI prompt for email composition
doc_references = documentList # Build document reference list for AI with expanded list contents when possible
doc_list_text = "" doc_references = documentList
if doc_references: doc_list_text = ""
lines = ["Available_Document_References:"] if doc_references:
for ref in doc_references: lines = ["Available_Document_References:"]
# Each item is a label: resolve to its document list and render contained items for ref in doc_references:
from modules.datamodels.datamodelDocref import DocumentReferenceList # Each item is a label: resolve to its document list and render contained items
list_docs = self.services.chat.getChatDocumentsFromDocumentList(DocumentReferenceList.from_string_list([ref])) or [] from modules.datamodels.datamodelDocref import DocumentReferenceList
if list_docs: list_docs = self.services.chat.getChatDocumentsFromDocumentList(DocumentReferenceList.from_string_list([ref])) or []
for d in list_docs: if list_docs:
doc_ref_label = self.services.chat.getDocumentReferenceFromChatDocument(d) for d in list_docs:
lines.append(f"- {doc_ref_label}") doc_ref_label = self.services.chat.getDocumentReferenceFromChatDocument(d)
else: lines.append(f"- {doc_ref_label}")
lines.append(" - (no documents)") else:
doc_list_text = "\n" + "\n".join(lines) lines.append(" - (no documents)")
else: doc_list_text = "\n" + "\n".join(lines)
doc_list_text = "Available_Document_References: (No documents available for attachment)" else:
doc_list_text = "Available_Document_References: (No documents available for attachment)"
# Escape only the user-controlled context to prevent prompt injection
escaped_context = context.replace('"', '\\"').replace('\n', '\\n').replace('\r', '\\r') # Escape only the user-controlled context to prevent prompt injection
escaped_context = context.replace('"', '\\"').replace('\n', '\\n').replace('\r', '\\r')
# Build recipients text for prompt
recipients_text = f"Recipients: {to}" if to else "Recipients: (not specified - this is a draft)" # Build recipients text for prompt
recipients_text = f"Recipients: {to}" if to else "Recipients: (not specified - this is a draft)"
ai_prompt = f"""Compose an email based on this context:
ai_prompt = f"""Compose an email based on this context:
------- -------
{escaped_context} {escaped_context}
------- -------
@ -107,93 +141,93 @@ Return JSON:
"attachments": ["docItem:<documentId>:<filename>"] "attachments": ["docItem:<documentId>:<filename>"]
}} }}
""" """
# Call AI service to generate email content # Call AI service to generate email content
try:
ai_response = await self.services.ai.callAiPlanning(
prompt=ai_prompt,
placeholders=None,
debugType="email_composition"
)
# Parse AI response
try: try:
ai_content = ai_response ai_response = await self.services.ai.callAiPlanning(
# Extract JSON from AI response prompt=ai_prompt,
if "```json" in ai_content: placeholders=None,
json_start = ai_content.find("```json") + 7 debugType="email_composition"
json_end = ai_content.find("```", json_start) )
json_content = ai_content[json_start:json_end].strip()
elif "{" in ai_content and "}" in ai_content:
json_start = ai_content.find("{")
json_end = ai_content.rfind("}") + 1
json_content = ai_content[json_start:json_end]
else:
json_content = ai_content
email_data = json.loads(json_content)
subject = email_data.get("subject", "")
body = email_data.get("body", "")
ai_attachments = email_data.get("attachments", [])
if not subject or not body:
return ActionResult.isFailure(error="AI did not generate valid subject and body")
# Use AI-selected attachments if provided, otherwise use all documents
normalized_ai_attachments = []
if documentList:
try:
available_refs = [documentList] if isinstance(documentList, str) else documentList
from modules.datamodels.datamodelDocref import DocumentReferenceList
available_docs = self.services.chat.getChatDocumentsFromDocumentList(DocumentReferenceList.from_string_list(available_refs)) or []
except Exception:
available_docs = []
# Normalize AI attachments to a list of strings # Parse AI response
if isinstance(ai_attachments, str): try:
ai_attachments = [ai_attachments] ai_content = ai_response
elif isinstance(ai_attachments, list): # Extract JSON from AI response
ai_attachments = [a for a in ai_attachments if isinstance(a, str)] if "```json" in ai_content:
json_start = ai_content.find("```json") + 7
json_end = ai_content.find("```", json_start)
json_content = ai_content[json_start:json_end].strip()
elif "{" in ai_content and "}" in ai_content:
json_start = ai_content.find("{")
json_end = ai_content.rfind("}") + 1
json_content = ai_content[json_start:json_end]
else:
json_content = ai_content
if ai_attachments: email_data = json.loads(json_content)
subject = email_data.get("subject", "")
body = email_data.get("body", "")
ai_attachments = email_data.get("attachments", [])
if not subject or not body:
return ActionResult.isFailure(error="AI did not generate valid subject and body")
# Use AI-selected attachments if provided, otherwise use all documents
normalized_ai_attachments = []
if documentList:
try: try:
ai_refs = [ai_attachments] if isinstance(ai_attachments, str) else ai_attachments available_refs = [documentList] if isinstance(documentList, str) else documentList
from modules.datamodels.datamodelDocref import DocumentReferenceList from modules.datamodels.datamodelDocref import DocumentReferenceList
ai_docs = self.services.chat.getChatDocumentsFromDocumentList(DocumentReferenceList.from_string_list(ai_refs)) or [] available_docs = self.services.chat.getChatDocumentsFromDocumentList(DocumentReferenceList.from_string_list(available_refs)) or []
except Exception: except Exception:
ai_docs = [] available_docs = []
# Intersect by document id # Normalize AI attachments to a list of strings
available_ids = {getattr(d, 'id', None) for d in available_docs} if isinstance(ai_attachments, str):
selected_docs = [d for d in ai_docs if getattr(d, 'id', None) in available_ids] ai_attachments = [ai_attachments]
elif isinstance(ai_attachments, list):
ai_attachments = [a for a in ai_attachments if isinstance(a, str)]
if selected_docs: if ai_attachments:
# Map selected ChatDocuments back to docItem references (with full filename) try:
documentList = [self.services.chat.getDocumentReferenceFromChatDocument(d) for d in selected_docs] ai_refs = [ai_attachments] if isinstance(ai_attachments, str) else ai_attachments
# Normalize ai_attachments to full format for storage from modules.datamodels.datamodelDocref import DocumentReferenceList
normalized_ai_attachments = documentList.copy() ai_docs = self.services.chat.getChatDocumentsFromDocumentList(DocumentReferenceList.from_string_list(ai_refs)) or []
logger.info(f"AI selected {len(documentList)} documents for attachment (resolved via ChatDocuments)") except Exception:
ai_docs = []
# Intersect by document id
available_ids = {getattr(d, 'id', None) for d in available_docs}
selected_docs = [d for d in ai_docs if getattr(d, 'id', None) in available_ids]
if selected_docs:
# Map selected ChatDocuments back to docItem references (with full filename)
documentList = [self.services.chat.getDocumentReferenceFromChatDocument(d) for d in selected_docs]
# Normalize ai_attachments to full format for storage
normalized_ai_attachments = documentList.copy()
logger.info(f"AI selected {len(documentList)} documents for attachment (resolved via ChatDocuments)")
else:
# No intersection; use all available documents
documentList = [self.services.chat.getDocumentReferenceFromChatDocument(d) for d in available_docs]
normalized_ai_attachments = documentList.copy()
logger.warning("AI selected attachments not found in available documents, using all documents")
else: else:
# No intersection; use all available documents # No AI selection; use all available documents
documentList = [self.services.chat.getDocumentReferenceFromChatDocument(d) for d in available_docs] documentList = [self.services.chat.getDocumentReferenceFromChatDocument(d) for d in available_docs]
normalized_ai_attachments = documentList.copy() normalized_ai_attachments = documentList.copy()
logger.warning("AI selected attachments not found in available documents, using all documents") logger.warning("AI did not specify attachments, using all available documents")
else: else:
# No AI selection; use all available documents logger.info("No documents provided in documentList; skipping attachment processing")
documentList = [self.services.chat.getDocumentReferenceFromChatDocument(d) for d in available_docs]
normalized_ai_attachments = documentList.copy() except json.JSONDecodeError as e:
logger.warning("AI did not specify attachments, using all available documents") logger.error(f"Failed to parse AI response as JSON: {str(e)}")
else: logger.error(f"AI response content: {ai_response}")
logger.info("No documents provided in documentList; skipping attachment processing") return ActionResult.isFailure(error="AI response was not valid JSON format")
except json.JSONDecodeError as e: except Exception as e:
logger.error(f"Failed to parse AI response as JSON: {str(e)}") logger.error(f"Error calling AI service: {str(e)}")
logger.error(f"AI response content: {ai_response}") return ActionResult.isFailure(error=f"Failed to generate email content: {str(e)}")
return ActionResult.isFailure(error="AI response was not valid JSON format")
except Exception as e:
logger.error(f"Error calling AI service: {str(e)}")
return ActionResult.isFailure(error=f"Failed to generate email content: {str(e)}")
# Now create the email with AI-generated content # Now create the email with AI-generated content
try: try:

View file

@ -49,9 +49,9 @@ async def readEmails(self, parameters: Dict[str, Any]) -> ActionResult:
if filter: if filter:
# Remove any potentially dangerous characters that could break the filter # Remove any potentially dangerous characters that could break the filter
filter = filter.strip() filter = filter.strip()
if len(filter) > 100: if len(filter) > 500:
logger.warning(f"Filter too long ({len(filter)} chars), truncating to 100 characters") logger.warning(f"Filter too long ({len(filter)} chars), truncating to 500 characters")
filter = filter[:100] filter = filter[:500]
# Get Microsoft connection # Get Microsoft connection

View file

@ -73,8 +73,12 @@ async def searchEmails(self, parameters: Dict[str, Any]) -> ActionResult:
logger.warning(f"Could not find folder ID for '{folder}', using folder name directly") logger.warning(f"Could not find folder ID for '{folder}', using folder name directly")
# Build the search API request # Build the search API request
api_url = f"{graph_url}/me/messages"
params = self.emailProcessing.buildSearchParameters(query, folder_id or folder, limit) params = self.emailProcessing.buildSearchParameters(query, folder_id or folder, limit)
# Use folder-specific URL when we have folder_id and $search - avoids InefficientFilter
if folder_id and params.get("$search"):
api_url = f"{graph_url}/me/mailFolders/{folder_id}/messages"
else:
api_url = f"{graph_url}/me/messages"
# Log search parameters for debugging # Log search parameters for debugging
logger.debug(f"Search query: '{query}'") logger.debug(f"Search query: '{query}'")

View file

@ -53,7 +53,7 @@ class EmailProcessingHelper:
# Handle common search operators # Handle common search operators
# Recognize Graph operators including both singular and plural forms for hasAttachments # Recognize Graph operators including both singular and plural forms for hasAttachments
lowered = clean_query.lower() lowered = clean_query.lower()
if any(op in lowered for op in ['from:', 'to:', 'subject:', 'received:', 'hasattachment:', 'hasattachments:']): if any(op in lowered for op in ['from:', 'to:', 'subject:', 'body:', 'received:', 'hasattachment:', 'hasattachments:']):
# This is an advanced search query, return as-is # This is an advanced search query, return as-is
return clean_query return clean_query
@ -104,7 +104,7 @@ class EmailProcessingHelper:
# Check if this is a complex search query with multiple operators # Check if this is a complex search query with multiple operators
# Recognize Graph operators including both singular and plural forms for hasAttachments # Recognize Graph operators including both singular and plural forms for hasAttachments
lowered = clean_query.lower() lowered = clean_query.lower()
if any(op in lowered for op in ['from:', 'to:', 'subject:', 'received:', 'hasattachment:', 'hasattachments:']): if any(op in lowered for op in ['from:', 'to:', 'subject:', 'body:', 'received:', 'hasattachment:', 'hasattachments:']):
# This is an advanced search query, use $search # This is an advanced search query, use $search
# Microsoft Graph API supports complex search syntax # Microsoft Graph API supports complex search syntax
params["$search"] = f'"{clean_query}"' params["$search"] = f'"{clean_query}"'
@ -113,34 +113,20 @@ class EmailProcessingHelper:
# We'll need to filter results after the API call # We'll need to filter results after the API call
# Folder filtering will be done after the API call # Folder filtering will be done after the API call
else: else:
# Use $filter for basic text search, but keep it simple to avoid "InefficientFilter" error # Use $search (KQL) instead of $filter to avoid "InefficientFilter" - Graph rejects
# Microsoft Graph API has limitations on complex filters # contains(subject,x) + parentFolderId + orderby. $search handles subject:query.
if len(clean_query) > 50: if len(clean_query) > 50:
# If query is too long, truncate it to avoid complex filter issues
clean_query = clean_query[:50] clean_query = clean_query[:50]
# Use only subject search to keep filter simple
# Handle wildcard queries specially
if clean_query == "*" or clean_query == "": if clean_query == "*" or clean_query == "":
# For wildcard or empty query, don't use contains filter
# Just use folder filter if specified
if folder and folder.lower() != "all": if folder and folder.lower() != "all":
params["$filter"] = f"parentFolderId eq '{folder}'" params["$filter"] = f"parentFolderId eq '{folder}'"
else: params["$orderby"] = "receivedDateTime desc"
# No filter needed for wildcard search across all folders
pass
else: else:
params["$filter"] = f"contains(subject,'{clean_query}')" # Use $search with subject: to avoid InefficientFilter
safe = clean_query.replace('"', '')
# Add folder filter if specified params["$search"] = f'"subject:{safe}"'
if folder and folder.lower() != "all": # Folder filtering done post-API in searchEmails when $search is used
params["$filter"] = f"{params['$filter']} and parentFolderId eq '{folder}'"
# Add orderby for basic queries
params["$orderby"] = "receivedDateTime desc"
return params return params
def buildGraphFilter(self, filter_text: str) -> Dict[str, str]: def buildGraphFilter(self, filter_text: str) -> Dict[str, str]:
@ -168,7 +154,7 @@ class EmailProcessingHelper:
# Handle search queries (from:, to:, subject:, etc.) - check this FIRST # Handle search queries (from:, to:, subject:, etc.) - check this FIRST
# Support both singular and plural forms for hasAttachments # Support both singular and plural forms for hasAttachments
lt = filter_text.lower() lt = filter_text.lower()
if any(lt.startswith(prefix) for prefix in ['from:', 'to:', 'subject:', 'received:', 'hasattachment:', 'hasattachments:']): if any(lt.startswith(prefix) for prefix in ['from:', 'to:', 'subject:', 'body:', 'received:', 'hasattachment:', 'hasattachments:']):
return {"$search": f'"{filter_text}"'} return {"$search": f'"{filter_text}"'}
# Handle email address filters (only if it's NOT a search query) # Handle email address filters (only if it's NOT a search query)

View file

@ -27,10 +27,15 @@ class FolderManagementHelper:
def getFolderId(self, folder_name: str, connection: Dict[str, Any]) -> Optional[str]: def getFolderId(self, folder_name: str, connection: Dict[str, Any]) -> Optional[str]:
""" """
Get the folder ID for a given folder name Get the folder ID for a given folder name or ID.
Returns the input as-is if it already looks like a Microsoft Graph folder ID.
This is needed for proper filtering when using advanced search queries
""" """
if not folder_name or not str(folder_name).strip():
return None
# Graph folder IDs are base64-like strings (e.g. AQMk...); return as-is
s = str(folder_name).strip()
if s.startswith("AQMk") and len(s) > 20 and " " not in s:
return s
try: try:
graph_url = "https://graph.microsoft.com/v1.0" graph_url = "https://graph.microsoft.com/v1.0"
headers = { headers = {

View file

@ -0,0 +1,119 @@
# TASK: Plan Document Structure (Documents + Chapters)
This is a STRUCTURE PLANNING task. You define which documents to create and which chapters each document will have.
Chapter CONTENT will be generated in a later step - here you only plan the STRUCTURE and assign content references.
Return EXACTLY ONE complete JSON object. Do not generate multiple JSON objects, alternatives, or variations. Do not use separators like "---" between JSON objects.
## USER REQUEST (for context)
```
formulier eine passende email zur eingehenden email
```
## AVAILABLE CONTENT PARTS
(No content parts available)
## CONTENT ASSIGNMENT RULE
CRITICAL: Every chapter MUST have contentParts assigned if it relates to documents/images/data from the user request.
If the user request mentions documents/images/data, then EVERY chapter that generates content related to those references MUST assign the relevant ContentParts explicitly.
Assignment logic:
- If chapter DISPLAYS a document/image → assign "object" format ContentPart with "caption"
- If chapter generates text content ABOUT a document/image/data → assign ContentPart with "instruction":
- Prefer "extracted" format if available (contains analyzed/extracted content)
- If only "object" format is available, use "object" format with "instruction" (to write ABOUT the image/document)
- If chapter's generationHint or purpose relates to a document/image/data mentioned in user request → it MUST have ContentParts assigned
- Multiple chapters might assign the same ContentPart (e.g., one chapter displays image, another writes about it)
- Use ContentPart IDs exactly as listed in AVAILABLE CONTENT PARTS above
- Empty contentParts are only allowed if chapter generates content WITHOUT referencing any documents/images/data from the user request
CRITICAL RULE: If the user request mentions BOTH:
a) Documents/images/data (listed in AVAILABLE CONTENT PARTS above), AND
b) Generic content types (article text, main content, body text, etc.)
Then chapters that generate those generic content types MUST assign the relevant ContentParts, because the content should relate to or be based on the provided documents/images/data.
## CONTENT EFFICIENCY PRINCIPLES
- Generate COMPACT content: Focus on essential information only
- AVOID verbose, lengthy, or repetitive text - be concise and direct
- Prioritize FACTS over filler text - no introductions like "In this chapter..."
- Minimize system resources: shorter content = faster processing
- Quality over quantity: precise, meaningful content rather than padding
## CHAPTER STRUCTURE REQUIREMENTS
- Generate chapters based on USER REQUEST - analyze what structure the user wants
- Create ONLY the minimum chapters needed to cover the user's request - avoid over-structuring
- HARD LIMIT: Maximum 7 chapters per document. If the topic can be covered in fewer, prefer fewer. Combine related topics into single chapters rather than creating many small ones.
- IMPORTANT: Each chapter MUST have ALL these fields:
- id: Unique identifier (e.g., "chapter_1")
- level: Heading level (1, 2, 3, etc.)
- title: Chapter title
- contentParts: Object mapping ContentPart IDs to usage instructions (MUST assign if chapter relates to documents/data from user request)
- generationHint: Description of what content to generate (including formatting/styling requirements)
- sections: Empty array [] (REQUIRED - sections are generated in next phase)
- contentParts: {"partId": {"instruction": "..."} or {"caption": "..."} or both} - Assign ContentParts as required by CONTENT ASSIGNMENT RULE above
- The "instruction" field for each ContentPart MUST contain ALL relevant details from the USER REQUEST that apply to content extraction for this specific chapter. Include all formatting rules, data requirements, constraints, and specifications mentioned in the user request that are relevant for processing this ContentPart in this chapter.
- generationHint: Keep CONCISE but include relevant details from the USER REQUEST. Focus on WHAT to generate, not HOW to phrase it verbosely.
- The number of chapters depends on the user request - create only what is requested. Do NOT create chapters for topics without available data.
CRITICAL: Only create chapters for CONTENT sections, not for formatting/styling requirements. Formatting/styling requirements should instead be included in each chapter's generationHint if needed.
## DOCUMENT STRUCTURE
For each document, determine:
- outputFormat: From USER REQUEST (explicit mention or infer from purpose/content type). Default: "txt". Multiple documents can have different formats.
- language: From USER REQUEST (map to ISO 639-1: de, en, fr, it...). Default: "de". Multiple documents can have different languages.
- chapters: Structure appropriately for the format (e.g., pptx=slides, docx=sections, xlsx=worksheets). Match format capabilities and constraints.
Required JSON fields:
- metadata: {"title": "...", "language": "..."}
- documents: Array with id, title, filename, outputFormat, language, chapters[]
- chapters: Array with id, level, title, contentParts, generationHint, sections[]
EXAMPLE STRUCTURE (for reference only - adapt to user request):
{
"metadata": {
"title": "Document Title",
"language": "de"
},
"documents": [{
"id": "doc_1",
"title": "Document Title",
"filename": "document.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Chapter Title",
"contentParts": {
"extracted_part_id": {
"instruction": "Use extracted content with ALL relevant details from user request"
}
},
"generationHint": "Detailed description including ALL relevant details from user request for this chapter",
"sections": []
}
]
}]
}
CRITICAL INSTRUCTIONS:
- Generate chapters based on USER REQUEST, NOT based on the example above
- The example shows the JSON structure format, NOT the required chapters
- Create only the chapters that match the user's request
- Adapt chapter titles and structure to match the user's specific request
- Determine outputFormat and language for each document by analyzing the USER REQUEST above
- The example shows placeholders "txt" and "de" - YOU MUST REPLACE THESE with actual values determined from the USER REQUEST
MANDATORY CONTENT ASSIGNMENT CHECK:
For each chapter, verify:
1. Does the user request mention documents/images/data? (e.g., "photo", "image", "document", "data", "based on", "about")
2. Does this chapter's generationHint, title, or purpose relate to those documents/images/data mentioned in step 1?
- Examples: "article about the photo", "text describing the image", "analysis of the document", "content based on the data"
- Even if chapter doesn't explicitly say "about the image", if user request mentions both the image AND this chapter's content type → relate them
3. If YES to both → chapter MUST have contentParts assigned (cannot be empty {})
4. If ContentPart is "object" format and chapter needs to write ABOUT it → assign with "instruction" field, not just "caption"
OUTPUT FORMAT: Start with { and end with }. Do NOT use markdown code fences (```json). Do NOT add explanatory text before or after the JSON. Return ONLY the JSON object itself.

View file

@ -0,0 +1,41 @@
{
"metadata": {
"title": "Email Response",
"language": "de"
},
"documents": [
{
"id": "doc_1",
"title": "Email Response",
"filename": "email_response.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Introduction",
"contentParts": {},
"generationHint": "Write an introductory paragraph for the email response, setting the tone and context.",
"sections": []
},
{
"id": "chapter_2",
"level": 1,
"title": "Main Content",
"contentParts": {},
"generationHint": "Develop the main body of the email, addressing the points raised in the incoming email.",
"sections": []
},
{
"id": "chapter_3",
"level": 1,
"title": "Conclusion",
"contentParts": {},
"generationHint": "Conclude the email with a polite closing statement and any necessary follow-up actions.",
"sections": []
}
]
}
]
}

View file

@ -0,0 +1,41 @@
{
"metadata": {
"title": "Email Response",
"language": "de"
},
"documents": [
{
"id": "doc_1",
"title": "Email Response",
"filename": "email_response.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Introduction",
"contentParts": {},
"generationHint": "Write an introductory paragraph for the email response, setting the tone and context.",
"sections": []
},
{
"id": "chapter_2",
"level": 1,
"title": "Main Content",
"contentParts": {},
"generationHint": "Develop the main body of the email, addressing the points raised in the incoming email.",
"sections": []
},
{
"id": "chapter_3",
"level": 1,
"title": "Conclusion",
"contentParts": {},
"generationHint": "Conclude the email with a polite closing statement and any necessary follow-up actions.",
"sections": []
}
]
}
]
}

View file

@ -0,0 +1,119 @@
# TASK: Plan Document Structure (Documents + Chapters)
This is a STRUCTURE PLANNING task. You define which documents to create and which chapters each document will have.
Chapter CONTENT will be generated in a later step - here you only plan the STRUCTURE and assign content references.
Return EXACTLY ONE complete JSON object. Do not generate multiple JSON objects, alternatives, or variations. Do not use separators like "---" between JSON objects.
## USER REQUEST (for context)
```
formulier eine passende email zur eingehenden email
```
## AVAILABLE CONTENT PARTS
(No content parts available)
## CONTENT ASSIGNMENT RULE
CRITICAL: Every chapter MUST have contentParts assigned if it relates to documents/images/data from the user request.
If the user request mentions documents/images/data, then EVERY chapter that generates content related to those references MUST assign the relevant ContentParts explicitly.
Assignment logic:
- If chapter DISPLAYS a document/image → assign "object" format ContentPart with "caption"
- If chapter generates text content ABOUT a document/image/data → assign ContentPart with "instruction":
- Prefer "extracted" format if available (contains analyzed/extracted content)
- If only "object" format is available, use "object" format with "instruction" (to write ABOUT the image/document)
- If chapter's generationHint or purpose relates to a document/image/data mentioned in user request → it MUST have ContentParts assigned
- Multiple chapters might assign the same ContentPart (e.g., one chapter displays image, another writes about it)
- Use ContentPart IDs exactly as listed in AVAILABLE CONTENT PARTS above
- Empty contentParts are only allowed if chapter generates content WITHOUT referencing any documents/images/data from the user request
CRITICAL RULE: If the user request mentions BOTH:
a) Documents/images/data (listed in AVAILABLE CONTENT PARTS above), AND
b) Generic content types (article text, main content, body text, etc.)
Then chapters that generate those generic content types MUST assign the relevant ContentParts, because the content should relate to or be based on the provided documents/images/data.
## CONTENT EFFICIENCY PRINCIPLES
- Generate COMPACT content: Focus on essential information only
- AVOID verbose, lengthy, or repetitive text - be concise and direct
- Prioritize FACTS over filler text - no introductions like "In this chapter..."
- Minimize system resources: shorter content = faster processing
- Quality over quantity: precise, meaningful content rather than padding
## CHAPTER STRUCTURE REQUIREMENTS
- Generate chapters based on USER REQUEST - analyze what structure the user wants
- Create ONLY the minimum chapters needed to cover the user's request - avoid over-structuring
- HARD LIMIT: Maximum 7 chapters per document. If the topic can be covered in fewer, prefer fewer. Combine related topics into single chapters rather than creating many small ones.
- IMPORTANT: Each chapter MUST have ALL these fields:
- id: Unique identifier (e.g., "chapter_1")
- level: Heading level (1, 2, 3, etc.)
- title: Chapter title
- contentParts: Object mapping ContentPart IDs to usage instructions (MUST assign if chapter relates to documents/data from user request)
- generationHint: Description of what content to generate (including formatting/styling requirements)
- sections: Empty array [] (REQUIRED - sections are generated in next phase)
- contentParts: {"partId": {"instruction": "..."} or {"caption": "..."} or both} - Assign ContentParts as required by CONTENT ASSIGNMENT RULE above
- The "instruction" field for each ContentPart MUST contain ALL relevant details from the USER REQUEST that apply to content extraction for this specific chapter. Include all formatting rules, data requirements, constraints, and specifications mentioned in the user request that are relevant for processing this ContentPart in this chapter.
- generationHint: Keep CONCISE but include relevant details from the USER REQUEST. Focus on WHAT to generate, not HOW to phrase it verbosely.
- The number of chapters depends on the user request - create only what is requested. Do NOT create chapters for topics without available data.
CRITICAL: Only create chapters for CONTENT sections, not for formatting/styling requirements. Formatting/styling requirements should instead be included in each chapter's generationHint if needed.
## DOCUMENT STRUCTURE
For each document, determine:
- outputFormat: From USER REQUEST (explicit mention or infer from purpose/content type). Default: "txt". Multiple documents can have different formats.
- language: From USER REQUEST (map to ISO 639-1: de, en, fr, it...). Default: "de". Multiple documents can have different languages.
- chapters: Structure appropriately for the format (e.g., pptx=slides, docx=sections, xlsx=worksheets). Match format capabilities and constraints.
Required JSON fields:
- metadata: {"title": "...", "language": "..."}
- documents: Array with id, title, filename, outputFormat, language, chapters[]
- chapters: Array with id, level, title, contentParts, generationHint, sections[]
EXAMPLE STRUCTURE (for reference only - adapt to user request):
{
"metadata": {
"title": "Document Title",
"language": "de"
},
"documents": [{
"id": "doc_1",
"title": "Document Title",
"filename": "document.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Chapter Title",
"contentParts": {
"extracted_part_id": {
"instruction": "Use extracted content with ALL relevant details from user request"
}
},
"generationHint": "Detailed description including ALL relevant details from user request for this chapter",
"sections": []
}
]
}]
}
CRITICAL INSTRUCTIONS:
- Generate chapters based on USER REQUEST, NOT based on the example above
- The example shows the JSON structure format, NOT the required chapters
- Create only the chapters that match the user's request
- Adapt chapter titles and structure to match the user's specific request
- Determine outputFormat and language for each document by analyzing the USER REQUEST above
- The example shows placeholders "txt" and "de" - YOU MUST REPLACE THESE with actual values determined from the USER REQUEST
MANDATORY CONTENT ASSIGNMENT CHECK:
For each chapter, verify:
1. Does the user request mention documents/images/data? (e.g., "photo", "image", "document", "data", "based on", "about")
2. Does this chapter's generationHint, title, or purpose relate to those documents/images/data mentioned in step 1?
- Examples: "article about the photo", "text describing the image", "analysis of the document", "content based on the data"
- Even if chapter doesn't explicitly say "about the image", if user request mentions both the image AND this chapter's content type → relate them
3. If YES to both → chapter MUST have contentParts assigned (cannot be empty {})
4. If ContentPart is "object" format and chapter needs to write ABOUT it → assign with "instruction" field, not just "caption"
OUTPUT FORMAT: Start with { and end with }. Do NOT use markdown code fences (```json). Do NOT add explanatory text before or after the JSON. Return ONLY the JSON object itself.

View file

@ -0,0 +1,39 @@
{
"metadata": {
"title": "Email Response",
"language": "de"
},
"documents": [{
"id": "doc_1",
"title": "Email Response",
"filename": "email_response.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Introduction",
"contentParts": {},
"generationHint": "Generate a brief introduction for the email response.",
"sections": []
},
{
"id": "chapter_2",
"level": 1,
"title": "Main Content",
"contentParts": {},
"generationHint": "Generate the main body of the email response, addressing the points mentioned in the incoming email.",
"sections": []
},
{
"id": "chapter_3",
"level": 1,
"title": "Conclusion",
"contentParts": {},
"generationHint": "Generate a concise conclusion for the email response.",
"sections": []
}
]
}]
}

View file

@ -0,0 +1,41 @@
{
"metadata": {
"title": "Email Response",
"language": "de"
},
"documents": [
{
"id": "doc_1",
"title": "Email Response",
"filename": "email_response.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Introduction",
"contentParts": {},
"generationHint": "Generate a brief introduction for the email response.",
"sections": []
},
{
"id": "chapter_2",
"level": 1,
"title": "Main Content",
"contentParts": {},
"generationHint": "Generate the main body of the email response, addressing the points mentioned in the incoming email.",
"sections": []
},
{
"id": "chapter_3",
"level": 1,
"title": "Conclusion",
"contentParts": {},
"generationHint": "Generate a concise conclusion for the email response.",
"sections": []
}
]
}
]
}

View file

@ -0,0 +1,22 @@
Compose an email based on this context:
-------
Reply to the following email(s):\n\nFrom: ida-dittrich@proton.me\nSubject: test\nContent:\nYou don't often get email from ida-dittrich@proton.me. Learn why this is important\r\n\r\n\r\ndies ist eine testmail.\r\n\r\n\r\nSent from Proton Mail for iOS.
-------
Recipients: (not specified - this is a draft)
Style: business
Max length: 1000 characters
Available_Document_References:
- (no documents)
Based on the context, decide which documents to attach.
CRITICAL: Use EXACT document references from Available_Document_References above. For individual documents: ALWAYS use docItem:<documentId>:<filename> format (include filename)
Return JSON:
{
"subject": "subject line",
"body": "email body (HTML allowed)",
"attachments": ["docItem:<documentId>:<filename>"]
}

View file

@ -0,0 +1,7 @@
```json
{
"subject": "Re: Test",
"body": "Sehr geehrte Frau Dittrich,<br><br>vielen Dank für Ihre Testmail. Bitte lassen Sie mich wissen, wenn es noch etwas gibt, bei dem ich Ihnen behilflich sein kann.<br><br>Mit freundlichen Grüßen,<br>[Ihr Name]",
"attachments": []
}
```

View file

@ -0,0 +1,119 @@
# TASK: Plan Document Structure (Documents + Chapters)
This is a STRUCTURE PLANNING task. You define which documents to create and which chapters each document will have.
Chapter CONTENT will be generated in a later step - here you only plan the STRUCTURE and assign content references.
Return EXACTLY ONE complete JSON object. Do not generate multiple JSON objects, alternatives, or variations. Do not use separators like "---" between JSON objects.
## USER REQUEST (for context)
```
formulier eine passende email zur eingehenden email
```
## AVAILABLE CONTENT PARTS
(No content parts available)
## CONTENT ASSIGNMENT RULE
CRITICAL: Every chapter MUST have contentParts assigned if it relates to documents/images/data from the user request.
If the user request mentions documents/images/data, then EVERY chapter that generates content related to those references MUST assign the relevant ContentParts explicitly.
Assignment logic:
- If chapter DISPLAYS a document/image → assign "object" format ContentPart with "caption"
- If chapter generates text content ABOUT a document/image/data → assign ContentPart with "instruction":
- Prefer "extracted" format if available (contains analyzed/extracted content)
- If only "object" format is available, use "object" format with "instruction" (to write ABOUT the image/document)
- If chapter's generationHint or purpose relates to a document/image/data mentioned in user request → it MUST have ContentParts assigned
- Multiple chapters might assign the same ContentPart (e.g., one chapter displays image, another writes about it)
- Use ContentPart IDs exactly as listed in AVAILABLE CONTENT PARTS above
- Empty contentParts are only allowed if chapter generates content WITHOUT referencing any documents/images/data from the user request
CRITICAL RULE: If the user request mentions BOTH:
a) Documents/images/data (listed in AVAILABLE CONTENT PARTS above), AND
b) Generic content types (article text, main content, body text, etc.)
Then chapters that generate those generic content types MUST assign the relevant ContentParts, because the content should relate to or be based on the provided documents/images/data.
## CONTENT EFFICIENCY PRINCIPLES
- Generate COMPACT content: Focus on essential information only
- AVOID verbose, lengthy, or repetitive text - be concise and direct
- Prioritize FACTS over filler text - no introductions like "In this chapter..."
- Minimize system resources: shorter content = faster processing
- Quality over quantity: precise, meaningful content rather than padding
## CHAPTER STRUCTURE REQUIREMENTS
- Generate chapters based on USER REQUEST - analyze what structure the user wants
- Create ONLY the minimum chapters needed to cover the user's request - avoid over-structuring
- HARD LIMIT: Maximum 7 chapters per document. If the topic can be covered in fewer, prefer fewer. Combine related topics into single chapters rather than creating many small ones.
- IMPORTANT: Each chapter MUST have ALL these fields:
- id: Unique identifier (e.g., "chapter_1")
- level: Heading level (1, 2, 3, etc.)
- title: Chapter title
- contentParts: Object mapping ContentPart IDs to usage instructions (MUST assign if chapter relates to documents/data from user request)
- generationHint: Description of what content to generate (including formatting/styling requirements)
- sections: Empty array [] (REQUIRED - sections are generated in next phase)
- contentParts: {"partId": {"instruction": "..."} or {"caption": "..."} or both} - Assign ContentParts as required by CONTENT ASSIGNMENT RULE above
- The "instruction" field for each ContentPart MUST contain ALL relevant details from the USER REQUEST that apply to content extraction for this specific chapter. Include all formatting rules, data requirements, constraints, and specifications mentioned in the user request that are relevant for processing this ContentPart in this chapter.
- generationHint: Keep CONCISE but include relevant details from the USER REQUEST. Focus on WHAT to generate, not HOW to phrase it verbosely.
- The number of chapters depends on the user request - create only what is requested. Do NOT create chapters for topics without available data.
CRITICAL: Only create chapters for CONTENT sections, not for formatting/styling requirements. Formatting/styling requirements should instead be included in each chapter's generationHint if needed.
## DOCUMENT STRUCTURE
For each document, determine:
- outputFormat: From USER REQUEST (explicit mention or infer from purpose/content type). Default: "txt". Multiple documents can have different formats.
- language: From USER REQUEST (map to ISO 639-1: de, en, fr, it...). Default: "de". Multiple documents can have different languages.
- chapters: Structure appropriately for the format (e.g., pptx=slides, docx=sections, xlsx=worksheets). Match format capabilities and constraints.
Required JSON fields:
- metadata: {"title": "...", "language": "..."}
- documents: Array with id, title, filename, outputFormat, language, chapters[]
- chapters: Array with id, level, title, contentParts, generationHint, sections[]
EXAMPLE STRUCTURE (for reference only - adapt to user request):
{
"metadata": {
"title": "Document Title",
"language": "de"
},
"documents": [{
"id": "doc_1",
"title": "Document Title",
"filename": "document.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Chapter Title",
"contentParts": {
"extracted_part_id": {
"instruction": "Use extracted content with ALL relevant details from user request"
}
},
"generationHint": "Detailed description including ALL relevant details from user request for this chapter",
"sections": []
}
]
}]
}
CRITICAL INSTRUCTIONS:
- Generate chapters based on USER REQUEST, NOT based on the example above
- The example shows the JSON structure format, NOT the required chapters
- Create only the chapters that match the user's request
- Adapt chapter titles and structure to match the user's specific request
- Determine outputFormat and language for each document by analyzing the USER REQUEST above
- The example shows placeholders "txt" and "de" - YOU MUST REPLACE THESE with actual values determined from the USER REQUEST
MANDATORY CONTENT ASSIGNMENT CHECK:
For each chapter, verify:
1. Does the user request mention documents/images/data? (e.g., "photo", "image", "document", "data", "based on", "about")
2. Does this chapter's generationHint, title, or purpose relate to those documents/images/data mentioned in step 1?
- Examples: "article about the photo", "text describing the image", "analysis of the document", "content based on the data"
- Even if chapter doesn't explicitly say "about the image", if user request mentions both the image AND this chapter's content type → relate them
3. If YES to both → chapter MUST have contentParts assigned (cannot be empty {})
4. If ContentPart is "object" format and chapter needs to write ABOUT it → assign with "instruction" field, not just "caption"
OUTPUT FORMAT: Start with { and end with }. Do NOT use markdown code fences (```json). Do NOT add explanatory text before or after the JSON. Return ONLY the JSON object itself.

View file

@ -0,0 +1,41 @@
{
"metadata": {
"title": "Email Response",
"language": "de"
},
"documents": [
{
"id": "doc_1",
"title": "Email Response",
"filename": "email_response.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Einleitung",
"contentParts": {},
"generationHint": "Generate an introductory paragraph for the email response, setting the tone and context.",
"sections": []
},
{
"id": "chapter_2",
"level": 1,
"title": "Hauptteil",
"contentParts": {},
"generationHint": "Develop the main body of the email, addressing the key points from the incoming email and providing necessary information or responses.",
"sections": []
},
{
"id": "chapter_3",
"level": 1,
"title": "Schluss",
"contentParts": {},
"generationHint": "Conclude the email with a closing statement, including any follow-up actions or expressions of gratitude.",
"sections": []
}
]
}
]
}

View file

@ -0,0 +1,41 @@
{
"metadata": {
"title": "Email Response",
"language": "de"
},
"documents": [
{
"id": "doc_1",
"title": "Email Response",
"filename": "email_response.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Einleitung",
"contentParts": {},
"generationHint": "Generate an introductory paragraph for the email response, setting the tone and context.",
"sections": []
},
{
"id": "chapter_2",
"level": 1,
"title": "Hauptteil",
"contentParts": {},
"generationHint": "Develop the main body of the email, addressing the key points from the incoming email and providing necessary information or responses.",
"sections": []
},
{
"id": "chapter_3",
"level": 1,
"title": "Schluss",
"contentParts": {},
"generationHint": "Conclude the email with a closing statement, including any follow-up actions or expressions of gratitude.",
"sections": []
}
]
}
]
}

View file

@ -0,0 +1,119 @@
# TASK: Plan Document Structure (Documents + Chapters)
This is a STRUCTURE PLANNING task. You define which documents to create and which chapters each document will have.
Chapter CONTENT will be generated in a later step - here you only plan the STRUCTURE and assign content references.
Return EXACTLY ONE complete JSON object. Do not generate multiple JSON objects, alternatives, or variations. Do not use separators like "---" between JSON objects.
## USER REQUEST (for context)
```
formulier eine passende email zur eingehenden email
```
## AVAILABLE CONTENT PARTS
(No content parts available)
## CONTENT ASSIGNMENT RULE
CRITICAL: Every chapter MUST have contentParts assigned if it relates to documents/images/data from the user request.
If the user request mentions documents/images/data, then EVERY chapter that generates content related to those references MUST assign the relevant ContentParts explicitly.
Assignment logic:
- If chapter DISPLAYS a document/image → assign "object" format ContentPart with "caption"
- If chapter generates text content ABOUT a document/image/data → assign ContentPart with "instruction":
- Prefer "extracted" format if available (contains analyzed/extracted content)
- If only "object" format is available, use "object" format with "instruction" (to write ABOUT the image/document)
- If chapter's generationHint or purpose relates to a document/image/data mentioned in user request → it MUST have ContentParts assigned
- Multiple chapters might assign the same ContentPart (e.g., one chapter displays image, another writes about it)
- Use ContentPart IDs exactly as listed in AVAILABLE CONTENT PARTS above
- Empty contentParts are only allowed if chapter generates content WITHOUT referencing any documents/images/data from the user request
CRITICAL RULE: If the user request mentions BOTH:
a) Documents/images/data (listed in AVAILABLE CONTENT PARTS above), AND
b) Generic content types (article text, main content, body text, etc.)
Then chapters that generate those generic content types MUST assign the relevant ContentParts, because the content should relate to or be based on the provided documents/images/data.
## CONTENT EFFICIENCY PRINCIPLES
- Generate COMPACT content: Focus on essential information only
- AVOID verbose, lengthy, or repetitive text - be concise and direct
- Prioritize FACTS over filler text - no introductions like "In this chapter..."
- Minimize system resources: shorter content = faster processing
- Quality over quantity: precise, meaningful content rather than padding
## CHAPTER STRUCTURE REQUIREMENTS
- Generate chapters based on USER REQUEST - analyze what structure the user wants
- Create ONLY the minimum chapters needed to cover the user's request - avoid over-structuring
- HARD LIMIT: Maximum 7 chapters per document. If the topic can be covered in fewer, prefer fewer. Combine related topics into single chapters rather than creating many small ones.
- IMPORTANT: Each chapter MUST have ALL these fields:
- id: Unique identifier (e.g., "chapter_1")
- level: Heading level (1, 2, 3, etc.)
- title: Chapter title
- contentParts: Object mapping ContentPart IDs to usage instructions (MUST assign if chapter relates to documents/data from user request)
- generationHint: Description of what content to generate (including formatting/styling requirements)
- sections: Empty array [] (REQUIRED - sections are generated in next phase)
- contentParts: {"partId": {"instruction": "..."} or {"caption": "..."} or both} - Assign ContentParts as required by CONTENT ASSIGNMENT RULE above
- The "instruction" field for each ContentPart MUST contain ALL relevant details from the USER REQUEST that apply to content extraction for this specific chapter. Include all formatting rules, data requirements, constraints, and specifications mentioned in the user request that are relevant for processing this ContentPart in this chapter.
- generationHint: Keep CONCISE but include relevant details from the USER REQUEST. Focus on WHAT to generate, not HOW to phrase it verbosely.
- The number of chapters depends on the user request - create only what is requested. Do NOT create chapters for topics without available data.
CRITICAL: Only create chapters for CONTENT sections, not for formatting/styling requirements. Formatting/styling requirements should instead be included in each chapter's generationHint if needed.
## DOCUMENT STRUCTURE
For each document, determine:
- outputFormat: From USER REQUEST (explicit mention or infer from purpose/content type). Default: "txt". Multiple documents can have different formats.
- language: From USER REQUEST (map to ISO 639-1: de, en, fr, it...). Default: "de". Multiple documents can have different languages.
- chapters: Structure appropriately for the format (e.g., pptx=slides, docx=sections, xlsx=worksheets). Match format capabilities and constraints.
Required JSON fields:
- metadata: {"title": "...", "language": "..."}
- documents: Array with id, title, filename, outputFormat, language, chapters[]
- chapters: Array with id, level, title, contentParts, generationHint, sections[]
EXAMPLE STRUCTURE (for reference only - adapt to user request):
{
"metadata": {
"title": "Document Title",
"language": "de"
},
"documents": [{
"id": "doc_1",
"title": "Document Title",
"filename": "document.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Chapter Title",
"contentParts": {
"extracted_part_id": {
"instruction": "Use extracted content with ALL relevant details from user request"
}
},
"generationHint": "Detailed description including ALL relevant details from user request for this chapter",
"sections": []
}
]
}]
}
CRITICAL INSTRUCTIONS:
- Generate chapters based on USER REQUEST, NOT based on the example above
- The example shows the JSON structure format, NOT the required chapters
- Create only the chapters that match the user's request
- Adapt chapter titles and structure to match the user's specific request
- Determine outputFormat and language for each document by analyzing the USER REQUEST above
- The example shows placeholders "txt" and "de" - YOU MUST REPLACE THESE with actual values determined from the USER REQUEST
MANDATORY CONTENT ASSIGNMENT CHECK:
For each chapter, verify:
1. Does the user request mention documents/images/data? (e.g., "photo", "image", "document", "data", "based on", "about")
2. Does this chapter's generationHint, title, or purpose relate to those documents/images/data mentioned in step 1?
- Examples: "article about the photo", "text describing the image", "analysis of the document", "content based on the data"
- Even if chapter doesn't explicitly say "about the image", if user request mentions both the image AND this chapter's content type → relate them
3. If YES to both → chapter MUST have contentParts assigned (cannot be empty {})
4. If ContentPart is "object" format and chapter needs to write ABOUT it → assign with "instruction" field, not just "caption"
OUTPUT FORMAT: Start with { and end with }. Do NOT use markdown code fences (```json). Do NOT add explanatory text before or after the JSON. Return ONLY the JSON object itself.

View file

@ -0,0 +1,41 @@
{
"metadata": {
"title": "Email Response",
"language": "de"
},
"documents": [
{
"id": "doc_1",
"title": "Email Response",
"filename": "email_response.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Introduction",
"contentParts": {},
"generationHint": "Generate an introductory paragraph for the email response, setting the tone and context.",
"sections": []
},
{
"id": "chapter_2",
"level": 1,
"title": "Main Content",
"contentParts": {},
"generationHint": "Develop the main body of the email, addressing the points mentioned in the incoming email.",
"sections": []
},
{
"id": "chapter_3",
"level": 1,
"title": "Conclusion",
"contentParts": {},
"generationHint": "Conclude the email with a polite closing statement and any necessary follow-up actions.",
"sections": []
}
]
}
]
}

View file

@ -0,0 +1,41 @@
{
"metadata": {
"title": "Email Response",
"language": "de"
},
"documents": [
{
"id": "doc_1",
"title": "Email Response",
"filename": "email_response.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Introduction",
"contentParts": {},
"generationHint": "Generate an introductory paragraph for the email response, setting the tone and context.",
"sections": []
},
{
"id": "chapter_2",
"level": 1,
"title": "Main Content",
"contentParts": {},
"generationHint": "Develop the main body of the email, addressing the points mentioned in the incoming email.",
"sections": []
},
{
"id": "chapter_3",
"level": 1,
"title": "Conclusion",
"contentParts": {},
"generationHint": "Conclude the email with a polite closing statement and any necessary follow-up actions.",
"sections": []
}
]
}
]
}

View file

@ -0,0 +1,119 @@
# TASK: Plan Document Structure (Documents + Chapters)
This is a STRUCTURE PLANNING task. You define which documents to create and which chapters each document will have.
Chapter CONTENT will be generated in a later step - here you only plan the STRUCTURE and assign content references.
Return EXACTLY ONE complete JSON object. Do not generate multiple JSON objects, alternatives, or variations. Do not use separators like "---" between JSON objects.
## USER REQUEST (for context)
```
formulier eine passende email zur eingehenden email
```
## AVAILABLE CONTENT PARTS
(No content parts available)
## CONTENT ASSIGNMENT RULE
CRITICAL: Every chapter MUST have contentParts assigned if it relates to documents/images/data from the user request.
If the user request mentions documents/images/data, then EVERY chapter that generates content related to those references MUST assign the relevant ContentParts explicitly.
Assignment logic:
- If chapter DISPLAYS a document/image → assign "object" format ContentPart with "caption"
- If chapter generates text content ABOUT a document/image/data → assign ContentPart with "instruction":
- Prefer "extracted" format if available (contains analyzed/extracted content)
- If only "object" format is available, use "object" format with "instruction" (to write ABOUT the image/document)
- If chapter's generationHint or purpose relates to a document/image/data mentioned in user request → it MUST have ContentParts assigned
- Multiple chapters might assign the same ContentPart (e.g., one chapter displays image, another writes about it)
- Use ContentPart IDs exactly as listed in AVAILABLE CONTENT PARTS above
- Empty contentParts are only allowed if chapter generates content WITHOUT referencing any documents/images/data from the user request
CRITICAL RULE: If the user request mentions BOTH:
a) Documents/images/data (listed in AVAILABLE CONTENT PARTS above), AND
b) Generic content types (article text, main content, body text, etc.)
Then chapters that generate those generic content types MUST assign the relevant ContentParts, because the content should relate to or be based on the provided documents/images/data.
## CONTENT EFFICIENCY PRINCIPLES
- Generate COMPACT content: Focus on essential information only
- AVOID verbose, lengthy, or repetitive text - be concise and direct
- Prioritize FACTS over filler text - no introductions like "In this chapter..."
- Minimize system resources: shorter content = faster processing
- Quality over quantity: precise, meaningful content rather than padding
## CHAPTER STRUCTURE REQUIREMENTS
- Generate chapters based on USER REQUEST - analyze what structure the user wants
- Create ONLY the minimum chapters needed to cover the user's request - avoid over-structuring
- HARD LIMIT: Maximum 7 chapters per document. If the topic can be covered in fewer, prefer fewer. Combine related topics into single chapters rather than creating many small ones.
- IMPORTANT: Each chapter MUST have ALL these fields:
- id: Unique identifier (e.g., "chapter_1")
- level: Heading level (1, 2, 3, etc.)
- title: Chapter title
- contentParts: Object mapping ContentPart IDs to usage instructions (MUST assign if chapter relates to documents/data from user request)
- generationHint: Description of what content to generate (including formatting/styling requirements)
- sections: Empty array [] (REQUIRED - sections are generated in next phase)
- contentParts: {"partId": {"instruction": "..."} or {"caption": "..."} or both} - Assign ContentParts as required by CONTENT ASSIGNMENT RULE above
- The "instruction" field for each ContentPart MUST contain ALL relevant details from the USER REQUEST that apply to content extraction for this specific chapter. Include all formatting rules, data requirements, constraints, and specifications mentioned in the user request that are relevant for processing this ContentPart in this chapter.
- generationHint: Keep CONCISE but include relevant details from the USER REQUEST. Focus on WHAT to generate, not HOW to phrase it verbosely.
- The number of chapters depends on the user request - create only what is requested. Do NOT create chapters for topics without available data.
CRITICAL: Only create chapters for CONTENT sections, not for formatting/styling requirements. Formatting/styling requirements should instead be included in each chapter's generationHint if needed.
## DOCUMENT STRUCTURE
For each document, determine:
- outputFormat: From USER REQUEST (explicit mention or infer from purpose/content type). Default: "txt". Multiple documents can have different formats.
- language: From USER REQUEST (map to ISO 639-1: de, en, fr, it...). Default: "de". Multiple documents can have different languages.
- chapters: Structure appropriately for the format (e.g., pptx=slides, docx=sections, xlsx=worksheets). Match format capabilities and constraints.
Required JSON fields:
- metadata: {"title": "...", "language": "..."}
- documents: Array with id, title, filename, outputFormat, language, chapters[]
- chapters: Array with id, level, title, contentParts, generationHint, sections[]
EXAMPLE STRUCTURE (for reference only - adapt to user request):
{
"metadata": {
"title": "Document Title",
"language": "de"
},
"documents": [{
"id": "doc_1",
"title": "Document Title",
"filename": "document.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Chapter Title",
"contentParts": {
"extracted_part_id": {
"instruction": "Use extracted content with ALL relevant details from user request"
}
},
"generationHint": "Detailed description including ALL relevant details from user request for this chapter",
"sections": []
}
]
}]
}
CRITICAL INSTRUCTIONS:
- Generate chapters based on USER REQUEST, NOT based on the example above
- The example shows the JSON structure format, NOT the required chapters
- Create only the chapters that match the user's request
- Adapt chapter titles and structure to match the user's specific request
- Determine outputFormat and language for each document by analyzing the USER REQUEST above
- The example shows placeholders "txt" and "de" - YOU MUST REPLACE THESE with actual values determined from the USER REQUEST
MANDATORY CONTENT ASSIGNMENT CHECK:
For each chapter, verify:
1. Does the user request mention documents/images/data? (e.g., "photo", "image", "document", "data", "based on", "about")
2. Does this chapter's generationHint, title, or purpose relate to those documents/images/data mentioned in step 1?
- Examples: "article about the photo", "text describing the image", "analysis of the document", "content based on the data"
- Even if chapter doesn't explicitly say "about the image", if user request mentions both the image AND this chapter's content type → relate them
3. If YES to both → chapter MUST have contentParts assigned (cannot be empty {})
4. If ContentPart is "object" format and chapter needs to write ABOUT it → assign with "instruction" field, not just "caption"
OUTPUT FORMAT: Start with { and end with }. Do NOT use markdown code fences (```json). Do NOT add explanatory text before or after the JSON. Return ONLY the JSON object itself.

View file

@ -0,0 +1,41 @@
{
"metadata": {
"title": "Email Response",
"language": "de"
},
"documents": [
{
"id": "doc_1",
"title": "Email Response",
"filename": "email_response.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Einleitung",
"contentParts": {},
"generationHint": "Generate an introductory paragraph for the email response, setting the tone and context.",
"sections": []
},
{
"id": "chapter_2",
"level": 1,
"title": "Hauptteil",
"contentParts": {},
"generationHint": "Create the main body of the email, addressing the key points from the incoming email.",
"sections": []
},
{
"id": "chapter_3",
"level": 1,
"title": "Schluss",
"contentParts": {},
"generationHint": "Conclude the email with a closing statement and any necessary follow-up actions.",
"sections": []
}
]
}
]
}

View file

@ -0,0 +1,41 @@
{
"metadata": {
"title": "Email Response",
"language": "de"
},
"documents": [
{
"id": "doc_1",
"title": "Email Response",
"filename": "email_response.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Einleitung",
"contentParts": {},
"generationHint": "Generate an introductory paragraph for the email response, setting the tone and context.",
"sections": []
},
{
"id": "chapter_2",
"level": 1,
"title": "Hauptteil",
"contentParts": {},
"generationHint": "Create the main body of the email, addressing the key points from the incoming email.",
"sections": []
},
{
"id": "chapter_3",
"level": 1,
"title": "Schluss",
"contentParts": {},
"generationHint": "Conclude the email with a closing statement and any necessary follow-up actions.",
"sections": []
}
]
}
]
}

View file

@ -0,0 +1,119 @@
# TASK: Plan Document Structure (Documents + Chapters)
This is a STRUCTURE PLANNING task. You define which documents to create and which chapters each document will have.
Chapter CONTENT will be generated in a later step - here you only plan the STRUCTURE and assign content references.
Return EXACTLY ONE complete JSON object. Do not generate multiple JSON objects, alternatives, or variations. Do not use separators like "---" between JSON objects.
## USER REQUEST (for context)
```
formulier eine passende email zur eingehenden email
```
## AVAILABLE CONTENT PARTS
(No content parts available)
## CONTENT ASSIGNMENT RULE
CRITICAL: Every chapter MUST have contentParts assigned if it relates to documents/images/data from the user request.
If the user request mentions documents/images/data, then EVERY chapter that generates content related to those references MUST assign the relevant ContentParts explicitly.
Assignment logic:
- If chapter DISPLAYS a document/image → assign "object" format ContentPart with "caption"
- If chapter generates text content ABOUT a document/image/data → assign ContentPart with "instruction":
- Prefer "extracted" format if available (contains analyzed/extracted content)
- If only "object" format is available, use "object" format with "instruction" (to write ABOUT the image/document)
- If chapter's generationHint or purpose relates to a document/image/data mentioned in user request → it MUST have ContentParts assigned
- Multiple chapters might assign the same ContentPart (e.g., one chapter displays image, another writes about it)
- Use ContentPart IDs exactly as listed in AVAILABLE CONTENT PARTS above
- Empty contentParts are only allowed if chapter generates content WITHOUT referencing any documents/images/data from the user request
CRITICAL RULE: If the user request mentions BOTH:
a) Documents/images/data (listed in AVAILABLE CONTENT PARTS above), AND
b) Generic content types (article text, main content, body text, etc.)
Then chapters that generate those generic content types MUST assign the relevant ContentParts, because the content should relate to or be based on the provided documents/images/data.
## CONTENT EFFICIENCY PRINCIPLES
- Generate COMPACT content: Focus on essential information only
- AVOID verbose, lengthy, or repetitive text - be concise and direct
- Prioritize FACTS over filler text - no introductions like "In this chapter..."
- Minimize system resources: shorter content = faster processing
- Quality over quantity: precise, meaningful content rather than padding
## CHAPTER STRUCTURE REQUIREMENTS
- Generate chapters based on USER REQUEST - analyze what structure the user wants
- Create ONLY the minimum chapters needed to cover the user's request - avoid over-structuring
- HARD LIMIT: Maximum 7 chapters per document. If the topic can be covered in fewer, prefer fewer. Combine related topics into single chapters rather than creating many small ones.
- IMPORTANT: Each chapter MUST have ALL these fields:
- id: Unique identifier (e.g., "chapter_1")
- level: Heading level (1, 2, 3, etc.)
- title: Chapter title
- contentParts: Object mapping ContentPart IDs to usage instructions (MUST assign if chapter relates to documents/data from user request)
- generationHint: Description of what content to generate (including formatting/styling requirements)
- sections: Empty array [] (REQUIRED - sections are generated in next phase)
- contentParts: {"partId": {"instruction": "..."} or {"caption": "..."} or both} - Assign ContentParts as required by CONTENT ASSIGNMENT RULE above
- The "instruction" field for each ContentPart MUST contain ALL relevant details from the USER REQUEST that apply to content extraction for this specific chapter. Include all formatting rules, data requirements, constraints, and specifications mentioned in the user request that are relevant for processing this ContentPart in this chapter.
- generationHint: Keep CONCISE but include relevant details from the USER REQUEST. Focus on WHAT to generate, not HOW to phrase it verbosely.
- The number of chapters depends on the user request - create only what is requested. Do NOT create chapters for topics without available data.
CRITICAL: Only create chapters for CONTENT sections, not for formatting/styling requirements. Formatting/styling requirements should instead be included in each chapter's generationHint if needed.
## DOCUMENT STRUCTURE
For each document, determine:
- outputFormat: From USER REQUEST (explicit mention or infer from purpose/content type). Default: "txt". Multiple documents can have different formats.
- language: From USER REQUEST (map to ISO 639-1: de, en, fr, it...). Default: "de". Multiple documents can have different languages.
- chapters: Structure appropriately for the format (e.g., pptx=slides, docx=sections, xlsx=worksheets). Match format capabilities and constraints.
Required JSON fields:
- metadata: {"title": "...", "language": "..."}
- documents: Array with id, title, filename, outputFormat, language, chapters[]
- chapters: Array with id, level, title, contentParts, generationHint, sections[]
EXAMPLE STRUCTURE (for reference only - adapt to user request):
{
"metadata": {
"title": "Document Title",
"language": "de"
},
"documents": [{
"id": "doc_1",
"title": "Document Title",
"filename": "document.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Chapter Title",
"contentParts": {
"extracted_part_id": {
"instruction": "Use extracted content with ALL relevant details from user request"
}
},
"generationHint": "Detailed description including ALL relevant details from user request for this chapter",
"sections": []
}
]
}]
}
CRITICAL INSTRUCTIONS:
- Generate chapters based on USER REQUEST, NOT based on the example above
- The example shows the JSON structure format, NOT the required chapters
- Create only the chapters that match the user's request
- Adapt chapter titles and structure to match the user's specific request
- Determine outputFormat and language for each document by analyzing the USER REQUEST above
- The example shows placeholders "txt" and "de" - YOU MUST REPLACE THESE with actual values determined from the USER REQUEST
MANDATORY CONTENT ASSIGNMENT CHECK:
For each chapter, verify:
1. Does the user request mention documents/images/data? (e.g., "photo", "image", "document", "data", "based on", "about")
2. Does this chapter's generationHint, title, or purpose relate to those documents/images/data mentioned in step 1?
- Examples: "article about the photo", "text describing the image", "analysis of the document", "content based on the data"
- Even if chapter doesn't explicitly say "about the image", if user request mentions both the image AND this chapter's content type → relate them
3. If YES to both → chapter MUST have contentParts assigned (cannot be empty {})
4. If ContentPart is "object" format and chapter needs to write ABOUT it → assign with "instruction" field, not just "caption"
OUTPUT FORMAT: Start with { and end with }. Do NOT use markdown code fences (```json). Do NOT add explanatory text before or after the JSON. Return ONLY the JSON object itself.

View file

@ -0,0 +1,41 @@
{
"metadata": {
"title": "Email Response Plan",
"language": "de"
},
"documents": [
{
"id": "doc_1",
"title": "Email Response",
"filename": "email_response.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Einleitung",
"contentParts": {},
"generationHint": "Formulate an introductory paragraph for the email response, setting the context for the reply.",
"sections": []
},
{
"id": "chapter_2",
"level": 1,
"title": "Hauptteil",
"contentParts": {},
"generationHint": "Develop the main body of the email response, addressing the key points from the incoming email.",
"sections": []
},
{
"id": "chapter_3",
"level": 1,
"title": "Schlussfolgerung",
"contentParts": {},
"generationHint": "Conclude the email with a summary or closing remarks, ensuring a polite and professional tone.",
"sections": []
}
]
}
]
}

View file

@ -0,0 +1,41 @@
{
"metadata": {
"title": "Email Response Plan",
"language": "de"
},
"documents": [
{
"id": "doc_1",
"title": "Email Response",
"filename": "email_response.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Einleitung",
"contentParts": {},
"generationHint": "Formulate an introductory paragraph for the email response, setting the context for the reply.",
"sections": []
},
{
"id": "chapter_2",
"level": 1,
"title": "Hauptteil",
"contentParts": {},
"generationHint": "Develop the main body of the email response, addressing the key points from the incoming email.",
"sections": []
},
{
"id": "chapter_3",
"level": 1,
"title": "Schlussfolgerung",
"contentParts": {},
"generationHint": "Conclude the email with a summary or closing remarks, ensuring a polite and professional tone.",
"sections": []
}
]
}
]
}

View file

@ -0,0 +1,22 @@
Compose an email based on this context:
-------
Reply to the following email(s):\n\nRecipient (reply to this address): ida-dittrich@proton.me\n\nFrom: ida-dittrich@proton.me\nSubject: test\nContent:\nYou don't often get email from ida-dittrich@proton.me. Learn why this is important\r\n\r\ndies ist eine testmail.\r\n\r\n\r\n\r\nSent from Proton Mail for iOS.\n\nFrom: p.motsch@valueon.ch\nSubject: FW: KI@SV Abgleich - Bericht Test Session\nContent:\nHallo Ida\r\n\r\nHier noch Futter für den Bericht nach der Session BEDAG von morgen.\r\nDies muss in unseren Bericht rein als Grundlage für die Entscheidungen auf Stufe Gesamtprojekt:\r\n\r\nFokus der Tests\r\n\r\n * Test auf «STA Sandbox» 19.03.2026, mit Cobra orgi\n\nFrom: notify@mail.notion.so\nSubject: Stephan Schellworth hat dich in Bedag Testschritt 3: Benchmark-Tests mit Cursor CLI erwähnt\nContent:\nStephan Schellworth hat dich in Bedag Testschritt 3: Benchmark-Tests mit Cursor CLI erwähnt\r\n17. März 2026, 08:15\r\n·PowerOn Startseite/05_Prozesse & Operations/Sprint Set Up/Tickets\r\nOwner\r\nI\r\nIda\r\nGehe zur Seite\r\nBenachrichtigungseinstellungen für diese \n\nFrom: notify@mail.notion.so\nSubject: Stephan Schellworth hat dich in Bedag Testschritt 4: Automatische Testfall-Ableitung erwähnt\nContent:\nStephan Schellworth hat dich in Bedag Testschritt 4: Automatische Testfall-Ableitung erwähnt\r\n17. März 2026, 08:15\r\n·PowerOn Startseite/05_Prozesse & Operations/Sprint Set Up/Tickets\r\nOwner\r\nI\r\nIda\r\nGehe zur Seite\r\nBenachrichtigungseinstellungen für diese\n\nFrom: notify@mail.notion.so\nSubject: Stephan Schellworth hat dich in Bedag Testschritt 2: Neutralisierten Code Dokumentieren erwähnt\nContent:\nStephan Schellworth hat dich in Bedag Testschritt 2: Neutralisierten Code Dokumentieren erwähnt\r\n17. März 2026, 08:15\r\n·PowerOn Startseite/05_Prozesse & Operations/Sprint Set Up/Tickets\r\nOwner\r\nI\r\nIda\r\nGehe zur Seite\r\nBenachrichtigungseinstellungen für di
-------
Recipients: ['ida-dittrich@proton.me']
Style: business
Max length: 1000 characters
Available_Document_References:
- (no documents)
Based on the context, decide which documents to attach.
CRITICAL: Use EXACT document references from Available_Document_References above. For individual documents: ALWAYS use docItem:<documentId>:<filename> format (include filename)
Return JSON:
{
"subject": "subject line",
"body": "email body (HTML allowed)",
"attachments": ["docItem:<documentId>:<filename>"]
}

View file

@ -0,0 +1,7 @@
```json
{
"subject": "Re: Test Email",
"body": "Hallo Ida,<br><br>Vielen Dank für Ihre Testmail. Ich habe die Informationen aus der letzten Sitzung erhalten und werde sicherstellen, dass diese in unseren Bericht aufgenommen werden. Bitte lassen Sie mich wissen, wenn es noch weitere Details gibt, die berücksichtigt werden sollten.<br><br>Mit freundlichen Grüßen,<br><br>P. Motsch",
"attachments": []
}
```

View file

@ -0,0 +1,119 @@
# TASK: Plan Document Structure (Documents + Chapters)
This is a STRUCTURE PLANNING task. You define which documents to create and which chapters each document will have.
Chapter CONTENT will be generated in a later step - here you only plan the STRUCTURE and assign content references.
Return EXACTLY ONE complete JSON object. Do not generate multiple JSON objects, alternatives, or variations. Do not use separators like "---" between JSON objects.
## USER REQUEST (for context)
```
formulier eine passende email zur eingehenden email
```
## AVAILABLE CONTENT PARTS
(No content parts available)
## CONTENT ASSIGNMENT RULE
CRITICAL: Every chapter MUST have contentParts assigned if it relates to documents/images/data from the user request.
If the user request mentions documents/images/data, then EVERY chapter that generates content related to those references MUST assign the relevant ContentParts explicitly.
Assignment logic:
- If chapter DISPLAYS a document/image → assign "object" format ContentPart with "caption"
- If chapter generates text content ABOUT a document/image/data → assign ContentPart with "instruction":
- Prefer "extracted" format if available (contains analyzed/extracted content)
- If only "object" format is available, use "object" format with "instruction" (to write ABOUT the image/document)
- If chapter's generationHint or purpose relates to a document/image/data mentioned in user request → it MUST have ContentParts assigned
- Multiple chapters might assign the same ContentPart (e.g., one chapter displays image, another writes about it)
- Use ContentPart IDs exactly as listed in AVAILABLE CONTENT PARTS above
- Empty contentParts are only allowed if chapter generates content WITHOUT referencing any documents/images/data from the user request
CRITICAL RULE: If the user request mentions BOTH:
a) Documents/images/data (listed in AVAILABLE CONTENT PARTS above), AND
b) Generic content types (article text, main content, body text, etc.)
Then chapters that generate those generic content types MUST assign the relevant ContentParts, because the content should relate to or be based on the provided documents/images/data.
## CONTENT EFFICIENCY PRINCIPLES
- Generate COMPACT content: Focus on essential information only
- AVOID verbose, lengthy, or repetitive text - be concise and direct
- Prioritize FACTS over filler text - no introductions like "In this chapter..."
- Minimize system resources: shorter content = faster processing
- Quality over quantity: precise, meaningful content rather than padding
## CHAPTER STRUCTURE REQUIREMENTS
- Generate chapters based on USER REQUEST - analyze what structure the user wants
- Create ONLY the minimum chapters needed to cover the user's request - avoid over-structuring
- HARD LIMIT: Maximum 7 chapters per document. If the topic can be covered in fewer, prefer fewer. Combine related topics into single chapters rather than creating many small ones.
- IMPORTANT: Each chapter MUST have ALL these fields:
- id: Unique identifier (e.g., "chapter_1")
- level: Heading level (1, 2, 3, etc.)
- title: Chapter title
- contentParts: Object mapping ContentPart IDs to usage instructions (MUST assign if chapter relates to documents/data from user request)
- generationHint: Description of what content to generate (including formatting/styling requirements)
- sections: Empty array [] (REQUIRED - sections are generated in next phase)
- contentParts: {"partId": {"instruction": "..."} or {"caption": "..."} or both} - Assign ContentParts as required by CONTENT ASSIGNMENT RULE above
- The "instruction" field for each ContentPart MUST contain ALL relevant details from the USER REQUEST that apply to content extraction for this specific chapter. Include all formatting rules, data requirements, constraints, and specifications mentioned in the user request that are relevant for processing this ContentPart in this chapter.
- generationHint: Keep CONCISE but include relevant details from the USER REQUEST. Focus on WHAT to generate, not HOW to phrase it verbosely.
- The number of chapters depends on the user request - create only what is requested. Do NOT create chapters for topics without available data.
CRITICAL: Only create chapters for CONTENT sections, not for formatting/styling requirements. Formatting/styling requirements are to be included in each chapter's generationHint if needed.
## DOCUMENT STRUCTURE
For each document, determine:
- outputFormat: From USER REQUEST (explicit mention or infer from purpose/content type). Default: "txt". Multiple documents can have different formats.
- language: From USER REQUEST (map to ISO 639-1: de, en, fr, it...). Default: "de". Multiple documents can have different languages.
- chapters: Structure appropriately for the format (e.g., pptx=slides, docx=sections, xlsx=worksheets). Match format capabilities and constraints.
Required JSON fields:
- metadata: {"title": "...", "language": "..."}
- documents: Array with id, title, filename, outputFormat, language, chapters[]
- chapters: Array with id, level, title, contentParts, generationHint, sections[]
EXAMPLE STRUCTURE (for reference only - adapt to user request):
{
"metadata": {
"title": "Document Title",
"language": "de"
},
"documents": [{
"id": "doc_1",
"title": "Document Title",
"filename": "document.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Chapter Title",
"contentParts": {
"extracted_part_id": {
"instruction": "Use extracted content with ALL relevant details from user request"
}
},
"generationHint": "Detailed description including ALL relevant details from user request for this chapter",
"sections": []
}
]
}]
}
CRITICAL INSTRUCTIONS:
- Generate chapters based on USER REQUEST, NOT based on the example above
- The example shows the JSON structure format, NOT the required chapters
- Create only the chapters that match the user's request
- Adapt chapter titles and structure to match the user's specific request
- Determine outputFormat and language for each document by analyzing the USER REQUEST above
- The example shows placeholders "txt" and "de" - YOU MUST REPLACE THESE with actual values determined from the USER REQUEST
MANDATORY CONTENT ASSIGNMENT CHECK:
For each chapter, verify:
1. Does the user request mention documents/images/data? (e.g., "photo", "image", "document", "data", "based on", "about")
2. Does this chapter's generationHint, title, or purpose relate to those documents/images/data mentioned in step 1?
- Examples: "article about the photo", "text describing the image", "analysis of the document", "content based on the data"
- Even if chapter doesn't explicitly say "about the image", if user request mentions both the image AND this chapter's content type → relate them
3. If YES to both → chapter MUST have contentParts assigned (cannot be empty {})
4. If ContentPart is "object" format and chapter needs to write ABOUT it → assign with "instruction" field, not just "caption"
OUTPUT FORMAT: Start with { and end with }. Do NOT use markdown code fences (```json). Do NOT add explanatory text before or after the JSON. Return ONLY the JSON object itself.

View file

@ -0,0 +1,41 @@
{
"metadata": {
"title": "Email Response",
"language": "de"
},
"documents": [
{
"id": "doc_1",
"title": "Email Response",
"filename": "email_response.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Einleitung",
"contentParts": {},
"generationHint": "Formulate a suitable introduction for the email response.",
"sections": []
},
{
"id": "chapter_2",
"level": 1,
"title": "Hauptteil",
"contentParts": {},
"generationHint": "Develop the main content of the email response, addressing the points from the incoming email.",
"sections": []
},
{
"id": "chapter_3",
"level": 1,
"title": "Schlussfolgerung",
"contentParts": {},
"generationHint": "Conclude the email with a polite closing statement.",
"sections": []
}
]
}
]
}

View file

@ -0,0 +1,41 @@
{
"metadata": {
"title": "Email Response",
"language": "de"
},
"documents": [
{
"id": "doc_1",
"title": "Email Response",
"filename": "email_response.txt",
"outputFormat": "txt",
"language": "de",
"chapters": [
{
"id": "chapter_1",
"level": 1,
"title": "Einleitung",
"contentParts": {},
"generationHint": "Formulate a suitable introduction for the email response.",
"sections": []
},
{
"id": "chapter_2",
"level": 1,
"title": "Hauptteil",
"contentParts": {},
"generationHint": "Develop the main content of the email response, addressing the points from the incoming email.",
"sections": []
},
{
"id": "chapter_3",
"level": 1,
"title": "Schlussfolgerung",
"contentParts": {},
"generationHint": "Conclude the email with a polite closing statement.",
"sections": []
}
]
}
]
}

View file

@ -0,0 +1,22 @@
Compose an email based on this context:
-------
Reply to the following email(s):\n\nRecipient (reply to this address): ida-dittrich@proton.me\n\nFrom: ida-dittrich@proton.me\nSubject: test\nContent:\nYou don't often get email from ida-dittrich@proton.me. Learn why this is important\r\n\r\ndies ist eine testmail.\r\n\r\n\r\n\r\nSent from Proton Mail for iOS.
-------
Recipients: ['ida-dittrich@proton.me']
Style: business
Max length: 1000 characters
Available_Document_References:
- (no documents)
Based on the context, decide which documents to attach.
CRITICAL: Use EXACT document references from Available_Document_References above. For individual documents: ALWAYS use docItem:<documentId>:<filename> format (include filename)
Return JSON:
{
"subject": "subject line",
"body": "email body (HTML allowed)",
"attachments": ["docItem:<documentId>:<filename>"]
}

View file

@ -0,0 +1,7 @@
```json
{
"subject": "Re: test",
"body": "Sehr geehrte Frau Dittrich,<br><br>vielen Dank für Ihre Nachricht. Ich habe Ihre Testmail erhalten und stehe Ihnen gerne für weitere Fragen oder Anliegen zur Verfügung.<br><br>Mit freundlichen Grüßen,<br><br>[Ihr Name]",
"attachments": []
}
```